Mirror of https://github.com/servo/servo.git (synced 2025-06-06 16:45:39 +00:00)

Commit b26a3bd31b: rustfmt
Parent commit: 3fc9ecace0

10 changed files with 478 additions and 332 deletions
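This commit applies rustfmt to the WebAudio DOM code; the hunks below are formatting changes only. The dominant pattern, condensed here from the first AudioBuffer hunk (the original indentation is not preserved in this capture, so the "before" alignment shown is an assumption), is that multi-argument signatures and call sites move to one argument per line with a trailing comma:

    // Before rustfmt (arguments assumed to be aligned under the opening parenthesis)
    pub fn new_inherited(global: &Window,
                         number_of_channels: u32,
                         length: u32,
                         sample_rate: f32,
                         initial_data: Option<&[f32]>) -> AudioBuffer {

    // After rustfmt (block indentation, one argument per line, trailing comma)
    pub fn new_inherited(
        global: &Window,
        number_of_channels: u32,
        length: u32,
        sample_rate: f32,
        initial_data: Option<&[f32]>,
    ) -> AudioBuffer {

The same rule is applied throughout the diff to constructors and long calls (AudioParam::new, Float32Array::create, message_node, and similar), and long method chains are re-wrapped onto one call per line.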
@ -35,18 +35,20 @@ pub struct AudioBuffer {
|
|||
impl AudioBuffer {
|
||||
#[allow(unrooted_must_root)]
|
||||
#[allow(unsafe_code)]
|
||||
pub fn new_inherited(global: &Window,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: f32,
|
||||
initial_data: Option<&[f32]>) -> AudioBuffer {
|
||||
pub fn new_inherited(
|
||||
global: &Window,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: f32,
|
||||
initial_data: Option<&[f32]>,
|
||||
) -> AudioBuffer {
|
||||
let initial_data = match initial_data {
|
||||
Some(initial_data) => {
|
||||
let mut data = vec![];
|
||||
data.extend_from_slice(initial_data);
|
||||
data
|
||||
},
|
||||
None => vec![0.; (length * number_of_channels) as usize]
|
||||
None => vec![0.; (length * number_of_channels) as usize],
|
||||
};
|
||||
let cx = global.get_cx();
|
||||
let mut js_channels: Vec<JSAudioChannel> = Vec::with_capacity(number_of_channels as usize);
|
||||
|
@ -57,7 +59,8 @@ impl AudioBuffer {
|
|||
Float32Array::create(
|
||||
cx,
|
||||
CreateWith::Slice(&initial_data.as_slice()[offset..offset + (length as usize)]),
|
||||
array.handle_mut())
|
||||
array.handle_mut(),
|
||||
)
|
||||
};
|
||||
let js_channel = Heap::default();
|
||||
js_channel.set(array.get());
|
||||
|
@ -66,7 +69,10 @@ impl AudioBuffer {
|
|||
AudioBuffer {
|
||||
reflector_: Reflector::new(),
|
||||
js_channels: DomRefCell::new(js_channels),
|
||||
shared_channels: DomRefCell::new(ServoMediaAudioBuffer::new(number_of_channels as u8, length as usize)),
|
||||
shared_channels: DomRefCell::new(ServoMediaAudioBuffer::new(
|
||||
number_of_channels as u8,
|
||||
length as usize,
|
||||
)),
|
||||
sample_rate,
|
||||
length,
|
||||
duration: length as f64 / sample_rate as f64,
|
||||
|
@ -75,22 +81,38 @@ impl AudioBuffer {
|
|||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new(global: &Window,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: f32,
|
||||
initial_data: Option<&[f32]>) -> DomRoot<AudioBuffer> {
|
||||
let buffer = AudioBuffer::new_inherited(global, number_of_channels, length, sample_rate, initial_data);
|
||||
pub fn new(
|
||||
global: &Window,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: f32,
|
||||
initial_data: Option<&[f32]>,
|
||||
) -> DomRoot<AudioBuffer> {
|
||||
let buffer = AudioBuffer::new_inherited(
|
||||
global,
|
||||
number_of_channels,
|
||||
length,
|
||||
sample_rate,
|
||||
initial_data,
|
||||
);
|
||||
reflect_dom_object(Box::new(buffer), global, AudioBufferBinding::Wrap)
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-audiobuffer
|
||||
pub fn Constructor(window: &Window,
|
||||
options: &AudioBufferOptions) -> Fallible<DomRoot<AudioBuffer>> {
|
||||
pub fn Constructor(
|
||||
window: &Window,
|
||||
options: &AudioBufferOptions,
|
||||
) -> Fallible<DomRoot<AudioBuffer>> {
|
||||
if options.numberOfChannels > MAX_CHANNEL_COUNT {
|
||||
return Err(Error::NotSupported);
|
||||
}
|
||||
Ok(AudioBuffer::new(window, options.numberOfChannels, options.length, *options.sampleRate, None))
|
||||
Ok(AudioBuffer::new(
|
||||
window,
|
||||
options.numberOfChannels,
|
||||
options.length,
|
||||
*options.sampleRate,
|
||||
None,
|
||||
))
|
||||
}
|
||||
|
||||
#[allow(unsafe_code)]
|
||||
|
@ -104,7 +126,9 @@ impl AudioBuffer {
|
|||
// Move the channel data from shared_channels to js_channels.
|
||||
rooted!(in (cx) let mut array = ptr::null_mut::<JSObject>());
|
||||
let shared_channel = (*self.shared_channels.borrow_mut()).buffers.remove(i);
|
||||
if Float32Array::create(cx, CreateWith::Slice(&shared_channel), array.handle_mut()).is_err() {
|
||||
if Float32Array::create(cx, CreateWith::Slice(&shared_channel), array.handle_mut())
|
||||
.is_err()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
channel.set(array.get());
|
||||
|
@ -175,7 +199,11 @@ impl AudioBufferMethods for AudioBuffer {
|
|||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-getchanneldata
|
||||
#[allow(unsafe_code)]
|
||||
unsafe fn GetChannelData(&self, cx: *mut JSContext, channel: u32) -> Fallible<NonNull<JSObject>> {
|
||||
unsafe fn GetChannelData(
|
||||
&self,
|
||||
cx: *mut JSContext,
|
||||
channel: u32,
|
||||
) -> Fallible<NonNull<JSObject>> {
|
||||
if channel >= self.number_of_channels {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
|
@ -184,15 +212,19 @@ impl AudioBufferMethods for AudioBuffer {
|
|||
return Err(Error::JSFailed);
|
||||
}
|
||||
|
||||
Ok(NonNull::new_unchecked(self.js_channels.borrow()[channel as usize].get()))
|
||||
Ok(NonNull::new_unchecked(
|
||||
self.js_channels.borrow()[channel as usize].get(),
|
||||
))
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-copyfromchannel
|
||||
#[allow(unsafe_code)]
|
||||
fn CopyFromChannel(&self,
|
||||
mut destination: CustomAutoRooterGuard<Float32Array>,
|
||||
channel_number: u32,
|
||||
start_in_channel: u32) -> Fallible<()> {
|
||||
fn CopyFromChannel(
|
||||
&self,
|
||||
mut destination: CustomAutoRooterGuard<Float32Array>,
|
||||
channel_number: u32,
|
||||
start_in_channel: u32,
|
||||
) -> Fallible<()> {
|
||||
if channel_number >= self.number_of_channels || start_in_channel > self.length {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
|
@ -220,17 +252,21 @@ impl AudioBufferMethods for AudioBuffer {
|
|||
dest.extend_from_slice(&shared_channel.as_slice()[offset..offset + bytes_to_copy]);
|
||||
}
|
||||
|
||||
unsafe { destination.update(&dest); }
|
||||
unsafe {
|
||||
destination.update(&dest);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-copytochannel
|
||||
#[allow(unsafe_code)]
|
||||
fn CopyToChannel(&self,
|
||||
source: CustomAutoRooterGuard<Float32Array>,
|
||||
channel_number: u32,
|
||||
start_in_channel: u32) -> Fallible<()> {
|
||||
fn CopyToChannel(
|
||||
&self,
|
||||
source: CustomAutoRooterGuard<Float32Array>,
|
||||
channel_number: u32,
|
||||
start_in_channel: u32,
|
||||
) -> Fallible<()> {
|
||||
if channel_number >= self.number_of_channels || start_in_channel > (source.len() as u32) {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
|
@ -250,7 +286,9 @@ impl AudioBufferMethods for AudioBuffer {
|
|||
if let Ok(mut array) = array {
|
||||
let bytes_to_copy = min(self.length - start_in_channel, source.len() as u32) as usize;
|
||||
let offset = start_in_channel as usize;
|
||||
unsafe { array.update(&source.as_slice()[offset..offset + bytes_to_copy]); }
|
||||
unsafe {
|
||||
array.update(&source.as_slice()[offset..offset + bytes_to_copy]);
|
||||
}
|
||||
} else {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@ use dom::bindings::codegen::Bindings::AudioParamBinding::AutomationRate;
|
|||
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
|
||||
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
|
||||
use dom::bindings::codegen::Bindings::AudioScheduledSourceNodeBinding::
|
||||
AudioScheduledSourceNodeBinding::AudioScheduledSourceNodeMethods;
|
||||
AudioScheduledSourceNodeBinding::AudioScheduledSourceNodeMethods;
|
||||
use dom::bindings::error::{Error, Fallible};
|
||||
use dom::bindings::inheritance::Castable;
|
||||
use dom::bindings::num::Finite;
|
||||
|
@ -45,7 +45,7 @@ impl AudioBufferSourceNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioBufferSourceOptions,
|
||||
) -> AudioBufferSourceNode {
|
||||
) -> AudioBufferSourceNode {
|
||||
let mut node_options = AudioNodeOptions::empty();
|
||||
node_options.channelCount = Some(2);
|
||||
node_options.channelCountMode = Some(ChannelCountMode::Max);
|
||||
|
@ -54,24 +54,30 @@ impl AudioBufferSourceNode {
|
|||
AudioNodeInit::AudioBufferSourceNode(options.into()),
|
||||
context,
|
||||
&node_options,
|
||||
0 /* inputs */,
|
||||
1 /* outputs */,
|
||||
);
|
||||
0, /* inputs */
|
||||
1, /* outputs */
|
||||
);
|
||||
let node_id = source_node.node().node_id();
|
||||
let playback_rate = AudioParam::new(&window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::PlaybackRate,
|
||||
AutomationRate::K_rate,
|
||||
*options.playbackRate,
|
||||
f32::MIN, f32::MAX);
|
||||
let detune = AudioParam::new(&window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Detune,
|
||||
AutomationRate::K_rate,
|
||||
*options.detune,
|
||||
f32::MIN, f32::MAX);
|
||||
let playback_rate = AudioParam::new(
|
||||
&window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::PlaybackRate,
|
||||
AutomationRate::K_rate,
|
||||
*options.playbackRate,
|
||||
f32::MIN,
|
||||
f32::MAX,
|
||||
);
|
||||
let detune = AudioParam::new(
|
||||
&window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Detune,
|
||||
AutomationRate::K_rate,
|
||||
*options.detune,
|
||||
f32::MIN,
|
||||
f32::MAX,
|
||||
);
|
||||
AudioBufferSourceNode {
|
||||
source_node,
|
||||
buffer: Default::default(),
|
||||
|
@ -88,7 +94,7 @@ impl AudioBufferSourceNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioBufferSourceOptions,
|
||||
) -> DomRoot<AudioBufferSourceNode> {
|
||||
) -> DomRoot<AudioBufferSourceNode> {
|
||||
let node = AudioBufferSourceNode::new_inherited(window, context, options);
|
||||
reflect_dom_object(Box::new(node), window, AudioBufferSourceNodeBinding::Wrap)
|
||||
}
|
||||
|
@ -97,7 +103,7 @@ impl AudioBufferSourceNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioBufferSourceOptions,
|
||||
) -> Fallible<DomRoot<AudioBufferSourceNode>> {
|
||||
) -> Fallible<DomRoot<AudioBufferSourceNode>> {
|
||||
Ok(AudioBufferSourceNode::new(window, context, options))
|
||||
}
|
||||
}
|
||||
|
@ -119,9 +125,11 @@ impl AudioBufferSourceNodeMethods for AudioBufferSourceNode {
|
|||
if self.source_node.started() {
|
||||
if let Some(buffer) = self.buffer.get() {
|
||||
let buffer = buffer.acquire_contents();
|
||||
self.source_node.node().message(
|
||||
AudioNodeMessage::AudioBufferSourceNode(
|
||||
AudioBufferSourceNodeMessage::SetBuffer(buffer)));
|
||||
self.source_node
|
||||
.node()
|
||||
.message(AudioNodeMessage::AudioBufferSourceNode(
|
||||
AudioBufferSourceNodeMessage::SetBuffer(buffer),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -160,17 +168,23 @@ impl AudioBufferSourceNodeMethods for AudioBufferSourceNode {
|
|||
self.loop_end.set(*loop_end)
|
||||
}
|
||||
|
||||
fn Start(&self,
|
||||
when: Finite<f64>,
|
||||
_offset: Option<Finite<f64>>,
|
||||
_duration: Option<Finite<f64>>) -> Fallible<()> {
|
||||
fn Start(
|
||||
&self,
|
||||
when: Finite<f64>,
|
||||
_offset: Option<Finite<f64>>,
|
||||
_duration: Option<Finite<f64>>,
|
||||
) -> Fallible<()> {
|
||||
if let Some(buffer) = self.buffer.get() {
|
||||
let buffer = buffer.acquire_contents();
|
||||
self.source_node.node().message(
|
||||
AudioNodeMessage::AudioBufferSourceNode(
|
||||
AudioBufferSourceNodeMessage::SetBuffer(buffer)));
|
||||
self.source_node
|
||||
.node()
|
||||
.message(AudioNodeMessage::AudioBufferSourceNode(
|
||||
AudioBufferSourceNodeMessage::SetBuffer(buffer),
|
||||
));
|
||||
}
|
||||
self.source_node.upcast::<AudioScheduledSourceNode>().Start(when)
|
||||
self.source_node
|
||||
.upcast::<AudioScheduledSourceNode>()
|
||||
.Start(when)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,10 @@ impl AudioContext {
|
|||
// https://webaudio.github.io/web-audio-api/#AudioContext-constructors
|
||||
fn new_inherited(global: &GlobalScope, options: &AudioContextOptions) -> AudioContext {
|
||||
// Steps 1-3.
|
||||
let context = BaseAudioContext::new_inherited(global, BaseAudioContextOptions::AudioContext(options.into()));
|
||||
let context = BaseAudioContext::new_inherited(
|
||||
global,
|
||||
BaseAudioContextOptions::AudioContext(options.into()),
|
||||
);
|
||||
|
||||
// Step 4.1.
|
||||
let latency_hint = options.latencyHint;
|
||||
|
@ -52,14 +55,13 @@ impl AudioContext {
|
|||
AudioContext {
|
||||
context,
|
||||
latency_hint,
|
||||
base_latency: 0., // TODO
|
||||
base_latency: 0., // TODO
|
||||
output_latency: 0., // TODO
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new(global: &GlobalScope,
|
||||
options: &AudioContextOptions) -> DomRoot<AudioContext> {
|
||||
pub fn new(global: &GlobalScope, options: &AudioContextOptions) -> DomRoot<AudioContext> {
|
||||
let context = AudioContext::new_inherited(global, options);
|
||||
let context = reflect_dom_object(Box::new(context), global, AudioContextBinding::Wrap);
|
||||
context.resume();
|
||||
|
@ -67,8 +69,10 @@ impl AudioContext {
|
|||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#AudioContext-constructors
|
||||
pub fn Constructor(window: &Window,
|
||||
options: &AudioContextOptions) -> Fallible<DomRoot<AudioContext>> {
|
||||
pub fn Constructor(
|
||||
window: &Window,
|
||||
options: &AudioContextOptions,
|
||||
) -> Fallible<DomRoot<AudioContext>> {
|
||||
let global = window.upcast::<GlobalScope>();
|
||||
Ok(AudioContext::new(global, options))
|
||||
}
|
||||
|
@ -125,7 +129,8 @@ impl AudioContextMethods for AudioContext {
|
|||
Ok(_) => {
|
||||
let base_context = Trusted::new(&self.context);
|
||||
let context = Trusted::new(self);
|
||||
let _ = task_source.queue(task!(suspend_ok: move || {
|
||||
let _ = task_source.queue(
|
||||
task!(suspend_ok: move || {
|
||||
let base_context = base_context.root();
|
||||
let context = context.root();
|
||||
let promise = trusted_promise.root();
|
||||
|
@ -139,15 +144,20 @@ impl AudioContextMethods for AudioContext {
|
|||
&window
|
||||
);
|
||||
}
|
||||
}), window.upcast());
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
Err(_) => {
|
||||
// The spec does not define the error case and `suspend` should
|
||||
// never fail, but we handle the case here for completion.
|
||||
let _ = task_source.queue(task!(suspend_error: move || {
|
||||
let _ = task_source.queue(
|
||||
task!(suspend_error: move || {
|
||||
let promise = trusted_promise.root();
|
||||
promise.reject_error(Error::Type("Something went wrong".to_owned()));
|
||||
}), window.upcast());
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -180,7 +190,8 @@ impl AudioContextMethods for AudioContext {
|
|||
Ok(_) => {
|
||||
let base_context = Trusted::new(&self.context);
|
||||
let context = Trusted::new(self);
|
||||
let _ = task_source.queue(task!(suspend_ok: move || {
|
||||
let _ = task_source.queue(
|
||||
task!(suspend_ok: move || {
|
||||
let base_context = base_context.root();
|
||||
let context = context.root();
|
||||
let promise = trusted_promise.root();
|
||||
|
@ -194,19 +205,23 @@ impl AudioContextMethods for AudioContext {
|
|||
&window
|
||||
);
|
||||
}
|
||||
}), window.upcast());
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
Err(_) => {
|
||||
// The spec does not define the error case and `suspend` should
|
||||
// never fail, but we handle the case here for completion.
|
||||
let _ = task_source.queue(task!(suspend_error: move || {
|
||||
let _ = task_source.queue(
|
||||
task!(suspend_error: move || {
|
||||
let promise = trusted_promise.root();
|
||||
promise.reject_error(Error::Type("Something went wrong".to_owned()));
|
||||
}), window.upcast());
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
// Step 6.
|
||||
promise
|
||||
}
|
||||
|
|
|
@ -18,19 +18,28 @@ pub struct AudioDestinationNode {
|
|||
}
|
||||
|
||||
impl AudioDestinationNode {
|
||||
fn new_inherited(context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions) -> AudioDestinationNode {
|
||||
fn new_inherited(
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
) -> AudioDestinationNode {
|
||||
AudioDestinationNode {
|
||||
node: AudioNode::new_inherited(AudioNodeInit::DestinationNode,
|
||||
Some(context.destination_node()),
|
||||
context, options, 1, 1),
|
||||
node: AudioNode::new_inherited(
|
||||
AudioNodeInit::DestinationNode,
|
||||
Some(context.destination_node()),
|
||||
context,
|
||||
options,
|
||||
1,
|
||||
1,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new(global: &GlobalScope,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions) -> DomRoot<AudioDestinationNode> {
|
||||
pub fn new(
|
||||
global: &GlobalScope,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
) -> DomRoot<AudioDestinationNode> {
|
||||
let node = AudioDestinationNode::new_inherited(context, options);
|
||||
reflect_dom_object(Box::new(node), global, AudioDestinationNodeBinding::Wrap)
|
||||
}
|
||||
|
|
|
@ -33,15 +33,16 @@ pub struct AudioNode {
|
|||
}
|
||||
|
||||
impl AudioNode {
|
||||
pub fn new_inherited(node_type: AudioNodeInit,
|
||||
node_id: Option<NodeId>,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
number_of_inputs: u32,
|
||||
number_of_outputs: u32) -> AudioNode {
|
||||
let node_id = node_id.unwrap_or_else(|| {
|
||||
context.audio_context_impl().create_node(node_type)
|
||||
});
|
||||
pub fn new_inherited(
|
||||
node_type: AudioNodeInit,
|
||||
node_id: Option<NodeId>,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
number_of_inputs: u32,
|
||||
number_of_outputs: u32,
|
||||
) -> AudioNode {
|
||||
let node_id =
|
||||
node_id.unwrap_or_else(|| context.audio_context_impl().create_node(node_type));
|
||||
AudioNode {
|
||||
eventtarget: EventTarget::new_inherited(),
|
||||
node_id,
|
||||
|
@ -55,7 +56,9 @@ impl AudioNode {
|
|||
}
|
||||
|
||||
pub fn message(&self, message: AudioNodeMessage) {
|
||||
self.context.audio_context_impl().message_node(self.node_id, message);
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.message_node(self.node_id, message);
|
||||
}
|
||||
|
||||
pub fn node_id(&self) -> NodeId {
|
||||
|
@ -65,65 +68,69 @@ impl AudioNode {
|
|||
|
||||
impl AudioNodeMethods for AudioNode {
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-connect
|
||||
fn Connect(&self,
|
||||
destination: &AudioNode,
|
||||
output: u32,
|
||||
input: u32) -> Fallible<DomRoot<AudioNode>> {
|
||||
|
||||
fn Connect(
|
||||
&self,
|
||||
destination: &AudioNode,
|
||||
output: u32,
|
||||
input: u32,
|
||||
) -> Fallible<DomRoot<AudioNode>> {
|
||||
if self.context != destination.context {
|
||||
return Err(Error::InvalidAccess);
|
||||
}
|
||||
|
||||
if output >= self.NumberOfOutputs() ||
|
||||
input >= destination.NumberOfInputs() {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
|
||||
return Err(Error::IndexSize);
|
||||
}
|
||||
|
||||
self.context.audio_context_impl().connect_ports(
|
||||
self.node_id().output(output), destination.node_id().input(input)
|
||||
);
|
||||
self.node_id().output(output),
|
||||
destination.node_id().input(input),
|
||||
);
|
||||
|
||||
Ok(DomRoot::from_ref(destination))
|
||||
}
|
||||
|
||||
fn Connect_(&self,
|
||||
_: &AudioParam,
|
||||
_: u32) -> Fallible<()> {
|
||||
fn Connect_(&self, _: &AudioParam, _: u32) -> Fallible<()> {
|
||||
// TODO
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
|
||||
fn Disconnect(&self) -> ErrorResult {
|
||||
self.context.audio_context_impl()
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.disconnect_all_from(self.node_id());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output
|
||||
fn Disconnect_(&self, out: u32) -> ErrorResult {
|
||||
self.context.audio_context_impl()
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.disconnect_output(self.node_id().output(out));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode
|
||||
fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
|
||||
self.context.audio_context_impl()
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.disconnect_between(self.node_id(), to.node_id());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output
|
||||
fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult{
|
||||
self.context.audio_context_impl()
|
||||
fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.disconnect_output_between(self.node_id().output(out), to.node_id());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input
|
||||
fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
|
||||
self.context.audio_context_impl()
|
||||
self.context
|
||||
.audio_context_impl()
|
||||
.disconnect_output_between_to(self.node_id().output(out), to.node_id().input(inp));
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -30,13 +30,15 @@ pub struct AudioParam {
|
|||
}
|
||||
|
||||
impl AudioParam {
|
||||
pub fn new_inherited(context: &BaseAudioContext,
|
||||
node: NodeId,
|
||||
param: ParamType,
|
||||
automation_rate: AutomationRate,
|
||||
default_value: f32,
|
||||
min_value: f32,
|
||||
max_value: f32) -> AudioParam {
|
||||
pub fn new_inherited(
|
||||
context: &BaseAudioContext,
|
||||
node: NodeId,
|
||||
param: ParamType,
|
||||
automation_rate: AutomationRate,
|
||||
default_value: f32,
|
||||
min_value: f32,
|
||||
max_value: f32,
|
||||
) -> AudioParam {
|
||||
AudioParam {
|
||||
reflector_: Reflector::new(),
|
||||
context: Dom::from_ref(context),
|
||||
|
@ -50,16 +52,25 @@ impl AudioParam {
|
|||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new(window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
node: NodeId,
|
||||
param: ParamType,
|
||||
automation_rate: AutomationRate,
|
||||
default_value: f32,
|
||||
min_value: f32,
|
||||
max_value: f32) -> DomRoot<AudioParam> {
|
||||
let audio_param = AudioParam::new_inherited(context, node, param, automation_rate,
|
||||
default_value, min_value, max_value);
|
||||
pub fn new(
|
||||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
node: NodeId,
|
||||
param: ParamType,
|
||||
automation_rate: AutomationRate,
|
||||
default_value: f32,
|
||||
min_value: f32,
|
||||
max_value: f32,
|
||||
) -> DomRoot<AudioParam> {
|
||||
let audio_param = AudioParam::new_inherited(
|
||||
context,
|
||||
node,
|
||||
param,
|
||||
automation_rate,
|
||||
default_value,
|
||||
min_value,
|
||||
max_value,
|
||||
);
|
||||
reflect_dom_object(Box::new(audio_param), window, AudioParamBinding::Wrap)
|
||||
}
|
||||
}
|
||||
|
@ -80,14 +91,10 @@ impl AudioParamMethods for AudioParam {
|
|||
}
|
||||
|
||||
fn SetValue(&self, value: Finite<f32>) {
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::SetValue(
|
||||
*value
|
||||
)
|
||||
)
|
||||
);
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(self.param, UserAutomationEvent::SetValue(*value)),
|
||||
);
|
||||
}
|
||||
|
||||
fn DefaultValue(&self) -> Finite<f32> {
|
||||
|
@ -102,83 +109,82 @@ impl AudioParamMethods for AudioParam {
|
|||
Finite::wrap(self.max_value)
|
||||
}
|
||||
|
||||
fn SetValueAtTime(&self, value: Finite<f32>, start_time: Finite<f64>)
|
||||
-> DomRoot<AudioParam>
|
||||
{
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::SetValueAtTime(
|
||||
*value, *start_time
|
||||
)
|
||||
)
|
||||
);
|
||||
fn SetValueAtTime(&self, value: Finite<f32>, start_time: Finite<f64>) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::SetValueAtTime(*value, *start_time),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
|
||||
fn LinearRampToValueAtTime(&self, value: Finite<f32>, end_time: Finite<f64>)
|
||||
-> DomRoot<AudioParam>
|
||||
{
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::RampToValueAtTime(
|
||||
RampKind::Linear, *value, *end_time
|
||||
)
|
||||
)
|
||||
);
|
||||
fn LinearRampToValueAtTime(
|
||||
&self,
|
||||
value: Finite<f32>,
|
||||
end_time: Finite<f64>,
|
||||
) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::RampToValueAtTime(RampKind::Linear, *value, *end_time),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
|
||||
fn ExponentialRampToValueAtTime(&self, value: Finite<f32>, end_time: Finite<f64>)
|
||||
-> DomRoot<AudioParam>
|
||||
{
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::RampToValueAtTime(
|
||||
RampKind::Exponential, *value, *end_time
|
||||
)
|
||||
)
|
||||
);
|
||||
fn ExponentialRampToValueAtTime(
|
||||
&self,
|
||||
value: Finite<f32>,
|
||||
end_time: Finite<f64>,
|
||||
) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::RampToValueAtTime(RampKind::Exponential, *value, *end_time),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
|
||||
fn SetTargetAtTime(&self, target: Finite<f32>, start_time: Finite<f64>, time_constant: Finite<f32>)
|
||||
-> DomRoot<AudioParam>
|
||||
{
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::SetTargetAtTime(
|
||||
*target, *start_time, (*time_constant).into()
|
||||
)
|
||||
)
|
||||
);
|
||||
fn SetTargetAtTime(
|
||||
&self,
|
||||
target: Finite<f32>,
|
||||
start_time: Finite<f64>,
|
||||
time_constant: Finite<f32>,
|
||||
) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::SetTargetAtTime(*target, *start_time, (*time_constant).into()),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
|
||||
fn CancelScheduledValues(&self, cancel_time: Finite<f64>) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::CancelScheduledValues(
|
||||
*cancel_time
|
||||
)
|
||||
)
|
||||
);
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::CancelScheduledValues(*cancel_time),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
|
||||
fn CancelAndHoldAtTime(&self, cancel_time: Finite<f64>) -> DomRoot<AudioParam> {
|
||||
self.context.audio_context_impl()
|
||||
.message_node(self.node,
|
||||
AudioNodeMessage::SetParam(self.param,
|
||||
UserAutomationEvent::CancelAndHoldAtTime(
|
||||
*cancel_time
|
||||
)
|
||||
)
|
||||
);
|
||||
self.context.audio_context_impl().message_node(
|
||||
self.node,
|
||||
AudioNodeMessage::SetParam(
|
||||
self.param,
|
||||
UserAutomationEvent::CancelAndHoldAtTime(*cancel_time),
|
||||
),
|
||||
);
|
||||
DomRoot::from_ref(self)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,16 +19,24 @@ pub struct AudioScheduledSourceNode {
|
|||
}
|
||||
|
||||
impl AudioScheduledSourceNode {
|
||||
pub fn new_inherited(node_type: AudioNodeInit,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
number_of_inputs: u32,
|
||||
number_of_outputs: u32) -> AudioScheduledSourceNode {
|
||||
pub fn new_inherited(
|
||||
node_type: AudioNodeInit,
|
||||
context: &BaseAudioContext,
|
||||
options: &AudioNodeOptions,
|
||||
number_of_inputs: u32,
|
||||
number_of_outputs: u32,
|
||||
) -> AudioScheduledSourceNode {
|
||||
AudioScheduledSourceNode {
|
||||
node: AudioNode::new_inherited(node_type, None /* node_id */,
|
||||
context, options, number_of_inputs, number_of_outputs),
|
||||
started: Cell::new(false),
|
||||
stopped: Cell::new(false),
|
||||
node: AudioNode::new_inherited(
|
||||
node_type,
|
||||
None, /* node_id */
|
||||
context,
|
||||
options,
|
||||
number_of_inputs,
|
||||
number_of_outputs,
|
||||
),
|
||||
started: Cell::new(false),
|
||||
stopped: Cell::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -51,9 +59,10 @@ impl AudioScheduledSourceNodeMethods for AudioScheduledSourceNode {
|
|||
return Err(Error::InvalidState);
|
||||
}
|
||||
self.started.set(true);
|
||||
self.node.message(
|
||||
AudioNodeMessage::AudioScheduledSourceNode(AudioScheduledSourceNodeMessage::Start(*when))
|
||||
);
|
||||
self.node
|
||||
.message(AudioNodeMessage::AudioScheduledSourceNode(
|
||||
AudioScheduledSourceNodeMessage::Start(*when),
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -63,9 +72,10 @@ impl AudioScheduledSourceNodeMethods for AudioScheduledSourceNode {
|
|||
return Err(Error::InvalidState);
|
||||
}
|
||||
self.stopped.set(true);
|
||||
self.node.message(
|
||||
AudioNodeMessage::AudioScheduledSourceNode(AudioScheduledSourceNodeMessage::Stop(*when))
|
||||
);
|
||||
self.node
|
||||
.message(AudioNodeMessage::AudioScheduledSourceNode(
|
||||
AudioScheduledSourceNodeMessage::Stop(*when),
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -86,10 +86,7 @@ pub struct BaseAudioContext {
|
|||
|
||||
impl BaseAudioContext {
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new_inherited(
|
||||
_: &GlobalScope,
|
||||
options: BaseAudioContextOptions,
|
||||
) -> BaseAudioContext {
|
||||
pub fn new_inherited(_: &GlobalScope, options: BaseAudioContextOptions) -> BaseAudioContext {
|
||||
let options = match options {
|
||||
BaseAudioContextOptions::AudioContext(options) => options,
|
||||
BaseAudioContextOptions::OfflineAudioContext(_) => unimplemented!(),
|
||||
|
@ -99,7 +96,11 @@ impl BaseAudioContext {
|
|||
|
||||
let context = BaseAudioContext {
|
||||
eventtarget: EventTarget::new_inherited(),
|
||||
audio_context_impl: Rc::new(ServoMedia::get().unwrap().create_audio_context(options.into())),
|
||||
audio_context_impl: Rc::new(
|
||||
ServoMedia::get()
|
||||
.unwrap()
|
||||
.create_audio_context(options.into()),
|
||||
),
|
||||
destination: Default::default(),
|
||||
in_flight_resume_promises_queue: Default::default(),
|
||||
pending_resume_promises: Default::default(),
|
||||
|
@ -126,7 +127,9 @@ impl BaseAudioContext {
|
|||
|
||||
#[allow(unrooted_must_root)]
|
||||
fn push_pending_resume_promise(&self, promise: &Rc<Promise>) {
|
||||
self.pending_resume_promises.borrow_mut().push(promise.clone());
|
||||
self.pending_resume_promises
|
||||
.borrow_mut()
|
||||
.push(promise.clone());
|
||||
}
|
||||
|
||||
/// Takes the pending resume promises.
|
||||
|
@ -141,14 +144,11 @@ impl BaseAudioContext {
|
|||
/// which were taken and moved to the in-flight queue.
|
||||
#[allow(unrooted_must_root)]
|
||||
fn take_pending_resume_promises(&self, result: ErrorResult) {
|
||||
let pending_resume_promises = mem::replace(
|
||||
&mut *self.pending_resume_promises.borrow_mut(),
|
||||
vec![],
|
||||
);
|
||||
self.in_flight_resume_promises_queue.borrow_mut().push_back((
|
||||
pending_resume_promises.into(),
|
||||
result,
|
||||
));
|
||||
let pending_resume_promises =
|
||||
mem::replace(&mut *self.pending_resume_promises.borrow_mut(), vec![]);
|
||||
self.in_flight_resume_promises_queue
|
||||
.borrow_mut()
|
||||
.push_back((pending_resume_promises.into(), result));
|
||||
}
|
||||
|
||||
/// Fulfills the next in-flight resume promises queue after running a closure.
|
||||
|
@ -161,21 +161,22 @@ impl BaseAudioContext {
|
|||
/// hiding actual safety bugs.
|
||||
#[allow(unrooted_must_root)]
|
||||
fn fulfill_in_flight_resume_promises<F>(&self, f: F)
|
||||
where
|
||||
F: FnOnce(),
|
||||
{
|
||||
let (promises, result) = self.in_flight_resume_promises_queue
|
||||
.borrow_mut()
|
||||
.pop_front()
|
||||
.expect("there should be at least one list of in flight resume promises");
|
||||
f();
|
||||
for promise in &*promises {
|
||||
match result {
|
||||
Ok(ref value) => promise.resolve_native(value),
|
||||
Err(ref error) => promise.reject_error(error.clone()),
|
||||
}
|
||||
where
|
||||
F: FnOnce(),
|
||||
{
|
||||
let (promises, result) = self
|
||||
.in_flight_resume_promises_queue
|
||||
.borrow_mut()
|
||||
.pop_front()
|
||||
.expect("there should be at least one list of in flight resume promises");
|
||||
f();
|
||||
for promise in &*promises {
|
||||
match result {
|
||||
Ok(ref value) => promise.resolve_native(value),
|
||||
Err(ref error) => promise.reject_error(error.clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Control thread processing state
|
||||
pub fn control_thread_state(&self) -> ProcessingState {
|
||||
|
@ -197,7 +198,8 @@ impl BaseAudioContext {
|
|||
match self.audio_context_impl.resume() {
|
||||
Ok(()) => {
|
||||
self.take_pending_resume_promises(Ok(()));
|
||||
let _ = task_source.queue(task!(resume_success: move || {
|
||||
let _ = task_source.queue(
|
||||
task!(resume_success: move || {
|
||||
let this = this.root();
|
||||
this.fulfill_in_flight_resume_promises(|| {
|
||||
if this.state.get() != AudioContextState::Running {
|
||||
|
@ -210,14 +212,21 @@ impl BaseAudioContext {
|
|||
);
|
||||
}
|
||||
});
|
||||
}), window.upcast());
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
Err(()) => {
|
||||
self.take_pending_resume_promises(Err(Error::Type("Something went wrong".to_owned())));
|
||||
let _ = task_source.queue(task!(resume_error: move || {
|
||||
self.take_pending_resume_promises(Err(Error::Type(
|
||||
"Something went wrong".to_owned(),
|
||||
)));
|
||||
let _ = task_source.queue(
|
||||
task!(resume_error: move || {
|
||||
this.root().fulfill_in_flight_resume_promises(|| {})
|
||||
}), window.upcast());
|
||||
}
|
||||
}),
|
||||
window.upcast(),
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -288,7 +297,11 @@ impl BaseAudioContextMethods for BaseAudioContext {
|
|||
|
||||
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createoscillator
|
||||
fn CreateOscillator(&self) -> DomRoot<OscillatorNode> {
|
||||
OscillatorNode::new(&self.global().as_window(), &self, &OscillatorOptions::empty())
|
||||
OscillatorNode::new(
|
||||
&self.global().as_window(),
|
||||
&self,
|
||||
&OscillatorOptions::empty(),
|
||||
)
|
||||
}
|
||||
|
||||
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-creategain
|
||||
|
@ -297,56 +310,74 @@ impl BaseAudioContextMethods for BaseAudioContext {
|
|||
}
|
||||
|
||||
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer
|
||||
fn CreateBuffer(&self,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: Finite<f32>) -> Fallible<DomRoot<AudioBuffer>> {
|
||||
fn CreateBuffer(
|
||||
&self,
|
||||
number_of_channels: u32,
|
||||
length: u32,
|
||||
sample_rate: Finite<f32>,
|
||||
) -> Fallible<DomRoot<AudioBuffer>> {
|
||||
if number_of_channels <= 0 ||
|
||||
number_of_channels > MAX_CHANNEL_COUNT ||
|
||||
length <= 0 ||
|
||||
*sample_rate <= 0. {
|
||||
return Err(Error::NotSupported);
|
||||
}
|
||||
Ok(AudioBuffer::new(&self.global().as_window(), number_of_channels, length, *sample_rate, None))
|
||||
length <= 0 ||
|
||||
*sample_rate <= 0.
|
||||
{
|
||||
return Err(Error::NotSupported);
|
||||
}
|
||||
Ok(AudioBuffer::new(
|
||||
&self.global().as_window(),
|
||||
number_of_channels,
|
||||
length,
|
||||
*sample_rate,
|
||||
None,
|
||||
))
|
||||
}
|
||||
|
||||
fn CreateBufferSource(&self) -> DomRoot<AudioBufferSourceNode> {
|
||||
AudioBufferSourceNode::new(&self.global().as_window(), &self, &AudioBufferSourceOptions::empty())
|
||||
AudioBufferSourceNode::new(
|
||||
&self.global().as_window(),
|
||||
&self,
|
||||
&AudioBufferSourceOptions::empty(),
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
fn DecodeAudioData(&self,
|
||||
audio_data: CustomAutoRooterGuard<ArrayBuffer>,
|
||||
decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
|
||||
decode_error_callback: Option<Rc<DecodeErrorCallback>>)
|
||||
-> Rc<Promise> {
|
||||
// Step 1.
|
||||
let promise = Promise::new(&self.global());
|
||||
let global = self.global();
|
||||
let window = global.as_window();
|
||||
fn DecodeAudioData(
|
||||
&self,
|
||||
audio_data: CustomAutoRooterGuard<ArrayBuffer>,
|
||||
decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
|
||||
decode_error_callback: Option<Rc<DecodeErrorCallback>>,
|
||||
) -> Rc<Promise> {
|
||||
// Step 1.
|
||||
let promise = Promise::new(&self.global());
|
||||
let global = self.global();
|
||||
let window = global.as_window();
|
||||
|
||||
if audio_data.len() > 0 {
|
||||
// Step 2.
|
||||
// XXX detach array buffer.
|
||||
let uuid = Uuid::new_v4().simple().to_string();
|
||||
let uuid_ = uuid.clone();
|
||||
self.decode_resolvers.borrow_mut().insert(uuid.clone(), DecodeResolver {
|
||||
if audio_data.len() > 0 {
|
||||
// Step 2.
|
||||
// XXX detach array buffer.
|
||||
let uuid = Uuid::new_v4().simple().to_string();
|
||||
let uuid_ = uuid.clone();
|
||||
self.decode_resolvers.borrow_mut().insert(
|
||||
uuid.clone(),
|
||||
DecodeResolver {
|
||||
promise: promise.clone(),
|
||||
success_callback: decode_success_callback,
|
||||
error_callback: decode_error_callback,
|
||||
});
|
||||
let audio_data = audio_data.to_vec();
|
||||
let decoded_audio = Arc::new(Mutex::new(Vec::new()));
|
||||
let decoded_audio_ = decoded_audio.clone();
|
||||
let this = Trusted::new(self);
|
||||
let this_ = this.clone();
|
||||
let task_source = window.dom_manipulation_task_source();
|
||||
let task_source_ = window.dom_manipulation_task_source();
|
||||
let canceller = window.task_canceller();
|
||||
let canceller_ = window.task_canceller();
|
||||
let callbacks = AudioDecoderCallbacks::new()
|
||||
.eos(move || {
|
||||
let _ = task_source.queue_with_canceller(task!(audio_decode_eos: move || {
|
||||
},
|
||||
);
|
||||
let audio_data = audio_data.to_vec();
|
||||
let decoded_audio = Arc::new(Mutex::new(Vec::new()));
|
||||
let decoded_audio_ = decoded_audio.clone();
|
||||
let this = Trusted::new(self);
|
||||
let this_ = this.clone();
|
||||
let task_source = window.dom_manipulation_task_source();
|
||||
let task_source_ = window.dom_manipulation_task_source();
|
||||
let canceller = window.task_canceller();
|
||||
let canceller_ = window.task_canceller();
|
||||
let callbacks = AudioDecoderCallbacks::new()
|
||||
.eos(move || {
|
||||
let _ = task_source.queue_with_canceller(
|
||||
task!(audio_decode_eos: move || {
|
||||
let this = this.root();
|
||||
let decoded_audio = decoded_audio.lock().unwrap();
|
||||
let buffer = AudioBuffer::new(
|
||||
|
@ -362,10 +393,13 @@ impl BaseAudioContextMethods for BaseAudioContext {
|
|||
let _ = callback.Call__(&buffer, ExceptionHandling::Report);
|
||||
}
|
||||
resolver.promise.resolve_native(&buffer);
|
||||
}), &canceller);
|
||||
})
|
||||
}),
|
||||
&canceller,
|
||||
);
|
||||
})
|
||||
.error(move || {
|
||||
let _ = task_source_.queue_with_canceller(task!(audio_decode_eos: move || {
|
||||
let _ = task_source_.queue_with_canceller(
|
||||
task!(audio_decode_eos: move || {
|
||||
let this = this_.root();
|
||||
let mut resolvers = this.decode_resolvers.borrow_mut();
|
||||
assert!(resolvers.contains_key(&uuid));
|
||||
|
@ -376,7 +410,9 @@ impl BaseAudioContextMethods for BaseAudioContext {
|
|||
ExceptionHandling::Report);
|
||||
}
|
||||
resolver.promise.reject_error(Error::Type("Audio decode error".to_owned()));
|
||||
}), &canceller_);
|
||||
}),
|
||||
&canceller_,
|
||||
);
|
||||
})
|
||||
.progress(move |buffer| {
|
||||
decoded_audio_
|
||||
|
@ -385,16 +421,17 @@ impl BaseAudioContextMethods for BaseAudioContext {
|
|||
.extend_from_slice((*buffer).as_ref());
|
||||
})
|
||||
.build();
|
||||
self.audio_context_impl.decode_audio_data(audio_data, callbacks);
|
||||
} else {
|
||||
// Step 3.
|
||||
promise.reject_error(Error::DataClone);
|
||||
return promise;
|
||||
}
|
||||
|
||||
// Step 4.
|
||||
promise
|
||||
self.audio_context_impl
|
||||
.decode_audio_data(audio_data, callbacks);
|
||||
} else {
|
||||
// Step 3.
|
||||
promise.reject_error(Error::DataClone);
|
||||
return promise;
|
||||
}
|
||||
|
||||
// Step 4.
|
||||
promise
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ProcessingState> for AudioContextState {
|
||||
|
|
|
@ -31,7 +31,7 @@ impl GainNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
gain_options: &GainOptions,
|
||||
) -> GainNode {
|
||||
) -> GainNode {
|
||||
let mut node_options = AudioNodeOptions::empty();
|
||||
node_options.channelCount = Some(2);
|
||||
node_options.channelCountMode = Some(ChannelCountMode::Max);
|
||||
|
@ -43,16 +43,17 @@ impl GainNode {
|
|||
&node_options,
|
||||
1, // inputs
|
||||
1, // outputs
|
||||
);
|
||||
let gain = AudioParam::new(window,
|
||||
context,
|
||||
node.node_id(),
|
||||
ParamType::Gain,
|
||||
AutomationRate::A_rate,
|
||||
1., // default value
|
||||
f32::MIN, // min value
|
||||
f32::MAX, // max value
|
||||
);
|
||||
);
|
||||
let gain = AudioParam::new(
|
||||
window,
|
||||
context,
|
||||
node.node_id(),
|
||||
ParamType::Gain,
|
||||
AutomationRate::A_rate,
|
||||
1., // default value
|
||||
f32::MIN, // min value
|
||||
f32::MAX, // max value
|
||||
);
|
||||
GainNode {
|
||||
node,
|
||||
gain: Dom::from_ref(&gain),
|
||||
|
@ -60,10 +61,11 @@ impl GainNode {
|
|||
}
|
||||
|
||||
#[allow(unrooted_must_root)]
|
||||
pub fn new(window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &GainOptions
|
||||
) -> DomRoot<GainNode> {
|
||||
pub fn new(
|
||||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &GainOptions,
|
||||
) -> DomRoot<GainNode> {
|
||||
let node = GainNode::new_inherited(window, context, options);
|
||||
reflect_dom_object(Box::new(node), window, GainNodeBinding::Wrap)
|
||||
}
|
||||
|
@ -72,7 +74,7 @@ impl GainNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &GainOptions,
|
||||
) -> Fallible<DomRoot<GainNode>> {
|
||||
) -> Fallible<DomRoot<GainNode>> {
|
||||
Ok(GainNode::new(window, context, options))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ impl OscillatorNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
oscillator_options: &OscillatorOptions,
|
||||
) -> OscillatorNode {
|
||||
) -> OscillatorNode {
|
||||
let mut node_options = AudioNodeOptions::empty();
|
||||
node_options.channelCount = Some(2);
|
||||
node_options.channelCountMode = Some(ChannelCountMode::Max);
|
||||
|
@ -46,20 +46,28 @@ impl OscillatorNode {
|
|||
&node_options,
|
||||
0, /* inputs */
|
||||
1, /* outputs */
|
||||
);
|
||||
);
|
||||
let node_id = source_node.node().node_id();
|
||||
let frequency = AudioParam::new(window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Frequency,
|
||||
AutomationRate::A_rate,
|
||||
440., f32::MIN, f32::MAX);
|
||||
let detune = AudioParam::new(window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Detune,
|
||||
AutomationRate::A_rate,
|
||||
0., -440. / 2., 440. / 2.);
|
||||
let frequency = AudioParam::new(
|
||||
window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Frequency,
|
||||
AutomationRate::A_rate,
|
||||
440.,
|
||||
f32::MIN,
|
||||
f32::MAX,
|
||||
);
|
||||
let detune = AudioParam::new(
|
||||
window,
|
||||
context,
|
||||
node_id,
|
||||
ParamType::Detune,
|
||||
AutomationRate::A_rate,
|
||||
0.,
|
||||
-440. / 2.,
|
||||
440. / 2.,
|
||||
);
|
||||
|
||||
OscillatorNode {
|
||||
source_node,
|
||||
|
@ -74,7 +82,7 @@ impl OscillatorNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &OscillatorOptions,
|
||||
) -> DomRoot<OscillatorNode> {
|
||||
) -> DomRoot<OscillatorNode> {
|
||||
let node = OscillatorNode::new_inherited(window, context, options);
|
||||
reflect_dom_object(Box::new(node), window, OscillatorNodeBinding::Wrap)
|
||||
}
|
||||
|
@ -83,7 +91,7 @@ impl OscillatorNode {
|
|||
window: &Window,
|
||||
context: &BaseAudioContext,
|
||||
options: &OscillatorOptions,
|
||||
) -> Fallible<DomRoot<OscillatorNode>> {
|
||||
) -> Fallible<DomRoot<OscillatorNode>> {
|
||||
Ok(OscillatorNode::new(window, context, options))
|
||||
}
|
||||
}
|