Auto merge of #21158 - ferjm:webaudio, r=manishearth,nox,ferjm

WebAudio API

- [x] `./mach build -d` does not report any errors
- [x] `./mach test-tidy` does not report any errors
- [x] These changes fix #6710
- [x] There are tests for these changes

This PR adds basic support for the WebAudio API using [servo-media](https://github.com/servo/media) with GStreamer as the audio backend.

There is still some major stuff to fix, like:

- [x] Detach the ArrayBuffer during the [AudioBuffer "acquire the content" operation](https://webaudio.github.io/web-audio-api/#acquire-the-content). I am naively using `JS_StealArrayBufferContents()` directly, because that is what Gecko uses, but this should probably be part of the [rust-mozjs](https://github.com/servo/rust-mozjs) [TypedArray](https://github.com/servo/rust-mozjs/blob/master/src/typedarray.rs) API. And, in any case, I am not even sure that is the proper way to do it. Judging by the WPT results, it may not even be right: [this assertion](https://github.com/servo/rust-mozjs/blob/master/src/typedarray.rs#L285) fails in some cases. I need to dig more into this. (See the sketch after this list.)
- [x] Disable the GStreamer dependency on Android. Unfortunately, gstreamer-rs requires an NDK version upgrade, so we need to disable this for Android until then. I tried adding [different features to servo-media](https://github.com/servo/media/pull/79), but I am currently hitting [this issue](https://github.com/rust-lang/cargo/issues/1197).
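For reference, the detach sequence the code below ends up using (see `AudioBuffer::acquire_contents`) boils down to something like this sketch. `steal_channel_data` is an illustrative name, not an existing rust-mozjs API, and it assumes the `rooted!` and `typedarray!` macros from mozjs are in scope, as they are in Servo's script crate:

```rust
use js::jsapi::{DetachDataDisposition, Heap, JSContext, JSObject};
use js::jsapi::JS_GetArrayBufferViewBuffer;
use js::rust::wrappers::JS_DetachArrayBuffer;
use js::typedarray::Float32Array;

// Hypothetical helper (not an existing rust-mozjs API): copy a Float32Array's
// samples out and detach its backing ArrayBuffer, mirroring what
// AudioBuffer::acquire_contents in this PR does for each channel.
#[allow(unsafe_code)]
unsafe fn steal_channel_data(
    cx: *mut JSContext,
    channel: &Heap<*mut JSObject>,
) -> Option<Vec<f32>> {
    typedarray!(in(cx) let array: Float32Array = channel.get());
    let data = match array {
        // Copy the samples out first; the view is unusable after the detach.
        Ok(array) => array.to_vec(),
        Err(_) => return None,
    };
    let mut is_shared = false;
    rooted!(in(cx) let view_buffer =
        JS_GetArrayBufferViewBuffer(cx, channel.handle(), &mut is_shared));
    // AudioBuffer channels are always created unshared.
    debug_assert!(!is_shared);
    let _ = JS_DetachArrayBuffer(cx, view_buffer.handle(), DetachDataDisposition::KeepData);
    Some(data)
}
```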

I still need to run servo-tidy, change the servo-media dependency to use the git repo, and add/fix some comments and TODOs.

The remaining feature work should be done in future PRs.

Note that most of the failing WPTs fail because we do not implement the tested features yet (we only implement a few AudioNodes) and because we have no OfflineAudioContext support, which most WPTs rely on.

199 changed files with 5178 additions and 25 deletions

@@ -0,0 +1,310 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audionode::MAX_CHANNEL_COUNT;
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::AudioBufferBinding::{self, AudioBufferMethods, AudioBufferOptions};
use dom::bindings::error::{Error, Fallible};
use dom::bindings::num::Finite;
use dom::bindings::reflector::{DomObject, Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::window::Window;
use dom_struct::dom_struct;
use js::jsapi::{DetachDataDisposition, Heap, JSAutoCompartment, JSContext, JSObject};
use js::jsapi::JS_GetArrayBufferViewBuffer;
use js::rust::CustomAutoRooterGuard;
use js::rust::wrappers::JS_DetachArrayBuffer;
use js::typedarray::{CreateWith, Float32Array};
use servo_media::audio::buffer_source_node::AudioBuffer as ServoMediaAudioBuffer;
use std::cmp::min;
use std::ptr::{self, NonNull};
// This range is defined by the spec.
// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer
pub const MIN_SAMPLE_RATE: f32 = 8000.;
pub const MAX_SAMPLE_RATE: f32 = 96000.;
type JSAudioChannel = Heap<*mut JSObject>;
#[dom_struct]
pub struct AudioBuffer {
reflector_: Reflector,
js_channels: DomRefCell<Vec<JSAudioChannel>>,
#[ignore_malloc_size_of = "servo_media"]
shared_channels: DomRefCell<ServoMediaAudioBuffer>,
sample_rate: f32,
length: u32,
duration: f64,
number_of_channels: u32,
}
impl AudioBuffer {
#[allow(unrooted_must_root)]
#[allow(unsafe_code)]
pub fn new_inherited(number_of_channels: u32, length: u32, sample_rate: f32) -> AudioBuffer {
let vec = (0..number_of_channels).map(|_| Heap::default()).collect();
AudioBuffer {
reflector_: Reflector::new(),
js_channels: DomRefCell::new(vec),
shared_channels: DomRefCell::new(ServoMediaAudioBuffer::new(
number_of_channels as u8,
length as usize,
)),
sample_rate,
length,
duration: length as f64 / sample_rate as f64,
number_of_channels,
}
}
#[allow(unrooted_must_root)]
pub fn new(
global: &Window,
number_of_channels: u32,
length: u32,
sample_rate: f32,
initial_data: Option<&[f32]>,
) -> DomRoot<AudioBuffer> {
let buffer = AudioBuffer::new_inherited(number_of_channels, length, sample_rate);
let buffer = reflect_dom_object(Box::new(buffer), global, AudioBufferBinding::Wrap);
buffer.set_channels(initial_data);
buffer
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-audiobuffer
pub fn Constructor(
window: &Window,
options: &AudioBufferOptions,
) -> Fallible<DomRoot<AudioBuffer>> {
// The nominal ranges are [1, MAX_CHANNEL_COUNT] channels, length >= 1,
// and [MIN_SAMPLE_RATE, MAX_SAMPLE_RATE] Hz.
if options.numberOfChannels == 0 ||
options.numberOfChannels > MAX_CHANNEL_COUNT ||
options.length == 0 ||
*options.sampleRate < MIN_SAMPLE_RATE ||
*options.sampleRate > MAX_SAMPLE_RATE
{
return Err(Error::NotSupported);
}
Ok(AudioBuffer::new(
window,
options.numberOfChannels,
options.length,
*options.sampleRate,
None,
))
}
#[allow(unsafe_code)]
pub fn set_channels(&self, initial_data: Option<&[f32]>) {
let global = self.global();
let cx = global.get_cx();
let _ac = JSAutoCompartment::new(cx, global.reflector().get_jsobject().get());
let chans = self.js_channels.borrow_mut();
for channel in 0..self.number_of_channels {
rooted!(in (cx) let mut array = ptr::null_mut::<JSObject>());
let offset = (channel * self.length) as usize;
match initial_data {
Some(data) => {
let _ = unsafe {
Float32Array::create(
cx,
CreateWith::Slice(&data[offset..offset + self.length as usize]),
array.handle_mut(),
)
};
},
None => {
let _ = unsafe {
Float32Array::create(
cx,
CreateWith::Slice(&vec![0.; self.length as usize]),
array.handle_mut(),
)
};
},
}
chans[channel as usize].set(array.get());
}
}
#[allow(unsafe_code)]
unsafe fn restore_js_channel_data(&self, cx: *mut JSContext) -> bool {
for (i, channel) in self.js_channels.borrow_mut().iter().enumerate() {
if !channel.get().is_null() {
// Already have data in JS array.
continue;
}
// Move the channel data from shared_channels to js_channels.
rooted!(in (cx) let mut array = ptr::null_mut::<JSObject>());
let shared_channel = (*self.shared_channels.borrow_mut()).buffers.remove(i);
if Float32Array::create(cx, CreateWith::Slice(&shared_channel), array.handle_mut())
.is_err()
{
return false;
}
channel.set(array.get());
}
true
}
// https://webaudio.github.io/web-audio-api/#acquire-the-content
#[allow(unsafe_code)]
pub fn acquire_contents(&self) -> Option<ServoMediaAudioBuffer> {
let cx = self.global().get_cx();
for (i, channel) in self.js_channels.borrow_mut().iter().enumerate() {
// Step 1.
if channel.get().is_null() {
return None;
}
// Step 2.
let channel_data = unsafe {
typedarray!(in(cx) let array: Float32Array = channel.get());
if let Ok(array) = array {
let data = array.to_vec();
let mut is_shared = false;
rooted!(in (cx) let view_buffer =
JS_GetArrayBufferViewBuffer(cx, channel.handle(), &mut is_shared));
// This buffer is always created unshared
debug_assert!(!is_shared);
let _ = JS_DetachArrayBuffer(cx, view_buffer.handle(), DetachDataDisposition::KeepData);
data
} else {
return None;
}
};
channel.set(ptr::null_mut());
// Step 3.
(*self.shared_channels.borrow_mut()).buffers[i] = channel_data;
// Step 4 will complete turning shared_channels
// data into js_channels ArrayBuffers in restore_js_channel_data.
}
self.js_channels.borrow_mut().clear();
Some((*self.shared_channels.borrow()).clone())
}
}
impl AudioBufferMethods for AudioBuffer {
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-samplerate
fn SampleRate(&self) -> Finite<f32> {
Finite::wrap(self.sample_rate)
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-length
fn Length(&self) -> u32 {
self.length
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-duration
fn Duration(&self) -> Finite<f64> {
Finite::wrap(self.duration)
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-numberofchannels
fn NumberOfChannels(&self) -> u32 {
self.number_of_channels
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-getchanneldata
#[allow(unsafe_code)]
unsafe fn GetChannelData(
&self,
cx: *mut JSContext,
channel: u32,
) -> Fallible<NonNull<JSObject>> {
if channel >= self.number_of_channels {
return Err(Error::IndexSize);
}
if !self.restore_js_channel_data(cx) {
return Err(Error::JSFailed);
}
Ok(NonNull::new_unchecked(
self.js_channels.borrow()[channel as usize].get(),
))
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-copyfromchannel
#[allow(unsafe_code)]
fn CopyFromChannel(
&self,
mut destination: CustomAutoRooterGuard<Float32Array>,
channel_number: u32,
start_in_channel: u32,
) -> Fallible<()> {
if channel_number >= self.number_of_channels || start_in_channel > self.length {
return Err(Error::IndexSize);
}
let bytes_to_copy = min(self.length - start_in_channel, destination.len() as u32) as usize;
let cx = self.global().get_cx();
let channel_number = channel_number as usize;
let offset = start_in_channel as usize;
let mut dest = Vec::with_capacity(destination.len());
// We either copy from js_channels or shared_channels.
let js_channel = self.js_channels.borrow()[channel_number].get();
if !js_channel.is_null() {
typedarray!(in(cx) let array: Float32Array = js_channel);
if let Ok(array) = array {
let data = unsafe { array.as_slice() };
dest.extend_from_slice(&data[offset..offset + bytes_to_copy]);
unsafe {
destination.update(&dest);
}
return Ok(());
}
}
if let Some(shared_channel) = self.shared_channels.borrow().buffers.get(channel_number) {
dest.extend_from_slice(&shared_channel.as_slice()[offset..offset + bytes_to_copy]);
}
unsafe {
destination.update(&dest);
}
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffer-copytochannel
#[allow(unsafe_code)]
fn CopyToChannel(
&self,
source: CustomAutoRooterGuard<Float32Array>,
channel_number: u32,
start_in_channel: u32,
) -> Fallible<()> {
if channel_number >= self.number_of_channels || start_in_channel > self.length {
return Err(Error::IndexSize);
}
let cx = self.global().get_cx();
if unsafe { !self.restore_js_channel_data(cx) } {
return Err(Error::JSFailed);
}
let js_channel = self.js_channels.borrow()[channel_number as usize].get();
if js_channel.is_null() {
// The array buffer was detached.
return Err(Error::IndexSize);
}
typedarray!(in(cx) let array: Float32Array = js_channel);
if let Ok(mut array) = array {
let bytes_to_copy = min(self.length - start_in_channel, source.len() as u32) as usize;
let offset = start_in_channel as usize;
unsafe {
array.update(&source.as_slice()[offset..offset + bytes_to_copy]);
}
} else {
return Err(Error::IndexSize);
}
Ok(())
}
}
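A note on the structure above: each channel's samples live in exactly one of two homes at a time, as a JS-visible `Float32Array` in `js_channels` or as a plain `Vec<f32>` in `shared_channels`. `acquire_contents` moves the data out of JS (detaching the buffers and nulling the JS side), and `restore_js_channel_data` lazily moves it back when script asks for it again. A minimal, engine-free model of that ownership dance, with illustrative types that are not Servo's:

```rust
// Illustrative model of the js_channels / shared_channels split: a channel's
// data is owned either by "JS" or by the audio side.
#[derive(Default)]
struct Channel {
    js_side: Option<Vec<f32>>, // Some(_) ~ live Float32Array, None ~ detached
    shared_side: Vec<f32>,     // ~ one ServoMediaAudioBuffer channel
}

impl Channel {
    // ~ acquire_contents: steal the JS copy for the rendering thread and
    // leave the JS side detached (null).
    fn acquire(&mut self) -> Option<Vec<f32>> {
        let data = self.js_side.take()?;
        self.shared_side = data;
        Some(self.shared_side.clone())
    }

    // ~ restore_js_channel_data: recreate the JS view from the shared copy,
    // but only if it is not already present.
    fn restore(&mut self) {
        if self.js_side.is_none() {
            self.js_side = Some(std::mem::replace(&mut self.shared_side, Vec::new()));
        }
    }
}

fn main() {
    let mut ch = Channel { js_side: Some(vec![0.5; 4]), ..Default::default() };
    assert!(ch.acquire().is_some());
    assert!(ch.js_side.is_none()); // detached, like a nulled js_channel
    ch.restore(); // what GetChannelData triggers
    assert_eq!(ch.js_side.as_ref().unwrap().len(), 4);
}
```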

@@ -0,0 +1,228 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audiobuffer::AudioBuffer;
use dom::audioparam::AudioParam;
use dom::audioscheduledsourcenode::AudioScheduledSourceNode;
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding;
use dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceNodeMethods;
use dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceOptions;
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::codegen::Bindings::AudioParamBinding::AutomationRate;
use dom::bindings::codegen::Bindings::AudioScheduledSourceNodeBinding::AudioScheduledSourceNodeMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::num::Finite;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::buffer_source_node::AudioBufferSourceNodeMessage;
use servo_media::audio::buffer_source_node::AudioBufferSourceNodeOptions;
use servo_media::audio::node::{AudioNodeMessage, AudioNodeInit};
use servo_media::audio::param::ParamType;
use std::cell::Cell;
use std::f32;
#[dom_struct]
pub struct AudioBufferSourceNode {
source_node: AudioScheduledSourceNode,
buffer: MutNullableDom<AudioBuffer>,
playback_rate: Dom<AudioParam>,
detune: Dom<AudioParam>,
loop_enabled: Cell<bool>,
loop_start: Cell<f64>,
loop_end: Cell<f64>,
}
impl AudioBufferSourceNode {
#[allow(unrooted_must_root)]
fn new_inherited(
window: &Window,
context: &BaseAudioContext,
options: &AudioBufferSourceOptions,
) -> AudioBufferSourceNode {
let mut node_options = AudioNodeOptions::empty();
node_options.channelCount = Some(2);
node_options.channelCountMode = Some(ChannelCountMode::Max);
node_options.channelInterpretation = Some(ChannelInterpretation::Speakers);
let source_node = AudioScheduledSourceNode::new_inherited(
AudioNodeInit::AudioBufferSourceNode(options.into()),
context,
&node_options,
0, /* inputs */
1, /* outputs */
);
let node_id = source_node.node().node_id();
let playback_rate = AudioParam::new(
&window,
context,
node_id,
ParamType::PlaybackRate,
AutomationRate::K_rate,
*options.playbackRate,
f32::MIN,
f32::MAX,
);
let detune = AudioParam::new(
&window,
context,
node_id,
ParamType::Detune,
AutomationRate::K_rate,
*options.detune,
f32::MIN,
f32::MAX,
);
AudioBufferSourceNode {
source_node,
buffer: Default::default(),
playback_rate: Dom::from_ref(&playback_rate),
detune: Dom::from_ref(&detune),
loop_enabled: Cell::new(options.loop_),
loop_start: Cell::new(*options.loopStart),
loop_end: Cell::new(*options.loopEnd),
}
}
#[allow(unrooted_must_root)]
pub fn new(
window: &Window,
context: &BaseAudioContext,
options: &AudioBufferSourceOptions,
) -> DomRoot<AudioBufferSourceNode> {
let node = AudioBufferSourceNode::new_inherited(window, context, options);
reflect_dom_object(Box::new(node), window, AudioBufferSourceNodeBinding::Wrap)
}
pub fn Constructor(
window: &Window,
context: &BaseAudioContext,
options: &AudioBufferSourceOptions,
) -> Fallible<DomRoot<AudioBufferSourceNode>> {
Ok(AudioBufferSourceNode::new(window, context, options))
}
}
impl AudioBufferSourceNodeMethods for AudioBufferSourceNode {
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-buffer
fn GetBuffer(&self) -> Fallible<Option<DomRoot<AudioBuffer>>> {
Ok(self.buffer.get())
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-buffer
fn SetBuffer(&self, new_buffer: Option<&AudioBuffer>) -> Fallible<()> {
if new_buffer.is_some() && self.buffer.get().is_some() {
return Err(Error::InvalidState);
}
self.buffer.set(new_buffer);
if self.source_node.started() {
if let Some(buffer) = self.buffer.get() {
let buffer = buffer.acquire_contents();
self.source_node
.node()
.message(AudioNodeMessage::AudioBufferSourceNode(
AudioBufferSourceNodeMessage::SetBuffer(buffer),
));
}
}
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-playbackrate
fn PlaybackRate(&self) -> DomRoot<AudioParam> {
DomRoot::from_ref(&self.playback_rate)
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-detune
fn Detune(&self) -> DomRoot<AudioParam> {
DomRoot::from_ref(&self.detune)
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loop
fn Loop(&self) -> bool {
self.loop_enabled.get()
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loop
fn SetLoop(&self, should_loop: bool) {
self.loop_enabled.set(should_loop);
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopstart
fn LoopStart(&self) -> Finite<f64> {
Finite::wrap(self.loop_start.get())
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopstart
fn SetLoopStart(&self, loop_start: Finite<f64>) {
self.loop_start.set(*loop_start);
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopend
fn LoopEnd(&self) -> Finite<f64> {
Finite::wrap(self.loop_end.get())
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopend
fn SetLoopEnd(&self, loop_end: Finite<f64>) {
self.loop_end.set(*loop_end)
}
// https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-start
fn Start(
&self,
when: Finite<f64>,
offset: Option<Finite<f64>>,
duration: Option<Finite<f64>>,
) -> Fallible<()> {
if *when < 0. {
return Err(Error::Range("'when' must be a positive value".to_owned()));
}
if let Some(offset) = offset {
if *offset < 0. {
return Err(Error::Range("'offset' must be a positive value".to_owned()));
}
}
if let Some(duration) = duration {
if *duration < 0. {
return Err(Error::Range(
"'duration' must not be negative".to_owned(),
));
}
}
if let Some(buffer) = self.buffer.get() {
let buffer = buffer.acquire_contents();
self.source_node
.node()
.message(AudioNodeMessage::AudioBufferSourceNode(
AudioBufferSourceNodeMessage::SetBuffer(buffer),
));
}
self.source_node
.upcast::<AudioScheduledSourceNode>()
.Start(when)
}
}
impl<'a> From<&'a AudioBufferSourceOptions> for AudioBufferSourceNodeOptions {
fn from(options: &'a AudioBufferSourceOptions) -> Self {
Self {
buffer: None,
detune: *options.detune,
loop_enabled: options.loop_,
loop_end: Some(*options.loopEnd),
loop_start: Some(*options.loopStart),
playback_rate: *options.playbackRate,
}
}
}

@@ -0,0 +1,248 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::baseaudiocontext::{BaseAudioContext, BaseAudioContextOptions};
use dom::bindings::codegen::Bindings::AudioContextBinding;
use dom::bindings::codegen::Bindings::AudioContextBinding::{AudioContextLatencyCategory, AudioContextMethods};
use dom::bindings::codegen::Bindings::AudioContextBinding::{AudioContextOptions, AudioTimestamp};
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::AudioContextState;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::BaseAudioContextBinding::BaseAudioContextMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::num::Finite;
use dom::bindings::refcounted::{Trusted, TrustedPromise};
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::promise::Promise;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::context::{LatencyCategory, ProcessingState, RealTimeAudioContextOptions};
use std::rc::Rc;
use task_source::TaskSource;
#[dom_struct]
pub struct AudioContext {
context: BaseAudioContext,
latency_hint: AudioContextLatencyCategory,
/// https://webaudio.github.io/web-audio-api/#dom-audiocontext-baselatency
base_latency: f64,
/// https://webaudio.github.io/web-audio-api/#dom-audiocontext-outputlatency
output_latency: f64,
}
impl AudioContext {
#[allow(unrooted_must_root)]
// https://webaudio.github.io/web-audio-api/#AudioContext-constructors
fn new_inherited(options: &AudioContextOptions) -> AudioContext {
// Steps 1-3.
let context = BaseAudioContext::new_inherited(
BaseAudioContextOptions::AudioContext(options.into()),
);
// Step 4.1.
let latency_hint = options.latencyHint;
// Step 4.2. The sample rate is set during the creation of the BaseAudioContext.
// servo-media takes care of setting the default sample rate of the output device
// and of resampling the audio output if needed.
// Steps 5 and 6 of the construction algorithm will happen in `resume`,
// after reflecting dom object.
AudioContext {
context,
latency_hint,
base_latency: 0., // TODO
output_latency: 0., // TODO
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, options: &AudioContextOptions) -> DomRoot<AudioContext> {
let context = AudioContext::new_inherited(options);
let context = reflect_dom_object(Box::new(context), window, AudioContextBinding::Wrap);
context.resume();
context
}
// https://webaudio.github.io/web-audio-api/#AudioContext-constructors
pub fn Constructor(
window: &Window,
options: &AudioContextOptions,
) -> Fallible<DomRoot<AudioContext>> {
Ok(AudioContext::new(window, options))
}
fn resume(&self) {
// Step 5.
if self.context.is_allowed_to_start() {
// Step 6.
self.context.resume();
}
}
}
impl AudioContextMethods for AudioContext {
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-baselatency
fn BaseLatency(&self) -> Finite<f64> {
Finite::wrap(self.base_latency)
}
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-outputlatency
fn OutputLatency(&self) -> Finite<f64> {
Finite::wrap(self.output_latency)
}
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-outputlatency
fn GetOutputTimestamp(&self) -> AudioTimestamp {
// TODO
AudioTimestamp {
contextTime: Some(Finite::wrap(0.)),
performanceTime: Some(Finite::wrap(0.)),
}
}
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-suspend
#[allow(unrooted_must_root)]
fn Suspend(&self) -> Rc<Promise> {
// Step 1.
let promise = Promise::new(&self.global());
// Step 2.
if self.context.control_thread_state() == ProcessingState::Closed {
promise.reject_error(Error::InvalidState);
return promise;
}
// Step 3.
if self.context.State() == AudioContextState::Suspended {
promise.resolve_native(&());
return promise;
}
// Steps 4 and 5.
let window = DomRoot::downcast::<Window>(self.global()).unwrap();
let task_source = window.dom_manipulation_task_source();
let trusted_promise = TrustedPromise::new(promise.clone());
match self.context.audio_context_impl().suspend() {
Ok(_) => {
let base_context = Trusted::new(&self.context);
let context = Trusted::new(self);
let _ = task_source.queue(
task!(suspend_ok: move || {
let base_context = base_context.root();
let context = context.root();
let promise = trusted_promise.root();
promise.resolve_native(&());
if base_context.State() != AudioContextState::Suspended {
base_context.set_state_attribute(AudioContextState::Suspended);
let window = DomRoot::downcast::<Window>(context.global()).unwrap();
window.dom_manipulation_task_source().queue_simple_event(
context.upcast(),
atom!("statechange"),
&window
);
}
}),
window.upcast(),
);
},
Err(_) => {
// The spec does not define the error case and `suspend` should
// never fail, but we handle the case here for completeness.
let _ = task_source.queue(
task!(suspend_error: move || {
let promise = trusted_promise.root();
promise.reject_error(Error::Type("Something went wrong".to_owned()));
}),
window.upcast(),
);
},
};
// Step 6.
promise
}
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-close
#[allow(unrooted_must_root)]
fn Close(&self) -> Rc<Promise> {
// Step 1.
let promise = Promise::new(&self.global());
// Step 2.
if self.context.control_thread_state() == ProcessingState::Closed {
promise.reject_error(Error::InvalidState);
return promise;
}
// Step 3.
if self.context.State() == AudioContextState::Closed {
promise.resolve_native(&());
return promise;
}
// Steps 4 and 5.
let window = DomRoot::downcast::<Window>(self.global()).unwrap();
let task_source = window.dom_manipulation_task_source();
let trusted_promise = TrustedPromise::new(promise.clone());
match self.context.audio_context_impl().close() {
Ok(_) => {
let base_context = Trusted::new(&self.context);
let context = Trusted::new(self);
let _ = task_source.queue(
task!(close_ok: move || {
let base_context = base_context.root();
let context = context.root();
let promise = trusted_promise.root();
promise.resolve_native(&());
if base_context.State() != AudioContextState::Closed {
base_context.set_state_attribute(AudioContextState::Closed);
let window = DomRoot::downcast::<Window>(context.global()).unwrap();
window.dom_manipulation_task_source().queue_simple_event(
context.upcast(),
atom!("statechange"),
&window
);
}
}),
window.upcast(),
);
},
Err(_) => {
// The spec does not define the error case and `close` should
// never fail, but we handle the case here for completeness.
let _ = task_source.queue(
task!(close_error: move || {
let promise = trusted_promise.root();
promise.reject_error(Error::Type("Something went wrong".to_owned()));
}),
window.upcast(),
);
},
};
// Step 6.
promise
}
}
impl From<AudioContextLatencyCategory> for LatencyCategory {
fn from(category: AudioContextLatencyCategory) -> Self {
match category {
AudioContextLatencyCategory::Balanced => LatencyCategory::Balanced,
AudioContextLatencyCategory::Interactive => LatencyCategory::Interactive,
AudioContextLatencyCategory::Playback => LatencyCategory::Playback,
}
}
}
impl<'a> From<&'a AudioContextOptions> for RealTimeAudioContextOptions {
fn from(options: &AudioContextOptions) -> Self {
Self {
sample_rate: *options.sampleRate.unwrap_or(Finite::wrap(44100.)),
latency_hint: options.latencyHint.into(),
}
}
}

@@ -0,0 +1,53 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audionode::{AudioNode, MAX_CHANNEL_COUNT};
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioDestinationNodeBinding::{self, AudioDestinationNodeMethods};
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::DomRoot;
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use servo_media::audio::node::AudioNodeInit;
#[dom_struct]
pub struct AudioDestinationNode {
node: AudioNode,
}
impl AudioDestinationNode {
fn new_inherited(
context: &BaseAudioContext,
options: &AudioNodeOptions,
) -> AudioDestinationNode {
AudioDestinationNode {
node: AudioNode::new_inherited(
AudioNodeInit::DestinationNode,
Some(context.destination_node()),
context,
options,
1,
1,
),
}
}
#[allow(unrooted_must_root)]
pub fn new(
global: &GlobalScope,
context: &BaseAudioContext,
options: &AudioNodeOptions,
) -> DomRoot<AudioDestinationNode> {
let node = AudioDestinationNode::new_inherited(context, options);
reflect_dom_object(Box::new(node), global, AudioDestinationNodeBinding::Wrap)
}
}
impl AudioDestinationNodeMethods for AudioDestinationNode {
// https://webaudio.github.io/web-audio-api/#dom-audiodestinationnode-maxchannelcount
fn MaxChannelCount(&self) -> u32 {
MAX_CHANNEL_COUNT
}
}

@@ -0,0 +1,285 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audioparam::AudioParam;
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioNodeBinding::{AudioNodeMethods, AudioNodeOptions};
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
use dom::bindings::codegen::InheritTypes::{AudioNodeTypeId, EventTargetTypeId};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::root::{Dom, DomRoot};
use dom::eventtarget::EventTarget;
use dom_struct::dom_struct;
use servo_media::audio::graph::NodeId;
use servo_media::audio::node::{AudioNodeMessage, AudioNodeInit};
use servo_media::audio::node::ChannelCountMode as ServoMediaChannelCountMode;
use servo_media::audio::node::ChannelInterpretation as ServoMediaChannelInterpretation;
use std::cell::Cell;
// 32 is the minimum required by the spec for createBuffer() and the deprecated
// createScriptProcessor() and matches what is used by Blink and Gecko.
// The limit protects against large memory allocations.
pub const MAX_CHANNEL_COUNT: u32 = 32;
#[dom_struct]
pub struct AudioNode {
eventtarget: EventTarget,
#[ignore_malloc_size_of = "servo_media"]
node_id: NodeId,
context: Dom<BaseAudioContext>,
number_of_inputs: u32,
number_of_outputs: u32,
channel_count: Cell<u32>,
channel_count_mode: Cell<ChannelCountMode>,
channel_interpretation: Cell<ChannelInterpretation>,
}
impl AudioNode {
pub fn new_inherited(
node_type: AudioNodeInit,
node_id: Option<NodeId>,
context: &BaseAudioContext,
options: &AudioNodeOptions,
number_of_inputs: u32,
number_of_outputs: u32,
) -> AudioNode {
let node_id =
node_id.unwrap_or_else(|| context.audio_context_impl().create_node(node_type));
AudioNode {
eventtarget: EventTarget::new_inherited(),
node_id,
context: Dom::from_ref(context),
number_of_inputs,
number_of_outputs,
channel_count: Cell::new(options.channelCount.unwrap_or(2)),
channel_count_mode: Cell::new(options.channelCountMode.unwrap_or_default()),
channel_interpretation: Cell::new(options.channelInterpretation.unwrap_or_default()),
}
}
pub fn message(&self, message: AudioNodeMessage) {
self.context
.audio_context_impl()
.message_node(self.node_id, message);
}
pub fn node_id(&self) -> NodeId {
self.node_id
}
}
impl AudioNodeMethods for AudioNode {
// https://webaudio.github.io/web-audio-api/#dom-audionode-connect
fn Connect(
&self,
destination: &AudioNode,
output: u32,
input: u32,
) -> Fallible<DomRoot<AudioNode>> {
if self.context != destination.context {
return Err(Error::InvalidAccess);
}
if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
return Err(Error::IndexSize);
}
// servo-media takes care of ignoring duplicated connections.
self.context.audio_context_impl().connect_ports(
self.node_id().output(output),
destination.node_id().input(input),
);
Ok(DomRoot::from_ref(destination))
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output
fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
if self.context != dest.context() {
return Err(Error::InvalidAccess);
}
if output >= self.NumberOfOutputs() {
return Err(Error::IndexSize);
}
// servo-media takes care of ignoring duplicated connections.
self.context.audio_context_impl().connect_ports(
self.node_id().output(output),
dest.node_id().param(dest.param_type()),
);
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
fn Disconnect(&self) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_all_from(self.node_id());
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output
fn Disconnect_(&self, out: u32) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_output(self.node_id().output(out));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode
fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_between(self.node_id(), to.node_id());
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output
fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_output_between(self.node_id().output(out), to.node_id());
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input
fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_output_between_to(self.node_id().output(out), to.node_id().input(inp));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_to(self.node_id(),
param.node_id().param(param.param_type()));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
self.context
.audio_context_impl()
.disconnect_output_between_to(self.node_id().output(out),
param.node_id().param(param.param_type()));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-context
fn Context(&self) -> DomRoot<BaseAudioContext> {
DomRoot::from_ref(&self.context)
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs
fn NumberOfInputs(&self) -> u32 {
self.number_of_inputs
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs
fn NumberOfOutputs(&self) -> u32 {
self.number_of_outputs
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount
fn ChannelCount(&self) -> u32 {
self.channel_count.get()
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount
fn SetChannelCount(&self, value: u32) -> ErrorResult {
match self.upcast::<EventTarget>().type_id() {
EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
if self.context.is_offline() {
return Err(Error::InvalidState);
} else if value < 1 || value > MAX_CHANNEL_COUNT {
return Err(Error::IndexSize);
}
},
// XXX We do not support any of the other AudioNodes with
// constraints yet. Add more cases here as we add support
// for new AudioNodes.
_ => (),
};
if value == 0 || value > MAX_CHANNEL_COUNT {
return Err(Error::NotSupported);
}
self.channel_count.set(value);
self.message(AudioNodeMessage::SetChannelCount(value as u8));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode
fn ChannelCountMode(&self) -> ChannelCountMode {
self.channel_count_mode.get()
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode
fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
// Channel count mode has no effect for nodes with no inputs.
if self.number_of_inputs == 0 {
return Ok(());
}
match self.upcast::<EventTarget>().type_id() {
EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
if self.context.is_offline() {
return Err(Error::InvalidState);
}
},
// XXX We do not support any of the other AudioNodes with
// constraints yet. Add more cases here as we add support
// for new AudioNodes.
_ => (),
};
self.channel_count_mode.set(value);
self.message(AudioNodeMessage::SetChannelMode(value.into()));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation
fn ChannelInterpretation(&self) -> ChannelInterpretation {
self.channel_interpretation.get()
}
// https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation
fn SetChannelInterpretation(&self, value: ChannelInterpretation) {
// Channel interpretation mode has no effect for nodes with no inputs.
if self.number_of_inputs == 0 {
return;
}
self.channel_interpretation.set(value);
self.message(AudioNodeMessage::SetChannelInterpretation(value.into()));
}
}
impl From<ChannelCountMode> for ServoMediaChannelCountMode {
fn from(mode: ChannelCountMode) -> Self {
match mode {
ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
}
}
}
impl From<ChannelInterpretation> for ServoMediaChannelInterpretation {
fn from(interpretation: ChannelInterpretation) -> Self {
match interpretation {
ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
}
}
}

@@ -0,0 +1,228 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioParamBinding;
use dom::bindings::codegen::Bindings::AudioParamBinding::{AudioParamMethods, AutomationRate};
use dom::bindings::num::Finite;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::{Dom, DomRoot};
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::graph::NodeId;
use servo_media::audio::node::AudioNodeMessage;
use servo_media::audio::param::{ParamRate, ParamType, RampKind, UserAutomationEvent};
use std::cell::Cell;
use std::sync::mpsc;
#[dom_struct]
pub struct AudioParam {
reflector_: Reflector,
context: Dom<BaseAudioContext>,
#[ignore_malloc_size_of = "servo_media"]
node: NodeId,
#[ignore_malloc_size_of = "servo_media"]
param: ParamType,
automation_rate: Cell<AutomationRate>,
default_value: f32,
min_value: f32,
max_value: f32,
}
impl AudioParam {
pub fn new_inherited(
context: &BaseAudioContext,
node: NodeId,
param: ParamType,
automation_rate: AutomationRate,
default_value: f32,
min_value: f32,
max_value: f32,
) -> AudioParam {
AudioParam {
reflector_: Reflector::new(),
context: Dom::from_ref(context),
node,
param,
automation_rate: Cell::new(automation_rate),
default_value,
min_value,
max_value,
}
}
#[allow(unrooted_must_root)]
pub fn new(
window: &Window,
context: &BaseAudioContext,
node: NodeId,
param: ParamType,
automation_rate: AutomationRate,
default_value: f32,
min_value: f32,
max_value: f32,
) -> DomRoot<AudioParam> {
let audio_param = AudioParam::new_inherited(
context,
node,
param,
automation_rate,
default_value,
min_value,
max_value,
);
reflect_dom_object(Box::new(audio_param), window, AudioParamBinding::Wrap)
}
fn message_node(&self, message: AudioNodeMessage) {
self.context.audio_context_impl().message_node(self.node, message);
}
pub fn context(&self) -> &BaseAudioContext {
&self.context
}
pub fn node_id(&self) -> NodeId {
self.node
}
pub fn param_type(&self) -> ParamType {
self.param
}
}
impl AudioParamMethods for AudioParam {
// https://webaudio.github.io/web-audio-api/#dom-audioparam-automationrate
fn AutomationRate(&self) -> AutomationRate {
self.automation_rate.get()
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-automationrate
fn SetAutomationRate(&self, automation_rate: AutomationRate) {
self.automation_rate.set(automation_rate);
self.message_node(
AudioNodeMessage::SetParamRate(self.param, automation_rate.into())
);
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-value
fn Value(&self) -> Finite<f32> {
let (tx, rx) = mpsc::channel();
self.message_node(
AudioNodeMessage::GetParamValue(self.param, tx)
);
Finite::wrap(rx.recv().unwrap())
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-value
fn SetValue(&self, value: Finite<f32>) {
self.message_node(
AudioNodeMessage::SetParam(self.param, UserAutomationEvent::SetValue(*value)),
);
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-defaultvalue
fn DefaultValue(&self) -> Finite<f32> {
Finite::wrap(self.default_value)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-minvalue
fn MinValue(&self) -> Finite<f32> {
Finite::wrap(self.min_value)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-maxvalue
fn MaxValue(&self) -> Finite<f32> {
Finite::wrap(self.max_value)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-setvalueattime
fn SetValueAtTime(&self, value: Finite<f32>, start_time: Finite<f64>) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::SetValueAtTime(*value, *start_time),
)
);
DomRoot::from_ref(self)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-linearramptovalueattime
fn LinearRampToValueAtTime(
&self,
value: Finite<f32>,
end_time: Finite<f64>,
) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::RampToValueAtTime(RampKind::Linear, *value, *end_time),
),
);
DomRoot::from_ref(self)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-exponentialramptovalueattime
fn ExponentialRampToValueAtTime(
&self,
value: Finite<f32>,
end_time: Finite<f64>,
) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::RampToValueAtTime(RampKind::Exponential, *value, *end_time),
),
);
DomRoot::from_ref(self)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-settargetattime
fn SetTargetAtTime(
&self,
target: Finite<f32>,
start_time: Finite<f64>,
time_constant: Finite<f32>,
) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::SetTargetAtTime(*target, *start_time, (*time_constant).into()),
),
);
DomRoot::from_ref(self)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-cancelscheduledvalues
fn CancelScheduledValues(&self, cancel_time: Finite<f64>) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::CancelScheduledValues(*cancel_time),
),
);
DomRoot::from_ref(self)
}
// https://webaudio.github.io/web-audio-api/#dom-audioparam-cancelandholdattime
fn CancelAndHoldAtTime(&self, cancel_time: Finite<f64>) -> DomRoot<AudioParam> {
self.message_node(
AudioNodeMessage::SetParam(
self.param,
UserAutomationEvent::CancelAndHoldAtTime(*cancel_time),
),
);
DomRoot::from_ref(self)
}
}
// https://webaudio.github.io/web-audio-api/#enumdef-automationrate
impl From<AutomationRate> for ParamRate {
fn from(rate: AutomationRate) -> Self {
match rate {
AutomationRate::A_rate => ParamRate::ARate,
AutomationRate::K_rate => ParamRate::KRate,
}
}
}
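One detail worth calling out above: `Value()` does a synchronous round-trip to the audio rendering thread by packing an `mpsc::Sender` into the `GetParamValue` message and blocking on the reply. Here is that request/response-over-a-channel pattern in isolation; the message enum is a stand-in, not servo-media's actual type:

```rust
use std::sync::mpsc;
use std::thread;

// Stand-in for servo-media's AudioNodeMessage::GetParamValue(param, tx).
enum NodeMessage {
    GetParamValue(mpsc::Sender<f32>),
}

fn main() {
    let (msg_tx, msg_rx) = mpsc::channel::<NodeMessage>();

    // The "render thread" owns the authoritative parameter value.
    let render = thread::spawn(move || {
        let current_value = 440.0_f32;
        while let Ok(msg) = msg_rx.recv() {
            match msg {
                NodeMessage::GetParamValue(reply) => {
                    let _ = reply.send(current_value);
                },
            }
        }
    });

    // AudioParam::Value(): create a one-shot reply channel, send the
    // request, and block until the render thread answers.
    let (tx, rx) = mpsc::channel();
    msg_tx.send(NodeMessage::GetParamValue(tx)).unwrap();
    assert_eq!(rx.recv().unwrap(), 440.0);

    drop(msg_tx); // closing the channel lets the render thread exit
    render.join().unwrap();
}
```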

@@ -0,0 +1,112 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audionode::AudioNode;
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::codegen::Bindings::AudioScheduledSourceNodeBinding::AudioScheduledSourceNodeMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::num::Finite;
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::DomObject;
use dom_struct::dom_struct;
use servo_media::audio::node::{AudioNodeMessage, AudioNodeInit, AudioScheduledSourceNodeMessage};
use servo_media::audio::node::OnEndedCallback;
use std::cell::Cell;
use task_source::{TaskSource, TaskSourceName};
#[dom_struct]
pub struct AudioScheduledSourceNode {
node: AudioNode,
started: Cell<bool>,
stopped: Cell<bool>,
}
impl AudioScheduledSourceNode {
pub fn new_inherited(
node_type: AudioNodeInit,
context: &BaseAudioContext,
options: &AudioNodeOptions,
number_of_inputs: u32,
number_of_outputs: u32,
) -> AudioScheduledSourceNode {
AudioScheduledSourceNode {
node: AudioNode::new_inherited(
node_type,
None, /* node_id */
context,
options,
number_of_inputs,
number_of_outputs,
),
started: Cell::new(false),
stopped: Cell::new(false),
}
}
pub fn node(&self) -> &AudioNode {
&self.node
}
pub fn started(&self) -> bool {
self.started.get()
}
}
impl AudioScheduledSourceNodeMethods for AudioScheduledSourceNode {
// https://webaudio.github.io/web-audio-api/#dom-audioscheduledsourcenode-onended
event_handler!(ended, GetOnended, SetOnended);
// https://webaudio.github.io/web-audio-api/#dom-audioscheduledsourcenode-start
fn Start(&self, when: Finite<f64>) -> Fallible<()> {
if self.started.get() || self.stopped.get() {
return Err(Error::InvalidState);
}
let this = Trusted::new(self);
let global = self.global();
let window = global.as_window();
let task_source = window.dom_manipulation_task_source();
let canceller = window.task_canceller(TaskSourceName::DOMManipulation);
let callback = OnEndedCallback::new(move || {
let _ = task_source.queue_with_canceller(
task!(ended: move || {
let this = this.root();
let global = this.global();
let window = global.as_window();
window.dom_manipulation_task_source().queue_simple_event(
this.upcast(),
atom!("ended"),
&window
);
}),
&canceller,
);
});
self.node().message(
AudioNodeMessage::AudioScheduledSourceNode(
AudioScheduledSourceNodeMessage::RegisterOnEndedCallback(callback)));
self.started.set(true);
self.node
.message(AudioNodeMessage::AudioScheduledSourceNode(
AudioScheduledSourceNodeMessage::Start(*when),
));
Ok(())
}
// https://webaudio.github.io/web-audio-api/#dom-audioscheduledsourcenode-stop
fn Stop(&self, when: Finite<f64>) -> Fallible<()> {
if !self.started.get() {
return Err(Error::InvalidState);
}
self.stopped.set(true);
self.node
.message(AudioNodeMessage::AudioScheduledSourceNode(
AudioScheduledSourceNodeMessage::Stop(*when),
));
Ok(())
}
}
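`Start()` above also shows how backend callbacks reach the DOM: the `OnEndedCallback` runs on the media backend's thread, so rather than firing the `ended` event directly it queues a task on the DOM-manipulation task source. A minimal sketch of that thread hop, with a channel standing in for the task source:

```rust
use std::sync::mpsc;
use std::thread;

// A queued "task", standing in for task_source.queue_with_canceller(task!(..)).
type Task = Box<dyn FnOnce() + Send>;

fn main() {
    let (task_tx, task_rx) = mpsc::channel::<Task>();

    // Render thread: at end of playback it may only enqueue a task; the DOM
    // "ended" event itself must be fired from the script thread.
    let render = thread::spawn(move || {
        let on_ended = move || {
            let _ = task_tx.send(Box::new(|| {
                println!("queue simple event: ended");
            }) as Task);
        };
        on_ended();
    });
    render.join().unwrap();

    // Script thread event loop: drain and run the queued tasks.
    while let Ok(task) = task_rx.recv() {
        task();
    }
}
```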

@@ -0,0 +1,463 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audiobuffer::AudioBuffer;
use dom::audiobuffersourcenode::AudioBufferSourceNode;
use dom::audiodestinationnode::AudioDestinationNode;
use dom::audionode::MAX_CHANNEL_COUNT;
use dom::bindings::callback::ExceptionHandling;
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceOptions;
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::AudioContextState;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::BaseAudioContextMethods;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::DecodeErrorCallback;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::DecodeSuccessCallback;
use dom::bindings::codegen::Bindings::GainNodeBinding::GainOptions;
use dom::bindings::codegen::Bindings::OscillatorNodeBinding::OscillatorOptions;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::num::Finite;
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::DomObject;
use dom::bindings::root::{DomRoot, MutNullableDom};
use dom::domexception::{DOMErrorName, DOMException};
use dom::eventtarget::EventTarget;
use dom::gainnode::GainNode;
use dom::oscillatornode::OscillatorNode;
use dom::promise::Promise;
use dom::window::Window;
use dom_struct::dom_struct;
use js::rust::CustomAutoRooterGuard;
use js::typedarray::ArrayBuffer;
use servo_media::{Backend, ServoMedia};
use servo_media::audio::context::{AudioContext, AudioContextOptions, ProcessingState};
use servo_media::audio::context::{OfflineAudioContextOptions, RealTimeAudioContextOptions};
use servo_media::audio::decoder::AudioDecoderCallbacks;
use servo_media::audio::graph::NodeId;
use std::cell::Cell;
use std::collections::{HashMap, VecDeque};
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use task_source::{TaskSource, TaskSourceName};
use uuid::Uuid;
#[allow(dead_code)]
pub enum BaseAudioContextOptions {
AudioContext(RealTimeAudioContextOptions),
OfflineAudioContext(OfflineAudioContextOptions),
}
#[must_root]
#[derive(JSTraceable)]
struct DecodeResolver {
pub promise: Rc<Promise>,
pub success_callback: Option<Rc<DecodeSuccessCallback>>,
pub error_callback: Option<Rc<DecodeErrorCallback>>,
}
#[dom_struct]
pub struct BaseAudioContext {
eventtarget: EventTarget,
#[ignore_malloc_size_of = "servo_media"]
audio_context_impl: Rc<AudioContext<Backend>>,
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination
destination: MutNullableDom<AudioDestinationNode>,
/// Resume promises which are soon to be fulfilled by a queued task.
#[ignore_malloc_size_of = "promises are hard"]
in_flight_resume_promises_queue: DomRefCell<VecDeque<(Box<[Rc<Promise>]>, ErrorResult)>>,
/// https://webaudio.github.io/web-audio-api/#pendingresumepromises
#[ignore_malloc_size_of = "promises are hard"]
pending_resume_promises: DomRefCell<Vec<Rc<Promise>>>,
#[ignore_malloc_size_of = "promises are hard"]
decode_resolvers: DomRefCell<HashMap<String, DecodeResolver>>,
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate
sample_rate: f32,
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state
/// Although servo-media already keeps track of the control thread state,
/// we keep a state flag here as well. This is so that we can synchronously
/// throw when trying to do things on the context when the context has just
/// been "closed()".
state: Cell<AudioContextState>,
channel_count: u32,
}
impl BaseAudioContext {
#[allow(unrooted_must_root)]
pub fn new_inherited(options: BaseAudioContextOptions) -> BaseAudioContext {
let (sample_rate, channel_count) = match options {
BaseAudioContextOptions::AudioContext(ref opt) => (opt.sample_rate, 2),
BaseAudioContextOptions::OfflineAudioContext(ref opt) => (opt.sample_rate, opt.channels),
};
let context = BaseAudioContext {
eventtarget: EventTarget::new_inherited(),
audio_context_impl: Rc::new(
ServoMedia::get()
.unwrap()
.create_audio_context(options.into()),
),
destination: Default::default(),
in_flight_resume_promises_queue: Default::default(),
pending_resume_promises: Default::default(),
decode_resolvers: Default::default(),
sample_rate,
state: Cell::new(AudioContextState::Suspended),
channel_count: channel_count.into(),
};
context
}
/// Tells whether this is an OfflineAudioContext or not.
pub fn is_offline(&self) -> bool {
false
}
pub fn audio_context_impl(&self) -> Rc<AudioContext<Backend>> {
self.audio_context_impl.clone()
}
pub fn destination_node(&self) -> NodeId {
self.audio_context_impl.dest_node()
}
// https://webaudio.github.io/web-audio-api/#allowed-to-start
pub fn is_allowed_to_start(&self) -> bool {
self.state.get() == AudioContextState::Suspended
}
#[allow(unrooted_must_root)]
fn push_pending_resume_promise(&self, promise: &Rc<Promise>) {
self.pending_resume_promises
.borrow_mut()
.push(promise.clone());
}
/// Takes the pending resume promises.
///
/// The result with which these promises will be fulfilled is passed here
/// and this method returns nothing because we actually just move the
/// current list of pending resume promises to the
/// `in_flight_resume_promises_queue` field.
///
/// Each call to this method must be followed by a call to
/// `fulfill_in_flight_resume_promises`, to actually fulfill the promises
/// which were taken and moved to the in-flight queue.
#[allow(unrooted_must_root)]
fn take_pending_resume_promises(&self, result: ErrorResult) {
let pending_resume_promises =
mem::replace(&mut *self.pending_resume_promises.borrow_mut(), vec![]);
self.in_flight_resume_promises_queue
.borrow_mut()
.push_back((pending_resume_promises.into(), result));
}
/// Fulfills the next in-flight resume promises queue after running a closure.
///
/// See the comment on `take_pending_resume_promises` for why this method
/// does not take a list of promises to fulfill. Callers cannot just pop
/// the front list off of `in_flight_resume_promises_queue` and later fulfill
/// the promises because that would mean putting
/// `#[allow(unrooted_must_root)]` on even more functions, potentially
/// hiding actual safety bugs.
#[allow(unrooted_must_root)]
fn fulfill_in_flight_resume_promises<F>(&self, f: F)
where
F: FnOnce(),
{
let (promises, result) = self
.in_flight_resume_promises_queue
.borrow_mut()
.pop_front()
.expect("there should be at least one list of in flight resume promises");
f();
for promise in &*promises {
match result {
Ok(ref value) => promise.resolve_native(value),
Err(ref error) => promise.reject_error(error.clone()),
}
}
}
/// Control thread processing state
pub fn control_thread_state(&self) -> ProcessingState {
self.audio_context_impl.state()
}
/// Set audio context state
pub fn set_state_attribute(&self, state: AudioContextState) {
self.state.set(state);
}
pub fn resume(&self) {
let global = self.global();
let window = global.as_window();
let task_source = window.dom_manipulation_task_source();
let this = Trusted::new(self);
// Set the rendering thread state to 'running' and start
// rendering the audio graph.
match self.audio_context_impl.resume() {
Ok(()) => {
self.take_pending_resume_promises(Ok(()));
let _ = task_source.queue(
task!(resume_success: move || {
let this = this.root();
this.fulfill_in_flight_resume_promises(|| {
if this.state.get() != AudioContextState::Running {
this.state.set(AudioContextState::Running);
let window = DomRoot::downcast::<Window>(this.global()).unwrap();
window.dom_manipulation_task_source().queue_simple_event(
this.upcast(),
atom!("statechange"),
&window
);
}
});
}),
window.upcast(),
);
},
Err(()) => {
self.take_pending_resume_promises(Err(Error::Type(
"Something went wrong".to_owned(),
)));
let _ = task_source.queue(
task!(resume_error: move || {
this.root().fulfill_in_flight_resume_promises(|| {})
}),
window.upcast(),
);
},
}
}
}
impl BaseAudioContextMethods for BaseAudioContext {
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate
fn SampleRate(&self) -> Finite<f32> {
Finite::wrap(self.sample_rate)
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-currenttime
fn CurrentTime(&self) -> Finite<f64> {
let current_time = self.audio_context_impl.current_time();
Finite::wrap(current_time)
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state
fn State(&self) -> AudioContextState {
self.state.get()
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-resume
#[allow(unrooted_must_root)]
fn Resume(&self) -> Rc<Promise> {
// Step 1.
let promise = Promise::new(&self.global());
// Step 2.
if self.audio_context_impl.state() == ProcessingState::Closed {
promise.reject_error(Error::InvalidState);
return promise;
}
// Step 3.
if self.state.get() == AudioContextState::Running {
promise.resolve_native(&());
return promise;
}
self.push_pending_resume_promise(&promise);
// Step 4.
if !self.is_allowed_to_start() {
return promise;
}
// Steps 5 and 6.
self.resume();
// Step 7.
promise
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination
fn Destination(&self) -> DomRoot<AudioDestinationNode> {
let global = self.global();
self.destination.or_init(|| {
let mut options = AudioNodeOptions::empty();
options.channelCount = Some(self.channel_count);
options.channelCountMode = Some(ChannelCountMode::Explicit);
options.channelInterpretation = Some(ChannelInterpretation::Speakers);
AudioDestinationNode::new(&global, self, &options)
})
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
event_handler!(statechange, GetOnstatechange, SetOnstatechange);
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createoscillator
fn CreateOscillator(&self) -> DomRoot<OscillatorNode> {
OscillatorNode::new(
&self.global().as_window(),
&self,
&OscillatorOptions::empty(),
)
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-creategain
fn CreateGain(&self) -> DomRoot<GainNode> {
GainNode::new(&self.global().as_window(), &self, &GainOptions::empty())
}
/// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer
fn CreateBuffer(
&self,
number_of_channels: u32,
length: u32,
sample_rate: Finite<f32>,
) -> Fallible<DomRoot<AudioBuffer>> {
if number_of_channels == 0 ||
number_of_channels > MAX_CHANNEL_COUNT ||
length == 0 ||
*sample_rate <= 0.
{
return Err(Error::NotSupported);
}
Ok(AudioBuffer::new(
&self.global().as_window(),
number_of_channels,
length,
*sample_rate,
None,
))
}
// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffersource
fn CreateBufferSource(&self) -> DomRoot<AudioBufferSourceNode> {
AudioBufferSourceNode::new(
&self.global().as_window(),
&self,
&AudioBufferSourceOptions::empty(),
)
}
// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-decodeaudiodata
#[allow(unrooted_must_root)]
fn DecodeAudioData(
&self,
audio_data: CustomAutoRooterGuard<ArrayBuffer>,
decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
decode_error_callback: Option<Rc<DecodeErrorCallback>>,
) -> Rc<Promise> {
// Step 1.
let promise = Promise::new(&self.global());
let global = self.global();
let window = global.as_window();
if audio_data.len() > 0 {
// Step 2.
// XXX detach array buffer.
let uuid = Uuid::new_v4().simple().to_string();
let uuid_ = uuid.clone();
self.decode_resolvers.borrow_mut().insert(
uuid.clone(),
DecodeResolver {
promise: promise.clone(),
success_callback: decode_success_callback,
error_callback: decode_error_callback,
},
);
let audio_data = audio_data.to_vec();
let decoded_audio = Arc::new(Mutex::new(Vec::new()));
let decoded_audio_ = decoded_audio.clone();
let this = Trusted::new(self);
let this_ = this.clone();
let task_source = window.dom_manipulation_task_source();
let task_source_ = window.dom_manipulation_task_source();
let canceller = window.task_canceller(TaskSourceName::DOMManipulation);
let canceller_ = window.task_canceller(TaskSourceName::DOMManipulation);
let callbacks = AudioDecoderCallbacks::new()
.eos(move || {
let _ = task_source.queue_with_canceller(
task!(audio_decode_eos: move || {
let this = this.root();
let decoded_audio = decoded_audio.lock().unwrap();
let buffer = AudioBuffer::new(
&this.global().as_window(),
1, // XXX servo-media should provide this info
decoded_audio.len() as u32,
this.sample_rate,
Some(decoded_audio.as_slice()));
let mut resolvers = this.decode_resolvers.borrow_mut();
assert!(resolvers.contains_key(&uuid_));
let resolver = resolvers.remove(&uuid_).unwrap();
if let Some(callback) = resolver.success_callback {
let _ = callback.Call__(&buffer, ExceptionHandling::Report);
}
resolver.promise.resolve_native(&buffer);
}),
&canceller,
);
})
.error(move || {
let _ = task_source_.queue_with_canceller(
task!(audio_decode_error: move || {
let this = this_.root();
let mut resolvers = this.decode_resolvers.borrow_mut();
assert!(resolvers.contains_key(&uuid));
let resolver = resolvers.remove(&uuid).unwrap();
if let Some(callback) = resolver.error_callback {
let _ = callback.Call__(
&DOMException::new(&this.global(), DOMErrorName::DataCloneError),
ExceptionHandling::Report);
}
resolver.promise.reject_error(Error::Type("Audio decode error".to_owned()));
}),
&canceller_,
);
})
.progress(move |buffer| {
decoded_audio_
.lock()
.unwrap()
.extend_from_slice((*buffer).as_ref());
})
.build();
self.audio_context_impl
.decode_audio_data(audio_data, callbacks);
} else {
// Step 3.
promise.reject_error(Error::DataClone);
return promise;
}
// Step 4.
promise
}
}
impl From<BaseAudioContextOptions> for AudioContextOptions {
fn from(options: BaseAudioContextOptions) -> Self {
match options {
BaseAudioContextOptions::AudioContext(options) =>
AudioContextOptions::RealTimeAudioContext(options),
BaseAudioContextOptions::OfflineAudioContext(options) =>
AudioContextOptions::OfflineAudioContext(options),
}
}
}
impl From<ProcessingState> for AudioContextState {
fn from(state: ProcessingState) -> Self {
match state {
ProcessingState::Suspended => AudioContextState::Suspended,
ProcessingState::Running => AudioContextState::Running,
ProcessingState::Closed => AudioContextState::Closed,
}
}
}
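
The `DecodeAudioData` plumbing above is the trickiest part of this file: servo-media fires the `progress`/`eos`/`error` callbacks on a decoder thread, so every DOM-facing step (resolving the promise, invoking the WebIDL callbacks) has to be re-queued on the DOM-manipulation task source. A minimal, std-only sketch of that handoff pattern — the names (`DecodeEvent`, the channel standing in for the task source) are illustrative, not Servo or servo-media APIs:

```rust
use std::sync::mpsc;
use std::thread;

#[allow(dead_code)] // the Error arm is shown for shape; this sketch never sends it
enum DecodeEvent {
    Progress(Vec<f32>),
    Eos,
    Error(String),
}

fn main() {
    // The channel stands in for the DOM-manipulation task source.
    let (task_source, script_thread) = mpsc::channel::<DecodeEvent>();

    // Decoder thread: plays the role of servo-media's decoder callbacks.
    thread::spawn(move || {
        for chunk in 0..3 {
            let samples = vec![chunk as f32; 4]; // stand-in for decoded PCM
            let _ = task_source.send(DecodeEvent::Progress(samples));
        }
        let _ = task_source.send(DecodeEvent::Eos);
    });

    // Script thread: accumulates progress chunks, then settles the "promise".
    let mut decoded_audio: Vec<f32> = Vec::new();
    for event in script_thread {
        match event {
            DecodeEvent::Progress(chunk) => decoded_audio.extend_from_slice(&chunk),
            DecodeEvent::Eos => {
                println!("decoded {} samples; build the AudioBuffer and resolve",
                         decoded_audio.len());
                break;
            }
            DecodeEvent::Error(e) => {
                println!("reject with a decode error: {}", e);
                break;
            }
        }
    }
}
```

The `Trusted`/`task!` machinery in the real code serves the same purpose as the channel here, with GC safety and task cancellation on top.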

View file

@ -423,6 +423,13 @@ impl<T> PartialEq for Dom<T> {
}
}
impl<'a, T: DomObject> PartialEq<&'a T> for Dom<T> {
fn eq(&self, other: &&'a T) -> bool {
*self == Dom::from_ref(*other)
}
}
impl<T> Eq for Dom<T> {}
impl<T> PartialEq for LayoutDom<T> {

View file

@ -90,6 +90,11 @@ use selectors::matching::ElementSelectorFlags;
use serde::{Deserialize, Serialize};
use servo_arc::Arc as ServoArc;
use servo_atoms::Atom;
use servo_media::Backend;
use servo_media::audio::buffer_source_node::AudioBuffer;
use servo_media::audio::context::AudioContext;
use servo_media::audio::graph::NodeId;
use servo_media::audio::param::ParamType;
use servo_url::{ImmutableOrigin, MutableOrigin, ServoUrl};
use smallvec::SmallVec;
use std::cell::{Cell, RefCell, UnsafeCell};
@ -429,6 +434,10 @@ unsafe_no_jsmanaged_fields!(InteractiveMetrics);
unsafe_no_jsmanaged_fields!(InteractiveWindow);
unsafe_no_jsmanaged_fields!(CanvasId);
unsafe_no_jsmanaged_fields!(SourceSet);
unsafe_no_jsmanaged_fields!(AudioBuffer);
unsafe_no_jsmanaged_fields!(AudioContext<Backend>);
unsafe_no_jsmanaged_fields!(NodeId);
unsafe_no_jsmanaged_fields!(ParamType);
unsafe impl<'a> JSTraceable for &'a str {
#[inline]

View file

@ -0,0 +1,95 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audionode::AudioNode;
use dom::audioparam::AudioParam;
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::codegen::Bindings::AudioParamBinding::AutomationRate;
use dom::bindings::codegen::Bindings::GainNodeBinding::{self, GainNodeMethods, GainOptions};
use dom::bindings::error::Fallible;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::{Dom, DomRoot};
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::gain_node::GainNodeOptions;
use servo_media::audio::node::AudioNodeInit;
use servo_media::audio::param::ParamType;
use std::f32;
#[dom_struct]
pub struct GainNode {
node: AudioNode,
gain: Dom<AudioParam>,
}
impl GainNode {
#[allow(unrooted_must_root)]
pub fn new_inherited(
window: &Window,
context: &BaseAudioContext,
gain_options: &GainOptions,
) -> GainNode {
let mut node_options = AudioNodeOptions::empty();
node_options.channelCount = Some(2);
node_options.channelCountMode = Some(ChannelCountMode::Max);
node_options.channelInterpretation = Some(ChannelInterpretation::Speakers);
let node = AudioNode::new_inherited(
AudioNodeInit::GainNode(gain_options.into()),
None,
context,
&node_options,
1, // inputs
1, // outputs
);
let gain = AudioParam::new(
window,
context,
node.node_id(),
ParamType::Gain,
AutomationRate::A_rate,
1., // default value
f32::MIN, // min value
f32::MAX, // max value
);
GainNode {
node,
gain: Dom::from_ref(&gain),
}
}
#[allow(unrooted_must_root)]
pub fn new(
window: &Window,
context: &BaseAudioContext,
options: &GainOptions,
) -> DomRoot<GainNode> {
let node = GainNode::new_inherited(window, context, options);
reflect_dom_object(Box::new(node), window, GainNodeBinding::Wrap)
}
pub fn Constructor(
window: &Window,
context: &BaseAudioContext,
options: &GainOptions,
) -> Fallible<DomRoot<GainNode>> {
Ok(GainNode::new(window, context, options))
}
}
impl GainNodeMethods for GainNode {
// https://webaudio.github.io/web-audio-api/#dom-gainnode-gain
fn Gain(&self) -> DomRoot<AudioParam> {
DomRoot::from_ref(&self.gain)
}
}
impl<'a> From<&'a GainOptions> for GainNodeOptions {
fn from(options: &'a GainOptions) -> Self {
Self {
gain: *options.gain,
}
}
}
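
For reference, the DOM node carries no processing logic; `AudioNodeInit::GainNode` hands the options to servo-media, where gain reduces to a per-sample multiply. A standalone sketch of that kernel (not the servo-media implementation):

```rust
// Multiply every sample in a block by the current gain value.
fn apply_gain(samples: &mut [f32], gain: f32) {
    for s in samples.iter_mut() {
        *s *= gain;
    }
}

fn main() {
    let mut block = [0.5f32, -0.5, 1.0, -1.0];
    apply_gain(&mut block, 0.25);
    assert_eq!(block, [0.125, -0.125, 0.25, -0.25]);
}
```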

View file

@ -629,3 +629,4 @@ macro_rules! handle_potential_webgl_error {
handle_potential_webgl_error!($context, $call, ());
};
}

View file

@ -216,6 +216,14 @@ pub mod abstractworker;
pub mod abstractworkerglobalscope;
pub mod activation;
pub mod attr;
pub mod audiobuffer;
pub mod audiobuffersourcenode;
pub mod audiocontext;
pub mod audiodestinationnode;
pub mod audionode;
pub mod audioparam;
pub mod audioscheduledsourcenode;
pub mod baseaudiocontext;
pub mod beforeunloadevent;
pub mod bindings;
pub mod blob;
@ -290,6 +298,7 @@ pub mod filereader;
pub mod filereadersync;
pub mod focusevent;
pub mod formdata;
pub mod gainnode;
pub mod gamepad;
pub mod gamepadbutton;
pub mod gamepadbuttonlist;
@ -392,6 +401,9 @@ pub mod navigatorinfo;
pub mod node;
pub mod nodeiterator;
pub mod nodelist;
pub mod offlineaudiocompletionevent;
pub mod offlineaudiocontext;
pub mod oscillatornode;
pub mod pagetransitionevent;
pub mod paintrenderingcontext2d;
pub mod paintsize;

View file

@ -0,0 +1,77 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audiobuffer::AudioBuffer;
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding;
use dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventInit;
use dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventMethods;
use dom::bindings::error::Fallible;
use dom::bindings::inheritance::Castable;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::{Dom, DomRoot, RootedReference};
use dom::bindings::str::DOMString;
use dom::event::{Event, EventBubbles, EventCancelable};
use dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct OfflineAudioCompletionEvent {
event: Event,
rendered_buffer: Dom<AudioBuffer>,
}
impl OfflineAudioCompletionEvent {
pub fn new_inherited(rendered_buffer: &AudioBuffer) -> OfflineAudioCompletionEvent {
OfflineAudioCompletionEvent {
event: Event::new_inherited(),
rendered_buffer: Dom::from_ref(rendered_buffer),
}
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
rendered_buffer: &AudioBuffer,
) -> DomRoot<OfflineAudioCompletionEvent> {
let event = Box::new(OfflineAudioCompletionEvent::new_inherited(rendered_buffer));
let ev = reflect_dom_object(event, window, OfflineAudioCompletionEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
}
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &OfflineAudioCompletionEventInit,
) -> Fallible<DomRoot<OfflineAudioCompletionEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
Ok(OfflineAudioCompletionEvent::new(
window,
Atom::from(type_),
bubbles,
cancelable,
init.renderedBuffer.r(),
))
}
}
impl OfflineAudioCompletionEventMethods for OfflineAudioCompletionEvent {
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocompletionevent-renderedbuffer
fn RenderedBuffer(&self) -> DomRoot<AudioBuffer> {
DomRoot::from_ref(&*self.rendered_buffer)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}

View file

@ -0,0 +1,174 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audiobuffer::{AudioBuffer, MAX_SAMPLE_RATE, MIN_SAMPLE_RATE};
use dom::audionode::MAX_CHANNEL_COUNT;
use dom::baseaudiocontext::{BaseAudioContext, BaseAudioContextOptions};
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::BaseAudioContextBinding::BaseAudioContextBinding::BaseAudioContextMethods;
use dom::bindings::codegen::Bindings::OfflineAudioContextBinding;
use dom::bindings::codegen::Bindings::OfflineAudioContextBinding::OfflineAudioContextMethods;
use dom::bindings::codegen::Bindings::OfflineAudioContextBinding::OfflineAudioContextOptions;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::num::Finite;
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::event::{Event, EventBubbles, EventCancelable};
use dom::offlineaudiocompletionevent::OfflineAudioCompletionEvent;
use dom::promise::Promise;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::context::OfflineAudioContextOptions as ServoMediaOfflineAudioContextOptions;
use std::cell::Cell;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use std::sync::mpsc;
use std::thread::Builder;
use task_source::{TaskSource, TaskSourceName};
#[dom_struct]
pub struct OfflineAudioContext {
context: BaseAudioContext,
channel_count: u32,
length: u32,
rendering_started: Cell<bool>,
#[ignore_malloc_size_of = "promises are hard"]
pending_rendering_promise: DomRefCell<Option<Rc<Promise>>>,
}
impl OfflineAudioContext {
#[allow(unrooted_must_root)]
fn new_inherited(channel_count: u32,
length: u32,
sample_rate: f32) -> OfflineAudioContext {
let options = ServoMediaOfflineAudioContextOptions {
channels: channel_count as u8,
length: length as usize,
sample_rate,
};
let context = BaseAudioContext::new_inherited(
BaseAudioContextOptions::OfflineAudioContext(options),
);
OfflineAudioContext {
context,
channel_count,
length,
rendering_started: Cell::new(false),
pending_rendering_promise: Default::default(),
}
}
#[allow(unrooted_must_root)]
fn new(window: &Window,
channel_count: u32,
length: u32,
sample_rate: f32) -> DomRoot<OfflineAudioContext> {
let context = OfflineAudioContext::new_inherited(channel_count, length, sample_rate);
reflect_dom_object(Box::new(context), window, OfflineAudioContextBinding::Wrap)
}
pub fn Constructor(
window: &Window,
options: &OfflineAudioContextOptions,
) -> Fallible<DomRoot<OfflineAudioContext>> {
Ok(OfflineAudioContext::new(
window,
options.numberOfChannels,
options.length,
*options.sampleRate,
))
}
pub fn Constructor_(
window: &Window,
number_of_channels: u32,
length: u32,
sample_rate: Finite<f32>,
) -> Fallible<DomRoot<OfflineAudioContext>> {
if number_of_channels > MAX_CHANNEL_COUNT ||
number_of_channels == 0 ||
length == 0 ||
*sample_rate < MIN_SAMPLE_RATE ||
*sample_rate > MAX_SAMPLE_RATE
{
return Err(Error::NotSupported);
}
Ok(OfflineAudioContext::new(window, number_of_channels, length, *sample_rate))
}
}
impl OfflineAudioContextMethods for OfflineAudioContext {
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocontext-oncomplete
event_handler!(complete, GetOncomplete, SetOncomplete);
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocontext-length
fn Length(&self) -> u32 {
self.length
}
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocontext-startrendering
#[allow(unrooted_must_root)]
fn StartRendering(&self) -> Rc<Promise> {
let promise = Promise::new(&self.global());
if self.rendering_started.get() {
promise.reject_error(Error::InvalidState);
return promise;
}
self.rendering_started.set(true);
*self.pending_rendering_promise.borrow_mut() = Some(promise.clone());
let processed_audio = Arc::new(Mutex::new(Vec::new()));
let processed_audio_ = processed_audio.clone();
let (sender, receiver) = mpsc::channel();
let sender = Mutex::new(sender);
self.context
.audio_context_impl()
.set_eos_callback(Box::new(move |buffer| {
processed_audio_
.lock()
.unwrap()
.extend_from_slice((*buffer).as_ref());
let _ = sender.lock().unwrap().send(());
}));
let this = Trusted::new(self);
let global = self.global();
let window = global.as_window();
let task_source = window.dom_manipulation_task_source();
let canceller = window.task_canceller(TaskSourceName::DOMManipulation);
Builder::new()
.name("OfflineAudioContextResolver".to_owned())
.spawn(move || {
let _ = receiver.recv();
let _ = task_source.queue_with_canceller(
task!(resolve: move || {
let this = this.root();
let processed_audio = processed_audio.lock().unwrap();
let buffer = AudioBuffer::new(
&this.global().as_window(),
this.channel_count,
this.length,
*this.context.SampleRate(),
Some(processed_audio.as_slice()));
(*this.pending_rendering_promise.borrow_mut()).take().unwrap().resolve_native(&buffer);
let global = &this.global();
let window = global.as_window();
let event = OfflineAudioCompletionEvent::new(&window,
atom!("complete"),
EventBubbles::DoesNotBubble,
EventCancelable::NotCancelable,
&buffer);
event.upcast::<Event>().fire(this.upcast());
}),
&canceller,
);
})
.unwrap();
if self.context.audio_context_impl().resume().is_err() {
promise.reject_error(Error::Type("Could not start offline rendering".to_owned()));
}
promise
}
}
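
`StartRendering` is easiest to read as a rendezvous: the backend renders the whole buffer and fires the eos callback, a dedicated waiter thread blocks on a channel until that happens, and the waiter then queues the task that resolves the promise and fires `complete` back on the script thread. A std-only sketch of the same synchronization shape, with the Servo/servo-media specifics replaced by placeholders:

```rust
use std::sync::{mpsc, Arc, Mutex};
use std::thread::Builder;

fn main() {
    let processed_audio = Arc::new(Mutex::new(Vec::<f32>::new()));
    let (sender, receiver) = mpsc::channel::<()>();

    // Render thread: stands in for the offline backend's eos callback.
    let audio = processed_audio.clone();
    Builder::new()
        .name("OfflineRenderer".to_owned())
        .spawn(move || {
            audio.lock().unwrap().extend_from_slice(&[0.0; 128]); // one rendered block
            let _ = sender.send(()); // signal end of stream
        })
        .unwrap();

    // Waiter thread: blocks until eos, then would queue the completion task
    // (resolve the promise, fire "complete") on the script thread.
    let audio = processed_audio.clone();
    Builder::new()
        .name("OfflineAudioContextResolver".to_owned())
        .spawn(move || {
            let _ = receiver.recv();
            println!("rendering done: {} samples", audio.lock().unwrap().len());
        })
        .unwrap()
        .join()
        .unwrap();
}
```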

View file

@ -0,0 +1,132 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::audioparam::AudioParam;
use dom::audioscheduledsourcenode::AudioScheduledSourceNode;
use dom::baseaudiocontext::BaseAudioContext;
use dom::bindings::codegen::Bindings::AudioNodeBinding::{ChannelCountMode, ChannelInterpretation};
use dom::bindings::codegen::Bindings::AudioNodeBinding::AudioNodeOptions;
use dom::bindings::codegen::Bindings::AudioParamBinding::AutomationRate;
use dom::bindings::codegen::Bindings::OscillatorNodeBinding::{self, OscillatorOptions, OscillatorType};
use dom::bindings::codegen::Bindings::OscillatorNodeBinding::OscillatorNodeMethods;
use dom::bindings::error::Fallible;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::{Dom, DomRoot};
use dom::window::Window;
use dom_struct::dom_struct;
use servo_media::audio::node::AudioNodeInit;
use servo_media::audio::oscillator_node::OscillatorNodeOptions as ServoMediaOscillatorOptions;
use servo_media::audio::oscillator_node::OscillatorType as ServoMediaOscillatorType;
use servo_media::audio::param::ParamType;
use std::f32;
#[dom_struct]
pub struct OscillatorNode {
source_node: AudioScheduledSourceNode,
oscillator_type: OscillatorType,
frequency: Dom<AudioParam>,
detune: Dom<AudioParam>,
}
impl OscillatorNode {
#[allow(unrooted_must_root)]
pub fn new_inherited(
window: &Window,
context: &BaseAudioContext,
oscillator_options: &OscillatorOptions,
) -> OscillatorNode {
let mut node_options = AudioNodeOptions::empty();
node_options.channelCount = Some(2);
node_options.channelCountMode = Some(ChannelCountMode::Max);
node_options.channelInterpretation = Some(ChannelInterpretation::Speakers);
let source_node = AudioScheduledSourceNode::new_inherited(
AudioNodeInit::OscillatorNode(oscillator_options.into()),
context,
&node_options,
0, /* inputs */
1, /* outputs */
);
let node_id = source_node.node().node_id();
let frequency = AudioParam::new(
window,
context,
node_id,
ParamType::Frequency,
AutomationRate::A_rate,
440.,
f32::MIN,
f32::MAX,
);
let detune = AudioParam::new(
window,
context,
node_id,
ParamType::Detune,
AutomationRate::A_rate,
0.,
-440. / 2.,
440. / 2.,
);
OscillatorNode {
source_node,
oscillator_type: oscillator_options.type_,
frequency: Dom::from_ref(&frequency),
detune: Dom::from_ref(&detune),
}
}
#[allow(unrooted_must_root)]
pub fn new(
window: &Window,
context: &BaseAudioContext,
options: &OscillatorOptions,
) -> DomRoot<OscillatorNode> {
let node = OscillatorNode::new_inherited(window, context, options);
reflect_dom_object(Box::new(node), window, OscillatorNodeBinding::Wrap)
}
pub fn Constructor(
window: &Window,
context: &BaseAudioContext,
options: &OscillatorOptions,
) -> Fallible<DomRoot<OscillatorNode>> {
Ok(OscillatorNode::new(window, context, options))
}
}
impl OscillatorNodeMethods for OscillatorNode {
// https://webaudio.github.io/web-audio-api/#dom-oscillatornode-frequency
fn Frequency(&self) -> DomRoot<AudioParam> {
DomRoot::from_ref(&self.frequency)
}
// https://webaudio.github.io/web-audio-api/#dom-oscillatornode-detune
fn Detune(&self) -> DomRoot<AudioParam> {
DomRoot::from_ref(&self.detune)
}
}
impl<'a> From<&'a OscillatorOptions> for ServoMediaOscillatorOptions {
fn from(options: &'a OscillatorOptions) -> Self {
Self {
oscillator_type: options.type_.into(),
freq: *options.frequency,
detune: *options.detune,
periodic_wave_options: None, // XXX
}
}
}
impl From<OscillatorType> for ServoMediaOscillatorType {
fn from(oscillator_type: OscillatorType) -> Self {
match oscillator_type {
OscillatorType::Sine => ServoMediaOscillatorType::Sine,
OscillatorType::Square => ServoMediaOscillatorType::Square,
OscillatorType::Sawtooth => ServoMediaOscillatorType::Sawtooth,
OscillatorType::Triangle => ServoMediaOscillatorType::Triangle,
OscillatorType::Custom => ServoMediaOscillatorType::Custom,
}
}
}
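
The `frequency` and `detune` params combine per the spec as f · 2^(detune/1200), with detune in cents; the ±220 detune range above looks like a placeholder rather than a spec limit. A standalone sketch of filling one block from a sine oscillator, assuming naive per-sample phase computation (the actual synthesis lives in servo-media):

```rust
use std::f32::consts::PI;

// Computed oscillator frequency per the spec: f * 2^(detune / 1200).
fn fill_sine(block: &mut [f32], frequency: f32, detune: f32, sample_rate: f32) {
    let computed = frequency * (detune / 1200.).exp2();
    for (i, sample) in block.iter_mut().enumerate() {
        let t = i as f32 / sample_rate;
        *sample = (2. * PI * computed * t).sin();
    }
}

fn main() {
    let mut block = [0.0f32; 128];
    fill_sine(&mut block, 440., 0., 44_100.);
    assert_eq!(block[0], 0.0); // a sine oscillator starts at phase 0
}
```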

View file

@ -0,0 +1,29 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#audiobuffer
*/
dictionary AudioBufferOptions {
unsigned long numberOfChannels = 1;
required unsigned long length;
required float sampleRate;
};
[Exposed=Window,
Constructor (AudioBufferOptions options)]
interface AudioBuffer {
readonly attribute float sampleRate;
readonly attribute unsigned long length;
readonly attribute double duration;
readonly attribute unsigned long numberOfChannels;
[Throws] Float32Array getChannelData(unsigned long channel);
[Throws] void copyFromChannel(Float32Array destination,
unsigned long channelNumber,
optional unsigned long startInChannel = 0);
[Throws] void copyToChannel(Float32Array source,
unsigned long channelNumber,
optional unsigned long startInChannel = 0);
};
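
`copyFromChannel`/`copyToChannel` are clamped copies: on one reading of the spec, min(channelLength − startInChannel, destinationLength) frames are copied, and only an out-of-range channel index or start offset throws `IndexSizeError`. A plain-Rust sketch of that rule with a hypothetical helper, independent of the typed-array plumbing:

```rust
// Hypothetical helper mirroring copyFromChannel's clamping; `start` is the
// startInChannel offset from the IDL above.
fn copy_from_channel(channel: &[f32], start: usize, dest: &mut [f32]) -> Result<(), ()> {
    if start >= channel.len() {
        return Err(()); // the DOM side throws IndexSizeError here
    }
    let n = dest.len().min(channel.len() - start);
    dest[..n].copy_from_slice(&channel[start..start + n]);
    Ok(())
}

fn main() {
    let channel = [1.0f32, 2.0, 3.0, 4.0];
    let mut dest = [0.0f32; 8];
    copy_from_channel(&channel, 1, &mut dest).unwrap();
    assert_eq!(&dest[..3], &[2.0, 3.0, 4.0]); // only 3 frames were available
}
```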

View file

@ -0,0 +1,30 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#AudioBufferSourceNode
*/
dictionary AudioBufferSourceOptions {
// AudioBuffer? buffer;
float detune = 0;
boolean loop = false;
double loopEnd = 0;
double loopStart = 0;
float playbackRate = 1;
};
[Exposed=Window,
Constructor (BaseAudioContext context, optional AudioBufferSourceOptions options)]
interface AudioBufferSourceNode : AudioScheduledSourceNode {
[Throws] attribute AudioBuffer? buffer;
readonly attribute AudioParam playbackRate;
readonly attribute AudioParam detune;
attribute boolean loop;
attribute double loopStart;
attribute double loopEnd;
[Throws] void start(optional double when = 0,
optional double offset,
optional double duration);
};

View file

@ -0,0 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#dom-audiocontext
*/
enum AudioContextLatencyCategory {
"balanced",
"interactive",
"playback"
};
dictionary AudioContextOptions {
AudioContextLatencyCategory latencyHint = "interactive";
float sampleRate;
};
dictionary AudioTimestamp {
double contextTime;
DOMHighResTimeStamp performanceTime;
};
[Exposed=Window,
Constructor(optional AudioContextOptions contextOptions)]
interface AudioContext : BaseAudioContext {
readonly attribute double baseLatency;
readonly attribute double outputLatency;
AudioTimestamp getOutputTimestamp();
Promise<void> suspend();
Promise<void> close();
// MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
// MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
// MediaStreamTrackAudioSourceNode createMediaStreamTrackSource(MediaStreamTrack mediaStreamTrack);
// MediaStreamAudioDestinationNode createMediaStreamDestination();
};

View file

@ -0,0 +1,12 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#dom-audiodestinationnode
*/
[Exposed=Window]
interface AudioDestinationNode : AudioNode {
readonly attribute unsigned long maxChannelCount;
};

View file

@ -0,0 +1,61 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#dom-audionode
*/
enum ChannelCountMode {
"max",
"clamped-max",
"explicit"
};
enum ChannelInterpretation {
"speakers",
"discrete"
};
dictionary AudioNodeOptions {
unsigned long channelCount;
ChannelCountMode channelCountMode;
ChannelInterpretation channelInterpretation;
};
[Exposed=Window]
interface AudioNode : EventTarget {
[Throws]
AudioNode connect(AudioNode destinationNode,
optional unsigned long output = 0,
optional unsigned long input = 0);
[Throws]
void connect(AudioParam destinationParam,
optional unsigned long output = 0);
[Throws]
void disconnect();
[Throws]
void disconnect(unsigned long output);
[Throws]
void disconnect(AudioNode destination);
[Throws]
void disconnect(AudioNode destination, unsigned long output);
[Throws]
void disconnect(AudioNode destination,
unsigned long output,
unsigned long input);
[Throws]
void disconnect(AudioParam destination);
[Throws]
void disconnect(AudioParam destination, unsigned long output);
readonly attribute BaseAudioContext context;
readonly attribute unsigned long numberOfInputs;
readonly attribute unsigned long numberOfOutputs;
[SetterThrows]
attribute unsigned long channelCount;
[SetterThrows]
attribute ChannelCountMode channelCountMode;
attribute ChannelInterpretation channelInterpretation;
};
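
Note that `connect`/`disconnect` relate specific output and input *indices*, not just node pairs, which is why there are so many `disconnect` overloads. A toy adjacency structure showing what has to be tracked — unrelated to servo-media's actual graph types, despite the borrowed `NodeId` name:

```rust
use std::collections::HashSet;

type NodeId = u32;

#[derive(Default)]
struct AudioGraph {
    // (source node, source output, destination node, destination input)
    edges: HashSet<(NodeId, u32, NodeId, u32)>,
}

impl AudioGraph {
    fn connect(&mut self, from: NodeId, output: u32, to: NodeId, input: u32) {
        self.edges.insert((from, output, to, input));
    }
    fn disconnect_all(&mut self, from: NodeId) {
        self.edges.retain(|&(src, _, _, _)| src != from);
    }
}

fn main() {
    let mut graph = AudioGraph::default();
    let (osc, gain, dest) = (0, 1, 2);
    graph.connect(osc, 0, gain, 0);
    graph.connect(gain, 0, dest, 0);
    graph.disconnect_all(osc); // the zero-argument disconnect() overload
    assert_eq!(graph.edges.len(), 1);
}
```

The more specific overloads then become removal of a single matching edge rather than a whole-source sweep.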

View file

@ -0,0 +1,32 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#dom-audioparam
*/
enum AutomationRate {
"a-rate",
"k-rate"
};
[Exposed=Window]
interface AudioParam {
attribute float value;
attribute AutomationRate automationRate;
readonly attribute float defaultValue;
readonly attribute float minValue;
readonly attribute float maxValue;
AudioParam setValueAtTime(float value, double startTime);
AudioParam linearRampToValueAtTime(float value, double endTime);
AudioParam exponentialRampToValueAtTime(float value, double endTime);
AudioParam setTargetAtTime(float target,
double startTime,
float timeConstant);
// AudioParam setValueCurveAtTime(sequence<float> values,
// double startTime,
// double duration);
AudioParam cancelScheduledValues(double cancelTime);
AudioParam cancelAndHoldAtTime(double cancelTime);
};
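
`linearRampToValueAtTime` interpolates from the previous event's value V0 at time T0 to V1 at T1 using the spec formula v(t) = V0 + (V1 − V0)·(t − T0)/(T1 − T0). A one-function sketch of that formula:

```rust
// v(t) = v0 + (v1 - v0) * (t - t0) / (t1 - t0), for t0 <= t <= t1.
fn linear_ramp(v0: f32, t0: f64, v1: f32, t1: f64, t: f64) -> f32 {
    v0 + (v1 - v0) * ((t - t0) / (t1 - t0)) as f32
}

fn main() {
    // Ramp a gain from 0 to 1 over one second; halfway through it reads 0.5.
    assert_eq!(linear_ramp(0., 0.0, 1., 1.0, 0.5), 0.5);
}
```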

View file

@ -0,0 +1,14 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#AudioScheduledSourceNode
*/
[Exposed=Window]
interface AudioScheduledSourceNode : AudioNode {
attribute EventHandler onended;
[Throws] void start(optional double when = 0);
[Throws] void stop(optional double when = 0);
};

View file

@ -0,0 +1,55 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#BaseAudioContext
*/
enum AudioContextState {
"suspended",
"running",
"closed"
};
callback DecodeErrorCallback = void (DOMException error);
callback DecodeSuccessCallback = void (AudioBuffer decodedData);
[Exposed=Window]
interface BaseAudioContext : EventTarget {
readonly attribute AudioDestinationNode destination;
readonly attribute float sampleRate;
readonly attribute double currentTime;
// readonly attribute AudioListener listener;
readonly attribute AudioContextState state;
Promise<void> resume();
attribute EventHandler onstatechange;
[Throws] AudioBuffer createBuffer(unsigned long numberOfChannels,
unsigned long length,
float sampleRate);
Promise<AudioBuffer> decodeAudioData(ArrayBuffer audioData,
optional DecodeSuccessCallback successCallback,
optional DecodeErrorCallback errorCallback);
AudioBufferSourceNode createBufferSource();
// ConstantSourceNode createConstantSource();
// ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
// optional unsigned long numberOfInputChannels = 2,
// optional unsigned long numberOfOutputChannels = 2);
// AnalyserNode createAnalyser();
GainNode createGain();
// DelayNode createDelay(optional double maxDelayTime = 1);
// BiquadFilterNode createBiquadFilter();
// IIRFilterNode createIIRFilter(sequence<double> feedforward,
// sequence<double> feedback);
// WaveShaperNode createWaveShaper();
// PannerNode createPanner();
// StereoPannerNode createStereoPanner();
// ConvolverNode createConvolver();
// ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
// ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
// DynamicsCompressorNode createDynamicsCompressor();
OscillatorNode createOscillator();
// PeriodicWave createPeriodicWave(sequence<float> real,
// sequence<float> imag,
// optional PeriodicWaveConstraints constraints);
};

View file

@ -0,0 +1,17 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#gainnode
*/
dictionary GainOptions : AudioNodeOptions {
float gain = 1.0;
};
[Exposed=Window,
Constructor (BaseAudioContext context, optional GainOptions options)]
interface GainNode : AudioNode {
readonly attribute AudioParam gain;
};

View file

@ -0,0 +1,17 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* For more information on this interface please see
* https://webaudio.github.io/web-audio-api/#offlineaudiocompletionevent
*/
dictionary OfflineAudioCompletionEventInit : EventInit {
required AudioBuffer renderedBuffer;
};
[Exposed=Window,
Constructor(DOMString type, OfflineAudioCompletionEventInit eventInitDict)]
interface OfflineAudioCompletionEvent : Event {
readonly attribute AudioBuffer renderedBuffer;
};

View file

@ -0,0 +1,24 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#OfflineAudioContext
*/
dictionary OfflineAudioContextOptions {
unsigned long numberOfChannels = 1;
required unsigned long length;
required float sampleRate;
};
[Exposed=Window,
Constructor (OfflineAudioContextOptions contextOptions),
Constructor (unsigned long numberOfChannels, unsigned long length, float sampleRate)]
interface OfflineAudioContext : BaseAudioContext {
readonly attribute unsigned long length;
attribute EventHandler oncomplete;
Promise<AudioBuffer> startRendering();
// Promise<void> suspend(double suspendTime);
};

View file

@ -0,0 +1,34 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* The origin of this IDL file is
* https://webaudio.github.io/web-audio-api/#oscillatornode
*/
enum OscillatorType {
"sine",
"square",
"sawtooth",
"triangle",
"custom"
};
dictionary OscillatorOptions : AudioNodeOptions {
OscillatorType type = "sine";
float frequency = 440;
float detune = 0;
// PeriodicWave periodicWave;
};
[Exposed=Window,
Constructor (BaseAudioContext context, optional OscillatorOptions options)]
interface OscillatorNode : AudioScheduledSourceNode {
// [SetterThrows]
// attribute OscillatorType type;
readonly attribute AudioParam frequency;
readonly attribute AudioParam detune;
// void setPeriodicWave (PeriodicWave periodicWave);
};