Update web-platform-tests to revision 4a5223502fa660ce03e470af6a61c8bc26c5a8ee

This commit is contained in:
WPT Sync Bot 2018-04-23 21:13:37 -04:00
parent c5f7c9ccf3
commit e891345f26
1328 changed files with 36632 additions and 20588 deletions

View file

@ -10,13 +10,16 @@
'use strict';
promise_test(async t => {
const [html, dom, mediacapture, webaudio] = await Promise.all([
const [html, dom, uievents, mediacapture, webaudio] = await Promise.all([
// Needed for EventTarget, HTMLMediaElement
'/interfaces/html.idl',
// Needed for Event, EventListener
'/interfaces/dom.idl',
// Needed for MouseEvent
'/interfaces/uievents.idl',
// Needed for MediaStream, MediaStreamTrack
'/interfaces/mediacapture-main.idl',
@ -28,7 +31,15 @@ promise_test(async t => {
// Dependencies of HTML
idl_array.add_untested_idls('interface LinkStyle {};');
idl_array.add_untested_idls('interface SVGElement {};');
idl_array.add_untested_idls('interface WorkletGlobalScope {};');
idl_array.add_untested_idls(html);
idl_array.add_untested_idls(uievents, { only: [
'MouseEvent',
'MouseEventInit',
'EventModifierInit',
'UIEvent',
'UIEventInit',
]});
idl_array.add_untested_idls(dom);
idl_array.add_untested_idls(mediacapture);

View file

@ -0,0 +1,171 @@
// Globals, to make testing and debugging easier.
let context;
let filter;          // Per-test array of BiquadFilterNodes (built in createTestAndRun).
let signal;          // Per-test array of AudioBufferSourceNodes feeding |filter|.
let renderedBuffer;
let renderedData;    // Channel 0 of the rendered output (set in checkFilterResponse).
let sampleRate = 44100.0;  // Hz
let pulseLengthFrames = .1 * sampleRate;  // Length of the impulse test buffer, in frames.
// Maximum allowed error for the test to succeed. Experimentally determined.
let maxAllowedError = 5.9e-8;
// This must be large enough so that the filtered result is
// essentially zero. See comments for createTestAndRun.
let timeStep = .1;
// Maximum number of filters we can process (mostly for setting the
// render length correctly.)
let maxFilters = 5;
// How long to render. Must be long enough for all of the filters we
// want to test.
let renderLengthSeconds = timeStep * (maxFilters + 1);
let renderLengthSamples = Math.round(renderLengthSeconds * sampleRate);
// Number of filters that will be processed.
let nFilters;
function createImpulseBuffer(context, length) {
  // A mono buffer whose only nonzero sample is a unit impulse at frame 0.
  const impulse = context.createBuffer(1, length, context.sampleRate);
  const samples = impulse.getChannelData(0);
  samples.fill(0);
  samples[0] = 1;
  return impulse;
}
function createTestAndRun(context, filterType, testParameters) {
  // To test the filters, we apply a signal (an impulse) to each of
  // the specified filters, with each signal starting at a different
  // time. The output of the filters is summed together at the
  // output. Thus for filter k, the signal input to the filter
  // starts at time k * timeStep. For this to work well, timeStep
  // must be large enough for the output of each filter to have
  // decayed to zero with timeStep seconds. That way the filter
  // outputs don't interfere with each other.
  //
  // testParameters.filterParameters: array of {cutoff, q, gain, detune?}.
  // Returns a promise that resolves after the rendered output has been
  // compared against the reference by checkFilterResponse().
  let filterParameters = testParameters.filterParameters;
  nFilters = Math.min(filterParameters.length, maxFilters);
  signal = new Array(nFilters);
  filter = new Array(nFilters);
  // Declared locally; this previously leaked `impulse` as an implicit
  // global (a ReferenceError under strict mode).
  let impulse = createImpulseBuffer(context, pulseLengthFrames);
  // Create all of the signal sources and filters that we need.
  for (let k = 0; k < nFilters; ++k) {
    signal[k] = context.createBufferSource();
    signal[k].buffer = impulse;
    filter[k] = context.createBiquadFilter();
    filter[k].type = filterType;
    // |cutoff| is normalized to the Nyquist frequency (sampleRate / 2).
    filter[k].frequency.value =
        context.sampleRate / 2 * filterParameters[k].cutoff;
    filter[k].detune.value = (filterParameters[k].detune === undefined) ?
        0 :
        filterParameters[k].detune;
    filter[k].Q.value = filterParameters[k].q;
    filter[k].gain.value = filterParameters[k].gain;
    signal[k].connect(filter[k]);
    filter[k].connect(context.destination);
    signal[k].start(timeStep * k);
  }
  return context.startRendering().then(buffer => {
    checkFilterResponse(buffer, filterType, testParameters);
  });
}
function addSignal(dest, src, destOffset) {
  // Add src into dest starting at destOffset, stopping at whichever array
  // ends first. The original used the comma operator in the loop condition
  // ("k < dest.length, j < src.length"), which discarded the first test and
  // allowed writes past the end of |dest|.
  for (let k = destOffset, j = 0; k < dest.length && j < src.length;
       ++k, ++j) {
    dest[k] += src[j];
  }
}
function generateReference(filterType, filterParameters) {
  // Compute the expected output: filter a unit impulse through each
  // configured filter offline, then accumulate the filtered signals at the
  // same start offsets used by createTestAndRun.
  const result = new Array(renderLengthSamples).fill(0);
  const data = new Array(renderLengthSamples).fill(0);
  // Make data an impulse.
  data[0] = 1;
  for (let i = 0; i < nFilters; ++i) {
    const params = filterParameters[i];
    const detune = (params.detune === undefined) ? 0 : params.detune;
    // Apply detune to the cutoff, converting from cents.
    const frequency = params.cutoff * Math.pow(2, detune / 1200);
    const filterCoef =
        createFilter(filterType, frequency, params.q, params.gain);
    const filtered = filterData(filterCoef, data, renderLengthSamples);
    // Accumulate this filtered data into the final output at the desired
    // offset.
    addSignal(result, filtered, timeToSampleFrame(timeStep * i, sampleRate));
  }
  return result;
}
function checkFilterResponse(renderedBuffer, filterType, testParameters) {
  // Compare |renderedBuffer| against the reference rendering produced by
  // generateReference() and assert the maximum error is within threshold.
  //
  // testParameters:
  //   filterParameters - per-filter settings; must match what was rendered.
  //   threshold        - maximum allowed |rendered - reference| difference.
  //   should           - audit.js assertion factory.
  let filterParameters = testParameters.filterParameters;
  let maxAllowedError = testParameters.threshold;  // shadows the file default
  let should = testParameters.should;
  renderedData = renderedBuffer.getChannelData(0);
  // Declared locally; this previously leaked `reference` as an implicit
  // global (a ReferenceError under strict mode).
  let reference = generateReference(filterType, filterParameters);
  let len = Math.min(renderedData.length, reference.length);
  // Maximum error between rendered data and expected data
  let maxError = 0;
  // Sample offset where the maximum error occurred (kept for debugging).
  let maxPosition = 0;
  // Number of infinities or NaNs that occurred in the rendered data.
  let invalidNumberCount = 0;
  should(nFilters, 'Number of filters tested')
      .beEqualTo(filterParameters.length);
  // Compare the rendered signal with our reference, keeping
  // track of the maximum difference (and the offset of the max
  // difference.) Check for bad numbers in the rendered output
  // too. There shouldn't be any.
  for (let k = 0; k < len; ++k) {
    let err = Math.abs(renderedData[k] - reference[k]);
    if (err > maxError) {
      maxError = err;
      maxPosition = k;
    }
    if (!isValidNumber(renderedData[k])) {
      ++invalidNumberCount;
    }
  }
  should(
      invalidNumberCount, 'Number of non-finite values in the rendered output')
      .beEqualTo(0);
  should(maxError, 'Max error in ' + filterTypeName[filterType] + ' response')
      .beLessThanOrEqualTo(maxAllowedError);
}

View file

@ -0,0 +1,350 @@
// Utilities for mixing rule testing.
// http://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing
/**
* Create an n-channel buffer, with all sample data zero except for a shifted
* impulse. The impulse position depends on the channel index. For example, for
* a 4-channel buffer:
* channel 0: 1 0 0 0 0 0 0 0
* channel 1: 0 1 0 0 0 0 0 0
* channel 2: 0 0 1 0 0 0 0 0
* channel 3: 0 0 0 1 0 0 0 0
* @param {AudioContext} context Associated AudioContext.
* @param {Number} numberOfChannels Number of channels of test buffer.
* @param {Number} frameLength Buffer length in frames.
* @return {AudioBuffer}
*/
function createShiftedImpulseBuffer(context, numberOfChannels, frameLength) {
  // Each channel carries a single unit impulse at frame index == channel
  // index, so channels are distinguishable after mixing.
  const buffer =
      context.createBuffer(numberOfChannels, frameLength, context.sampleRate);
  for (let channel = 0; channel < numberOfChannels; ++channel)
    buffer.getChannelData(channel)[channel] = 1;
  return buffer;
}
/**
* Create a string that displays the content of AudioBuffer.
* @param {AudioBuffer} audioBuffer AudioBuffer object to stringify.
* @param {Number} frameLength Number of frames to be printed.
* @param {Number} frameOffset Starting frame position for printing.
* @return {String}
*/
function stringifyBuffer(audioBuffer, frameLength, frameOffset) {
  // Render each channel as one newline-terminated row of space-separated
  // sample values, starting at the (optional) frame offset.
  const offset = frameOffset || 0;
  const rows = [];
  for (let channel = 0; channel < audioBuffer.numberOfChannels; ++channel) {
    const channelData = audioBuffer.getChannelData(channel);
    let row = '';
    for (let i = 0; i < frameLength; ++i)
      row += channelData[i + offset] + ' ';
    rows.push(row + '\n');
  }
  return rows.join('');
}
/**
* Compute number of channels from the connection.
* http://webaudio.github.io/web-audio-api/#dfn-computednumberofchannels
* @param {String} connections A string that specifies the connections. For
* example, the string "128" means 3
* connections, having 1, 2, and 8 channels
* respectively.
* @param {Number} channelCount Channel count.
* @param {String} channelCountMode Channel count mode.
* @return {Number} Computed number of channels.
*/
function computeNumberOfChannels(connections, channelCount, channelCountMode) {
  // Computed number of channels per the Web Audio spec.
  // 'explicit' mode ignores the connections entirely.
  if (channelCountMode == 'explicit')
    return channelCount;
  // Must have at least one channel.
  let computedNumberOfChannels = 1;
  // Compute "computedNumberOfChannels" based on all the connections.
  for (let i = 0; i < connections.length; ++i) {
    // Each character is a channel count from '1' to '8'; parse with an
    // explicit radix.
    let connectionNumberOfChannels = parseInt(connections[i], 10);
    computedNumberOfChannels =
        Math.max(computedNumberOfChannels, connectionNumberOfChannels);
  }
  if (channelCountMode == 'clamped-max')
    computedNumberOfChannels = Math.min(computedNumberOfChannels, channelCount);
  return computedNumberOfChannels;
}
/**
* Apply up/down-mixing (in-place summing) based on 'speaker' interpretation.
* @param {AudioBuffer} input Input audio buffer.
* @param {AudioBuffer} output Output audio buffer.
*/
function speakersSum(input, output) {
  // 'speakers' interpretation: dispatch to up-/down-mix when the channel
  // counts differ, otherwise sum channel-for-channel into |output|.
  if (input.length != output.length) {
    throw '[mixing-rules.js] speakerSum(): buffer lengths mismatch (input: ' +
        input.length + ', output: ' + output.length + ')';
  }
  const inChannels = input.numberOfChannels;
  const outChannels = output.numberOfChannels;
  if (inChannels < outChannels) {
    processUpMix(input, output);
  } else if (inChannels > outChannels) {
    processDownMix(input, output);
  } else {
    for (let channel = 0; channel < outChannels; ++channel) {
      const src = input.getChannelData(channel);
      const dst = output.getChannelData(channel);
      for (let i = 0; i < dst.length; i++)
        dst[i] += src[i];
    }
  }
}
/**
* In-place summing to |output| based on 'discrete' channel interpretation.
* @param {AudioBuffer} input Input audio buffer.
* @param {AudioBuffer} output Output audio buffer.
*/
/**
 * In-place summing to |output| based on 'discrete' channel interpretation:
 * matching channel indices are summed; extra channels on either side are
 * ignored (extra outputs stay silent, extra inputs are dropped).
 * @param {AudioBuffer} input Input audio buffer.
 * @param {AudioBuffer} output Output audio buffer.
 */
function discreteSum(input, output) {
  if (input.length != output.length) {
    // Fixed copy-paste bug: the message previously named speakerSum().
    throw '[mixing-rules.js] discreteSum(): buffer lengths mismatch (input: ' +
        input.length + ', output: ' + output.length + ')';
  }
  let numberOfChannels =
      Math.min(input.numberOfChannels, output.numberOfChannels);
  for (let channel = 0; channel < numberOfChannels; ++channel) {
    let inputChannel = input.getChannelData(channel);
    let outputChannel = output.getChannelData(channel);
    for (let i = 0; i < outputChannel.length; i++)
      outputChannel[i] += inputChannel[i];
  }
}
/**
* Perform up-mix by in-place summing to |output| buffer.
* @param {AudioBuffer} input Input audio buffer.
* @param {AudioBuffer} output Output audio buffer.
*/
function processUpMix(input, output) {
  // Speaker up-mix rules. Every rule is a plain gain-1 copy of an input
  // channel into an output channel, so each supported (in, out) pairing
  // reduces to a list of [outputChannel, inputChannel] routing pairs:
  //   1 -> 2, 1 -> 4 : mono input feeds both L and R
  //   1 -> 5.1       : mono input feeds the center channel only
  //   2 -> 4, 2 -> 5.1 : L -> L, R -> R
  //   4 -> 5.1       : L -> L, R -> R, SL -> SL, SR -> SR
  const routing = {
    '1:2': [[0, 0], [1, 0]],
    '1:4': [[0, 0], [1, 0]],
    '1:6': [[2, 0]],
    '2:4': [[0, 0], [1, 1]],
    '2:6': [[0, 0], [1, 1]],
    '4:6': [[0, 0], [1, 1], [4, 2], [5, 3]],
  };
  const pairs =
      routing[input.numberOfChannels + ':' + output.numberOfChannels];
  if (!pairs) {
    // All other cases, fall back to the discrete sum.
    discreteSum(input, output);
    return;
  }
  const frames = output.length;
  for (const [outIndex, inIndex] of pairs) {
    const src = input.getChannelData(inIndex);
    const dst = output.getChannelData(outIndex);
    for (let i = 0; i < frames; i++)
      dst[i] += src[i];
  }
}
/**
* Perform down-mix by in-place summing to |output| buffer.
* @param {AudioBuffer} input Input audio buffer.
* @param {AudioBuffer} output Output audio buffer.
*/
function processDownMix(input, output) {
  // Speaker down-mix rules, dispatched on the "<in>-><out>" channel-count
  // pair. Arithmetic expressions are kept exactly as specified so results
  // are bit-identical. Unsupported pairings fall back to the discrete sum.
  const frames = output.length;
  const scaleSqrtHalf = Math.sqrt(0.5);
  const ch = (buffer, index) => buffer.getChannelData(index);
  switch (input.numberOfChannels + '->' + output.numberOfChannels) {
    case '2->1': {
      // output += 0.5 * (input.L + input.R)
      const inL = ch(input, 0), inR = ch(input, 1);
      const out0 = ch(output, 0);
      for (let i = 0; i < frames; i++)
        out0[i] += 0.5 * (inL[i] + inR[i]);
      return;
    }
    case '4->1': {
      // output += 0.25 * (input.L + input.R + input.SL + input.SR)
      const inL = ch(input, 0), inR = ch(input, 1);
      const inSL = ch(input, 2), inSR = ch(input, 3);
      const out0 = ch(output, 0);
      for (let i = 0; i < frames; i++)
        out0[i] += 0.25 * (inL[i] + inR[i] + inSL[i] + inSR[i]);
      return;
    }
    case '6->1': {
      // output += sqrt(1/2) * (input.L + input.R) + input.C
      //           + 0.5 * (input.SL + input.SR)
      const inL = ch(input, 0), inR = ch(input, 1), inC = ch(input, 2);
      const inSL = ch(input, 4), inSR = ch(input, 5);
      const out0 = ch(output, 0);
      for (let i = 0; i < frames; i++) {
        out0[i] += scaleSqrtHalf * (inL[i] + inR[i]) + inC[i] +
            0.5 * (inSL[i] + inSR[i]);
      }
      return;
    }
    case '4->2': {
      // output.L += 0.5 * (input.L + input.SL)
      // output.R += 0.5 * (input.R + input.SR)
      const inL = ch(input, 0), inR = ch(input, 1);
      const inSL = ch(input, 2), inSR = ch(input, 3);
      const outL = ch(output, 0), outR = ch(output, 1);
      for (let i = 0; i < frames; i++) {
        outL[i] += 0.5 * (inL[i] + inSL[i]);
        outR[i] += 0.5 * (inR[i] + inSR[i]);
      }
      return;
    }
    case '6->2': {
      // output.L += input.L + sqrt(1/2) * (input.C + input.SL)
      // output.R += input.R + sqrt(1/2) * (input.C + input.SR)
      const inL = ch(input, 0), inR = ch(input, 1), inC = ch(input, 2);
      const inSL = ch(input, 4), inSR = ch(input, 5);
      const outL = ch(output, 0), outR = ch(output, 1);
      for (let i = 0; i < frames; i++) {
        outL[i] += inL[i] + scaleSqrtHalf * (inC[i] + inSL[i]);
        outR[i] += inR[i] + scaleSqrtHalf * (inC[i] + inSR[i]);
      }
      return;
    }
    case '6->4': {
      // output.L += input.L + sqrt(1/2) * input.C
      // output.R += input.R + sqrt(1/2) * input.C
      // output.SL += input.SL, output.SR += input.SR
      const inL = ch(input, 0), inR = ch(input, 1), inC = ch(input, 2);
      const inSL = ch(input, 4), inSR = ch(input, 5);
      const outL = ch(output, 0), outR = ch(output, 1);
      const outSL = ch(output, 2), outSR = ch(output, 3);
      for (let i = 0; i < frames; i++) {
        outL[i] += inL[i] + scaleSqrtHalf * inC[i];
        outR[i] += inR[i] + scaleSqrtHalf * inC[i];
        outSL[i] += inSL[i];
        outSR[i] += inSR[i];
      }
      return;
    }
    default:
      // All other cases, fall back to the discrete sum.
      discreteSum(input, output);
  }
}

View file

@ -0,0 +1,277 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode-channel-rules.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/mixing-rules.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context = 0;  // OfflineAudioContext; created in the task body below.
let sampleRate = 44100;
let renderNumberOfChannels = 8;
let singleTestFrameLength = 8;  // Frames rendered per individual test case.
let testBuffers;  // Shifted-impulse buffers with 1..8 channels; filled in the task body.
// A list of connections to an AudioNode input, each of which is to be
// used in one or more specific test cases. Each element in the list is a
// string, with the number of connections corresponding to the length of
// the string, and each character in the string is from '1' to '8'
// representing a 1 to 8 channel connection (from an AudioNode output).
// For example, the string "128" means 3 connections, having 1, 2, and 8
// channels respectively.
let connectionsList = [
  '1', '2', '3', '4', '5', '6', '7', '8', '11', '12', '14', '18', '111',
  '122', '123', '124', '128'
];
// A list of mixing rules, each of which will be tested against all of the
// connections in connectionsList.
let mixingRulesList = [
  {
    channelCount: 2,
    channelCountMode: 'max',
    channelInterpretation: 'speakers'
  },
  {
    channelCount: 4,
    channelCountMode: 'clamped-max',
    channelInterpretation: 'speakers'
  },
  // Test up-down-mix to some explicit speaker layouts.
  {
    channelCount: 1,
    channelCountMode: 'explicit',
    channelInterpretation: 'speakers'
  },
  {
    channelCount: 2,
    channelCountMode: 'explicit',
    channelInterpretation: 'speakers'
  },
  {
    channelCount: 4,
    channelCountMode: 'explicit',
    channelInterpretation: 'speakers'
  },
  {
    channelCount: 6,
    channelCountMode: 'explicit',
    channelInterpretation: 'speakers'
  },
  {
    channelCount: 2,
    channelCountMode: 'max',
    channelInterpretation: 'discrete'
  },
  {
    channelCount: 4,
    channelCountMode: 'clamped-max',
    channelInterpretation: 'discrete'
  },
  {
    channelCount: 4,
    channelCountMode: 'explicit',
    channelInterpretation: 'discrete'
  },
  {
    channelCount: 8,
    channelCountMode: 'explicit',
    channelInterpretation: 'discrete'
  },
];
// Every (mixing rule x connection) combination is one test case.
let numberOfTests = mixingRulesList.length * connectionsList.length;
// Print out the information for an individual test case.
function printTestInformation(
    testNumber, actualBuffer, expectedBuffer, frameLength, frameOffset) {
  // Debugging aid: dump the actual vs. expected channel data for one case.
  const actual = stringifyBuffer(actualBuffer, frameLength);
  const expected = stringifyBuffer(expectedBuffer, frameLength, frameOffset);
  debug('TEST CASE #' + testNumber + '\n');
  debug('actual channels:\n' + actual);
  debug('expected channels:\n' + expected);
}
function scheduleTest(
    testNumber, connections, channelCount, channelCountMode,
    channelInterpretation) {
  // Build a gain node configured with the mixing rule under test, feed it
  // one buffer source per connection, and schedule playback at this test
  // case's frame offset within the shared offline context.
  const mixNode = context.createGain();
  mixNode.channelCount = channelCount;
  mixNode.channelCountMode = channelCountMode;
  mixNode.channelInterpretation = channelInterpretation;
  mixNode.connect(context.destination);
  // Start at the right offset for this test case.
  const startTime = (testNumber * singleTestFrameLength) / sampleRate;
  for (const digit of connections) {
    // Each character is '1'..'8': the channel count of this connection.
    const numberOfChannels = digit.charCodeAt(0) - '0'.charCodeAt(0);
    const source = context.createBufferSource();
    // testBuffers is indexed by (channel count - 1).
    source.buffer = testBuffers[numberOfChannels - 1];
    source.connect(mixNode);
    source.start(startTime);
  }
}
function checkTestResult(
    renderedBuffer, testNumber, connections, channelCount,
    channelCountMode, channelInterpretation, should) {
  // Recompute the expected mix for one test case with the JS reference
  // implementation (speakersSum/discreteSum) and compare it against the
  // slice of the rendered output belonging to that case.
  let s = 'connections: ' + connections + ', ' + channelCountMode;
  // channelCount is ignored in "max" mode.
  if (channelCountMode == 'clamped-max' ||
      channelCountMode == 'explicit') {
    s += '(' + channelCount + ')';
  }
  s += ', ' + channelInterpretation;
  let computedNumberOfChannels = computeNumberOfChannels(
      connections, channelCount, channelCountMode);
  // Create a zero-initialized silent AudioBuffer with
  // computedNumberOfChannels.
  let destBuffer = context.createBuffer(
      computedNumberOfChannels, singleTestFrameLength,
      context.sampleRate);
  // Mix all of the connections into the destination buffer.
  for (let i = 0; i < connections.length; ++i) {
    // Convert from the 1-based channel count to a 0-based buffer index.
    let connectionNumberOfChannels =
        connections.charCodeAt(i) - '0'.charCodeAt(0);
    let sourceBuffer = testBuffers[connectionNumberOfChannels - 1];
    if (channelInterpretation == 'speakers') {
      speakersSum(sourceBuffer, destBuffer);
    } else if (channelInterpretation == 'discrete') {
      discreteSum(sourceBuffer, destBuffer);
    } else {
      // NOTE(review): unreachable for the interpretations tested here;
      // alert() is also unavailable in some harness environments.
      alert('Invalid channel interpretation!');
    }
  }
  // Use this when debugging mixing rules.
  // printTestInformation(testNumber, renderedBuffer, destBuffer,
  //     singleTestFrameLength, sampleFrameOffset);
  // Validate that destBuffer matches the rendered output. We need to
  // check the rendered output at a specific sample-frame-offset
  // corresponding to the specific test case we're checking for based on
  // testNumber.
  let sampleFrameOffset = testNumber * singleTestFrameLength;
  for (let c = 0; c < renderNumberOfChannels; ++c) {
    let renderedData = renderedBuffer.getChannelData(c);
    for (let frame = 0; frame < singleTestFrameLength; ++frame) {
      let renderedValue = renderedData[frame + sampleFrameOffset];
      let expectedValue = 0;
      if (c < destBuffer.numberOfChannels) {
        let expectedData = destBuffer.getChannelData(c);
        expectedValue = expectedData[frame];
      }
      // We may need to add an epsilon in the comparison if we add more
      // test vectors.
      if (renderedValue != expectedValue) {
        // Report the first mismatch and stop checking this case.
        // (Removed a dead `message` local and a commented-out
        // testFailed() call.)
        should(renderedValue, s).beEqualTo(expectedValue);
        return;
      }
    }
  }
  should(true, s).beTrue();
}
function checkResult(buffer, should) {
  // Verify the rendered buffer's dimensions, then validate every
  // (mixing rule x connection) combination at its frame offset.
  should(buffer.length, 'Rendered number of frames')
      .beEqualTo(numberOfTests * singleTestFrameLength);
  should(buffer.numberOfChannels, 'Rendered number of channels')
      .beEqualTo(renderNumberOfChannels);
  let testNumber = 0;
  for (const rules of mixingRulesList) {
    for (const connections of connectionsList) {
      checkTestResult(
          buffer, testNumber, connections, rules.channelCount,
          rules.channelCountMode, rules.channelInterpretation, should);
      ++testNumber;
    }
  }
}
audit.define(
    {label: 'test', description: 'Channel mixing rules for AudioNodes'},
    function(task, should) {
      // Create 8-channel offline audio context. Each test will render 8
      // sample-frames starting at sample-frame position testNumber * 8.
      let totalFrameLength = numberOfTests * singleTestFrameLength;
      context = new OfflineAudioContext(
          renderNumberOfChannels, totalFrameLength, sampleRate);
      // Set destination to discrete mixing so rendered channels are not
      // remixed before we inspect them.
      context.destination.channelCount = renderNumberOfChannels;
      context.destination.channelCountMode = 'explicit';
      context.destination.channelInterpretation = 'discrete';
      // Create test buffers from 1 to 8 channels.
      testBuffers = new Array();
      for (let i = 0; i < renderNumberOfChannels; ++i) {
        testBuffers[i] = createShiftedImpulseBuffer(
            context, i + 1, singleTestFrameLength);
      }
      // Schedule all the tests.
      let testNumber = 0;
      for (let m = 0; m < mixingRulesList.length; ++m) {
        let mixingRules = mixingRulesList[m];
        for (let i = 0; i < connectionsList.length; ++i, ++testNumber) {
          scheduleTest(
              testNumber, connectionsList[i], mixingRules.channelCount,
              mixingRules.channelCountMode,
              mixingRules.channelInterpretation);
        }
      }
      // Render, then check results. (Removed a stray empty statement
      // that followed this call.)
      context.startRendering().then(buffer => {
        checkResult(buffer, should);
        task.done();
      });
    });
audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,164 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode-connect-method-chaining.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
// AudioNode dictionary with associated arguments.
// |args|, when present, is the argument list passed to the factory method
// context['create' + name](...); entries without |args| call the factory
// with no arguments.
let nodeDictionary = [
  {name: 'Analyser'}, {name: 'BiquadFilter'}, {name: 'BufferSource'},
  {name: 'ChannelMerger', args: [6]},
  {name: 'ChannelSplitter', args: [6]}, {name: 'Convolver'},
  {name: 'Delay', args: []}, {name: 'DynamicsCompressor'}, {name: 'Gain'},
  {name: 'Oscillator'}, {name: 'Panner'},
  {name: 'ScriptProcessor', args: [512, 1, 1]}, {name: 'StereoPanner'},
  {name: 'WaveShaper'}
];
function verifyReturnedNode(should, config) {
  // connect() must return its destination node to support chaining.
  const matches = config.destination === config.returned;
  should(
      matches,
      'The return value of ' + config.desc + ' matches the destination ' +
          config.returned.constructor.name)
      .beEqualTo(true);
}
// Test utility for batch method checking: in order to test 3 method
// signatures, so we create 3 dummy destinations.
// 1) .connect(GainNode)
// 2) .connect(BiquadFilterNode, output)
// 3) .connect(ChannelMergerNode, output, input)
function testConnectMethod(context, should, options) {
  // Exercise the three connect() overloads against a freshly created node:
  //   1) .connect(destination)
  //   2) .connect(destination, output)
  //   3) .connect(destination, output, input)
  const source =
      context['create' + options.name].apply(context, options.args);
  const sourceName = source.constructor.name;

  const gainDest = context.createGain();
  verifyReturnedNode(should, {
    source: source,
    destination: gainDest,
    returned: source.connect(gainDest),
    desc: sourceName + '.connect(' + gainDest.constructor.name + ')'
  });

  const filterDest = context.createBiquadFilter();
  verifyReturnedNode(should, {
    source: source,
    destination: filterDest,
    returned: source.connect(filterDest, 0),
    desc:
        sourceName + '.connect(' + filterDest.constructor.name + ', 0)'
  });

  const mergerDest = context.createChannelMerger();
  verifyReturnedNode(should, {
    source: source,
    destination: mergerDest,
    returned: source.connect(mergerDest, 0, 1),
    desc: sourceName + '.connect(' + mergerDest.constructor.name +
        ', 0, 1)'
  });
}
let audit = Audit.createTaskRunner();
// Task: testing entries from the dictionary.
audit.define('from-dictionary', (task, should) => {
  let context = new AudioContext();
  // Each entry exercises all three connect() overloads via
  // testConnectMethod().
  for (let i = 0; i < nodeDictionary.length; i++)
    testConnectMethod(context, should, nodeDictionary[i]);
  task.done();
});
// Task: testing Media* nodes.
audit.define('media-group', (task, should) => {
  let context = new AudioContext();
  // Test MediaElementSourceNode needs an <audio> element.
  let mediaElement = document.createElement('audio');
  testConnectMethod(
      context, should,
      {name: 'MediaElementSource', args: [mediaElement]});
  // MediaStreamDestination takes no factory arguments.
  testConnectMethod(context, should, {name: 'MediaStreamDestination'});
  // MediaStreamSourceNode requires 'stream' object to be constructed,
  // which is a part of MediaStreamDestinationNode.
  let streamDestination = context.createMediaStreamDestination();
  let stream = streamDestination.stream;
  testConnectMethod(
      context, should, {name: 'MediaStreamSource', args: [stream]});
  task.done();
});
// Task: test the exception thrown by invalid operation.
audit.define('invalid-operation', (task, should) => {
  let contextA = new AudioContext();
  let contextB = new AudioContext();
  let gain1 = contextA.createGain();
  let gain2 = contextA.createGain();
  // Test if the first connection throws correctly. The first gain node
  // does not have the second output (output index 1), so it should throw.
  should(function() {
    gain1.connect(gain2, 1).connect(contextA.destination);
  }, 'Connecting with an invalid output').throw('IndexSizeError');
  // Test if the second connection throws correctly. The contextB's
  // destination is not compatible with the nodes from contextA, thus the
  // first connection succeeds but the second one should throw.
  should(
      function() {
        gain1.connect(gain2).connect(contextB.destination);
      },
      'Connecting to a node from the different context')
      .throw('InvalidAccessError');
  task.done();
});
// Task: verify if the method chaining actually works.
audit.define('verification', (task, should) => {
  // We pick the lowest sample rate allowed to run the test efficiently.
  let context = new OfflineAudioContext(1, 128, 3000);
  let constantBuffer = createConstantBuffer(context, 1, 1.0);
  let source = context.createBufferSource();
  source.buffer = constantBuffer;
  source.loop = true;
  let gain1 = context.createGain();
  gain1.gain.value = 0.5;
  let gain2 = context.createGain();
  gain2.gain.value = 0.25;
  // 1.0 x 0.5 x 0.25 = 0.125 if the chained connections were all made.
  source.connect(gain1).connect(gain2).connect(context.destination);
  source.start();
  context.startRendering()
      .then(function(buffer) {
        should(
            buffer.getChannelData(0),
            'The output of chained connection of gain nodes')
            .beConstantValueOf(0.125);
      })
      .then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode-connect-order.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
// Offline-render parameters for the connection-order test.
let sampleRate = 44100.0;
let renderLengthSeconds = 0.125;
let delayTimeSeconds = 0.1;
function createSinWaveBuffer(context, lengthInSeconds, frequency) {
  // Build a mono buffer containing a sine tone at |frequency| Hz.
  // Uses the file-level sampleRate, not context.sampleRate.
  const frameCount = lengthInSeconds * sampleRate;
  const audioBuffer = context.createBuffer(1, frameCount, sampleRate);
  const data = audioBuffer.getChannelData(0);
  for (let i = 0; i < audioBuffer.length; ++i) {
    data[i] = Math.sin(frequency * 2 * Math.PI * i / sampleRate);
  }
  return audioBuffer;
}
audit.define(
    {
      label: 'Test connections',
      description:
          'AudioNode connection order doesn\'t trigger assertion errors'
    },
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          1, sampleRate * renderLengthSeconds, sampleRate);
      let toneBuffer =
          createSinWaveBuffer(context, renderLengthSeconds, 880);
      let bufferSource = context.createBufferSource();
      bufferSource.buffer = toneBuffer;
      bufferSource.connect(context.destination);
      // Note: nothing is ever connected into this delay node; that is the
      // point of the test below.
      let delay = context.createDelay();
      delay.delayTime.value = delayTimeSeconds;
      // We connect delay node to gain node before anything is connected
      // to delay node itself. We do this because we try to trigger the
      // ASSERT which might be fired due to AudioNode connection order,
      // especially when gain node and delay node is involved e.g.
      // https://bugs.webkit.org/show_bug.cgi?id=76685.
      should(() => {
        let gain = context.createGain();
        gain.connect(context.destination);
        delay.connect(gain);
      }, 'Connecting nodes').notThrow();
      bufferSource.start(0);
      let promise = context.startRendering();
      should(promise, 'OfflineContext startRendering()')
          .beResolved()
          .then(task.done.bind(task));
    });
audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,214 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode-disconnect-audioparam.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let renderQuantum = 128;  // Frames processed per render quantum.

let sampleRate = 44100;
let renderDuration = 0.5;  // Total render length, in seconds.
let disconnectTime = 0.5 * renderDuration;  // When the disconnect happens.

let audit = Audit.createTaskRunner();
// Converts |disconnectTime| (seconds) to a sample index, rounded down to
// the start of the containing render quantum — disconnections take
// effect only on quantum boundaries.
function getDisconnectIndex(disconnectTime) {
  const sampleIndex = disconnectTime * sampleRate;
  return sampleIndex - (sampleIndex % renderQuantum);
}
// Returns the index of the first element in |array| equal to
// |targetValue|, or -1 if no element matches.
function getValueChangeIndex(array, targetValue) {
  return array.findIndex((element) => element === targetValue);
}
// Task 1: test disconnect(AudioParam) method.
audit.define('disconnect(AudioParam)', (task, should) => {
  // Creates a buffer source with value [1] and then connect it to two
  // gain nodes in series. The output of the buffer source is lowered by
  // half (* 0.5) and then connected to two |.gain| AudioParams in each
  // gain node.
  //
  // (1) bufferSource => gain1 => gain2
  // (2) bufferSource => half => gain1.gain
  // (3) half => gain2.gain
  //
  // This graph should produce the output of 2.25 (= 1 * 1.5 * 1.5). After
  // disconnecting (3), it should produce 1.5.
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
  let source = context.createBufferSource();
  let buffer1ch = createConstantBuffer(context, 1, 1);
  let half = context.createGain();
  let gain1 = context.createGain();
  let gain2 = context.createGain();

  source.buffer = buffer1ch;
  source.loop = true;
  half.gain.value = 0.5;
  source.connect(gain1);
  gain1.connect(gain2);
  gain2.connect(context.destination);
  source.connect(half);

  // Connecting |half| to both |gain1.gain| and |gain2.gain| amplifies the
  // signal by 2.25 (= 1.5 * 1.5) because each gain node amplifies the
  // signal by 1.5 (= 1.0 + 0.5).
  half.connect(gain1.gain);
  half.connect(gain2.gain);

  source.start();

  // Schedule the disconnection at the half of render duration. suspend()
  // pauses rendering at a render-quantum boundary, so the disconnect
  // lands exactly at |disconnectTime| before resume().
  context.suspend(disconnectTime).then(function() {
    half.disconnect(gain2.gain);
    context.resume();
  });

  context.startRendering()
      .then(function(buffer) {
        let channelData = buffer.getChannelData(0);
        let disconnectIndex = getDisconnectIndex(disconnectTime);
        let valueChangeIndex = getValueChangeIndex(channelData, 1.5);

        // Expected values are: 1 * 1.5 * 1.5 -> 1 * 1.5 = [2.25, 1.5]
        should(channelData, 'Channel #0').containValues([2.25, 1.5]);
        should(valueChangeIndex, 'The index of value change')
            .beEqualTo(disconnectIndex);
      })
      .then(() => task.done());
});
// Task 2: test disconnect(AudioParam, output) method.
audit.define('disconnect(AudioParam, output)', (task, should) => {
  // Create a 2-channel buffer source with [1, 2] in each channel and
  // make a serial connection through gain1 and gain2. Then halve the
  // buffer source with a gain node and connect it to a 2-output
  // splitter. Connect each output to 2 gain AudioParams respectively.
  //
  // (1) bufferSource => gain1 => gain2
  // (2) bufferSource => half => splitter(2)
  // (3) splitter#0 => gain1.gain
  // (4) splitter#1 => gain2.gain
  //
  // This graph should produce 3 (= 1 * 1.5 * 2) and 6 (= 2 * 1.5 * 2) for
  // each channel. After disconnecting (4), it should output 1.5 and 3.
  let context =
      new OfflineAudioContext(2, renderDuration * sampleRate, sampleRate);
  let source = context.createBufferSource();
  let buffer2ch = createConstantBuffer(context, 1, [1, 2]);
  let splitter = context.createChannelSplitter(2);
  let half = context.createGain();
  let gain1 = context.createGain();
  let gain2 = context.createGain();

  source.buffer = buffer2ch;
  source.loop = true;
  half.gain.value = 0.5;
  source.connect(gain1);
  gain1.connect(gain2);
  gain2.connect(context.destination);

  // |source| originally is [1, 2] but it becomes [0.5, 1] after 0.5 gain.
  // Each splitter's output will be applied to |gain1.gain| and
  // |gain2.gain| respectively in an additive fashion.
  source.connect(half);
  half.connect(splitter);

  // This amplifies the signal by 1.5. (= 1.0 + 0.5)
  splitter.connect(gain1.gain, 0);

  // This amplifies the signal by 2. (= 1.0 + 1.0)
  splitter.connect(gain2.gain, 1);

  source.start();

  // Schedule the disconnection at the half of render duration.
  context.suspend(disconnectTime).then(function() {
    splitter.disconnect(gain2.gain, 1);
    context.resume();
  });

  context.startRendering()
      .then(function(buffer) {
        let channelData0 = buffer.getChannelData(0);
        let channelData1 = buffer.getChannelData(1);

        let disconnectIndex = getDisconnectIndex(disconnectTime);
        let valueChangeIndexCh0 = getValueChangeIndex(channelData0, 1.5);
        let valueChangeIndexCh1 = getValueChangeIndex(channelData1, 3);

        // Expected values are: 1 * 1.5 * 2 -> 1 * 1.5 = [3, 1.5]
        should(channelData0, 'Channel #0').containValues([3, 1.5]);
        should(
            valueChangeIndexCh0,
            'The index of value change in channel #0')
            .beEqualTo(disconnectIndex);

        // Expected values are: 2 * 1.5 * 2 -> 2 * 1.5 = [6, 3]
        should(channelData1, 'Channel #1').containValues([6, 3]);
        should(
            valueChangeIndexCh1,
            'The index of value change in channel #1')
            .beEqualTo(disconnectIndex);
      })
      .then(() => task.done());
});
// Task 3: exception checks. Uses a real-time AudioContext; only the
// thrown exception types are verified, nothing is rendered.
audit.define('exceptions', (task, should) => {
  let context = new AudioContext();
  let gain1 = context.createGain();
  let splitter = context.createChannelSplitter(2);
  let gain2 = context.createGain();
  let gain3 = context.createGain();

  // Connect a splitter to gain nodes (both node inputs and AudioParams)
  // so we can test the possible ways of disconnecting the nodes to
  // verify that appropriate exceptions are thrown.
  gain1.connect(splitter);
  splitter.connect(gain2.gain, 0);
  splitter.connect(gain3.gain, 1);
  gain2.connect(gain3);
  gain3.connect(context.destination);

  // gain1 is not connected to gain3.gain. Exception should be thrown.
  should(function() {
    gain1.disconnect(gain3.gain);
  }, 'gain1.disconnect(gain3.gain)').throw('InvalidAccessError');

  // When the output index is good but the destination is invalid.
  should(function() {
    splitter.disconnect(gain1.gain, 1);
  }, 'splitter.disconnect(gain1.gain, 1)').throw('InvalidAccessError');

  // When both arguments are wrong, throw IndexSizeError first.
  should(function() {
    splitter.disconnect(gain1.gain, 2);
  }, 'splitter.disconnect(gain1.gain, 2)').throw('IndexSizeError');

  task.done();
});

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,298 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode-disconnect.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Task 1: test disconnect() method.
audit.define('disconnect()', (task, should) => {
  // Connect a source to multiple gain nodes, each connected to the
  // destination. Then disconnect the source. The expected output should
  // be all zeros since the source was disconnected.
  // createConstantBuffer() is provided by audit-util.js (see includes).
  let context = new OfflineAudioContext(1, 128, 44100);
  let source = context.createBufferSource();
  let buffer1ch = createConstantBuffer(context, 128, [1]);
  let gain1 = context.createGain();
  let gain2 = context.createGain();
  let gain3 = context.createGain();

  source.buffer = buffer1ch;
  source.connect(gain1);
  source.connect(gain2);
  source.connect(gain3);
  gain1.connect(context.destination);
  gain2.connect(context.destination);
  gain3.connect(context.destination);
  source.start();

  // This disconnects everything.
  source.disconnect();

  context.startRendering()
      .then(function(buffer) {
        // With everything disconnected, the result should be zero.
        should(buffer.getChannelData(0), 'Channel #0')
            .beConstantValueOf(0);
      })
      .then(() => task.done());
});
// Task 2: test disconnect(output) method.
audit.define('disconnect(output)', (task, should) => {
  // Create multiple connections from each output of a ChannelSplitter
  // to a gain node. Then test if disconnecting a single output of
  // splitter is actually disconnected.
  let context = new OfflineAudioContext(1, 128, 44100);
  let source = context.createBufferSource();
  let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]);
  let splitter = context.createChannelSplitter(3);
  let sum = context.createGain();

  source.buffer = buffer3ch;
  source.connect(splitter);
  splitter.connect(sum, 0);
  splitter.connect(sum, 1);
  splitter.connect(sum, 2);
  sum.connect(context.destination);
  source.start();

  // This disconnects the second output (index 1, carrying the value 2).
  splitter.disconnect(1);

  context.startRendering()
      .then(function(buffer) {
        // The rendered channel should contain 4. (= 1 + 0 + 3)
        should(buffer.getChannelData(0), 'Channel #0')
            .beConstantValueOf(4);
      })
      .then(() => task.done());
});
// Task 3: test disconnect(AudioNode) method.
audit.define('disconnect(AudioNode)', (task, should) => {
  // Connect a source to multiple gain nodes. Then test if disconnecting a
  // single destination selectively works correctly.
  let context = new OfflineAudioContext(1, 128, 44100);
  let source = context.createBufferSource();
  let buffer1ch = createConstantBuffer(context, 128, [1]);
  let gain1 = context.createGain();
  let gain2 = context.createGain();
  let gain3 = context.createGain();
  let orphan = context.createGain();

  source.buffer = buffer1ch;
  source.connect(gain1);
  source.connect(gain2);
  source.connect(gain3);
  gain1.connect(context.destination);
  gain2.connect(context.destination);
  gain3.connect(context.destination);
  source.start();

  // Disconnect only the path through gain2.
  source.disconnect(gain2);

  context.startRendering()
      .then(function(buffer) {
        // The destination sums gain1 and gain3: 1 + 0 + 1 = 2.
        should(buffer.getChannelData(0), 'Channel #0')
            .beConstantValueOf(2);
      })
      .then(() => task.done());
});
// Task 4: test disconnect(AudioNode, output) method.
audit.define('disconnect(AudioNode, output)', (task, should) => {
  // Connect a buffer with 2 channels with each containing 1 and 2
  // respectively to a ChannelSplitter, then connect the splitter to 2
  // gain nodes as shown below:
  //   (1) splitter#0 => gain1
  //   (2) splitter#0 => gain2
  //   (3) splitter#1 => gain2
  // Then disconnect (2) and verify if the selective disconnection on a
  // specified output of the destination node works correctly.
  let context = new OfflineAudioContext(1, 128, 44100);
  let source = context.createBufferSource();
  let buffer2ch = createConstantBuffer(context, 128, [1, 2]);
  let splitter = context.createChannelSplitter(2);
  let gain1 = context.createGain();
  let gain2 = context.createGain();

  source.buffer = buffer2ch;
  source.connect(splitter);
  splitter.connect(gain1, 0);  // gain1 gets channel 0.
  splitter.connect(gain2, 0);  // gain2 sums channel 0 and 1.
  splitter.connect(gain2, 1);
  gain1.connect(context.destination);
  gain2.connect(context.destination);
  source.start();

  splitter.disconnect(gain2, 0);  // Now gain2 gets [2]

  context.startRendering()
      .then(function(buffer) {
        // The sum of gain1 and gain2 should produce value 3. (= 1 + 2)
        should(buffer.getChannelData(0), 'Channel #0')
            .beConstantValueOf(3);
      })
      .then(() => task.done());
});
// Task 5: test disconnect(AudioNode, output, input) method.
audit.define('disconnect(AudioNode, output, input)', (task, should) => {
  // Create a 3-channel buffer with [1, 2, 3] in each channel and then
  // pass it through a splitter and a merger. Each input/output of the
  // splitter and the merger is connected in a sequential order as shown
  // below.
  //   (1) splitter#0 => merger#0
  //   (2) splitter#1 => merger#1
  //   (3) splitter#2 => merger#2
  // Then disconnect (3) and verify if each channel contains [1] and [2]
  // respectively (and the third channel is silent).
  let context = new OfflineAudioContext(3, 128, 44100);
  let source = context.createBufferSource();
  let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]);
  let splitter = context.createChannelSplitter(3);
  let merger = context.createChannelMerger(3);

  source.buffer = buffer3ch;
  source.connect(splitter);
  splitter.connect(merger, 0, 0);
  splitter.connect(merger, 1, 1);
  splitter.connect(merger, 2, 2);
  merger.connect(context.destination);
  source.start();

  splitter.disconnect(merger, 2, 2);

  context.startRendering()
      .then(function(buffer) {
        // Each channel should have 1, 2, and 0 respectively.
        should(buffer.getChannelData(0), 'Channel #0')
            .beConstantValueOf(1);
        should(buffer.getChannelData(1), 'Channel #1')
            .beConstantValueOf(2);
        should(buffer.getChannelData(2), 'Channel #2')
            .beConstantValueOf(0);
      })
      .then(() => task.done());
});
// Task 6: exception checks. Each should() verifies the exception type
// mandated by the spec for an invalid disconnect() call.
audit.define('exceptions', (task, should) => {
  let context = new OfflineAudioContext(2, 128, 44100);
  let gain1 = context.createGain();
  let splitter = context.createChannelSplitter(2);
  let merger = context.createChannelMerger(2);
  let gain2 = context.createGain();
  let gain3 = context.createGain();

  // Connect a splitter to gain nodes and merger so we can test the
  // possible ways of disconnecting the nodes to verify that appropriate
  // exceptions are thrown.
  gain1.connect(splitter);
  splitter.connect(gain2, 0);
  splitter.connect(gain3, 1);
  splitter.connect(merger, 0, 0);
  splitter.connect(merger, 1, 1);
  gain2.connect(gain3);
  gain3.connect(context.destination);
  merger.connect(context.destination);

  // There is no output #2. An exception should be thrown.
  should(function() {
    splitter.disconnect(2);
  }, 'splitter.disconnect(2)').throw('IndexSizeError');

  // Disconnecting the output already disconnected should not throw.
  should(function() {
    splitter.disconnect(1);
    splitter.disconnect(1);
  }, 'Disconnecting a connection twice').notThrow();

  // gain1 is not connected to gain2. An exception should be thrown.
  should(function() {
    gain1.disconnect(gain2);
  }, 'gain1.disconnect(gain2)').throw('InvalidAccessError');

  // gain1 and gain3 are not connected. An exception should be thrown.
  should(function() {
    gain1.disconnect(gain3);
  }, 'gain1.disconnect(gain3)').throw('InvalidAccessError');

  // There is no output #2 in the splitter. An exception should be thrown.
  should(function() {
    splitter.disconnect(gain2, 2);
  }, 'splitter.disconnect(gain2, 2)').throw('IndexSizeError');

  // The splitter and gain1 are not connected. An exception should be
  // thrown.
  should(function() {
    splitter.disconnect(gain1, 0);
  }, 'splitter.disconnect(gain1, 0)').throw('InvalidAccessError');

  // The splitter output #0 and the gain3 output #0 are not connected. An
  // exception should be thrown.
  should(function() {
    splitter.disconnect(gain3, 0, 0);
  }, 'splitter.disconnect(gain3, 0, 0)').throw('InvalidAccessError');

  // The output index is out of bound. An exception should be thrown.
  should(function() {
    splitter.disconnect(merger, 3, 0);
  }, 'splitter.disconnect(merger, 3, 0)').throw('IndexSizeError');

  task.done();
});
// Regression test for crbug.com/656652: disconnecting and then
// reconnecting nodes must correctly track disabled outputs; the test
// passes if rendering completes at all.
audit.define('disabled-outputs', (task, should) => {
  // See crbug.com/656652
  let context = new OfflineAudioContext(2, 1024, 44100);
  let g1 = context.createGain();
  let g2 = context.createGain();
  g1.connect(g2);
  g1.disconnect(g2);
  let g3 = context.createGain();
  g2.connect(g3);
  g1.connect(g2);
  context.startRendering()
      .then(function() {
        // If we make it here, we passed.
        // Fix: failure message was misspelled as 'inccorrectly'.
        should(true, 'Disabled outputs handled')
            .message('correctly', 'incorrectly');
      })
      .then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,93 @@
<!DOCTYPE html>
<html>
<head>
<title>
audionode.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context = 0;
let context2 = 0;
let context3 = 0;

audit.define(
    {label: 'test', description: 'Basic tests for AudioNode API.'},
    function(task, should) {
      context = new AudioContext();
      // Stored on |window| deliberately so the bare `audioNode`
      // references below resolve as a global.
      window.audioNode = context.createBufferSource();

      // Check input and output numbers of AudioSourceNode.
      should(audioNode.numberOfInputs, 'AudioBufferSource.numberOfInputs')
          .beEqualTo(0);
      should(
          audioNode.numberOfOutputs, 'AudioBufferSource.numberOfOutputs')
          .beEqualTo(1);

      // Check input and output numbers of AudioDestinationNode
      should(
          context.destination.numberOfInputs,
          'AudioContext.destination.numberOfInputs')
          .beEqualTo(1);
      should(
          context.destination.numberOfOutputs,
          'AudioContext.destination.numberOfOutputs')
          .beEqualTo(0);

      // Try calling connect() method with illegal values.
      should(
          () => audioNode.connect(0, 0, 0), 'audioNode.connect(0, 0, 0)')
          .throw('TypeError');
      should(
          () => audioNode.connect(null, 0, 0),
          'audioNode.connect(null, 0, 0)')
          .throw('TypeError');
      should(
          () => audioNode.connect(context.destination, 5, 0),
          'audioNode.connect(context.destination, 5, 0)')
          .throw('IndexSizeError');
      should(
          () => audioNode.connect(context.destination, 0, 5),
          'audioNode.connect(context.destination, 0, 5)')
          .throw('IndexSizeError');

      // A valid connection should succeed.
      should(
          () => audioNode.connect(context.destination, 0, 0),
          'audioNode.connect(context.destination, 0, 0)')
          .notThrow();

      // Create a new context and try to connect the other context's node
      // to this one.
      context2 = new AudioContext();
      should(
          () => window.audioNode.connect(context2.destination),
          'Connecting a node to a different context')
          .throw('InvalidAccessError');

      // 3-arg AudioContext doesn't create an offline context anymore.
      should(
          () => context3 = new AudioContext(1, 44100, 44100),
          'context3 = new AudioContext(1, 44100, 44100)')
          .throw('TypeError');

      // Ensure it is an EventTarget
      should(
          audioNode instanceof EventTarget, 'AudioNode is an EventTarget')
          .beTrue();

      task.done();
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,66 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Setting of channelCountMode and channelInterpretation
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
// Fairly arbitrary sample rate and number of frames, except the number of
// frames should be more than a few render quantums.
let sampleRate = 16000;
let renderFrames = 10 * 128;  // 10 render quanta.

let audit = Audit.createTaskRunner();

// Verify channelInterpretation can be set and persists through rendering.
audit.define('interp', (task, should) => {
  let context = new OfflineAudioContext(1, renderFrames, sampleRate);
  let node = context.createGain();

  // Set a new interpretation and verify that it changed.
  node.channelInterpretation = 'discrete';
  let value = node.channelInterpretation;
  should(value, 'node.channelInterpretation').beEqualTo('discrete');

  node.connect(context.destination);
  context.startRendering()
      .then(function(buffer) {
        // After rendering, the assigned value should still be in effect.
        should(
            node.channelInterpretation,
            'After rendering node.channelInterpretation')
            .beEqualTo('discrete');
      })
      .then(() => task.done());
});

// Verify channelCountMode can be set and persists through rendering.
audit.define('mode', (task, should) => {
  let context = new OfflineAudioContext(1, renderFrames, sampleRate);
  let node = context.createGain();

  // Set a new mode and verify that it changed.
  node.channelCountMode = 'explicit';
  let value = node.channelCountMode;
  should(value, 'node.channelCountMode').beEqualTo('explicit');

  node.connect(context.destination);
  context.startRendering()
      .then(function(buffer) {
        // After rendering, the assigned value should still be in effect.
        should(
            node.channelCountMode,
            'After rendering node.channelCountMode')
            .beEqualTo('explicit');
      })
      .then(() => task.done());
});

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,73 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test AudioWorkletNode's automatic pull feature
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
</head>
<body>
<script id="layout-test-code">
const audit = Audit.createTaskRunner();

// Arbitrary sample rate. Anything should work.
const sampleRate = 48000;
const renderLength = RENDER_QUANTUM_FRAMES * 2;
const channelCount = 1;
const filePath = 'processors/zero-output-processor.js';

// Constant value produced by the source for the first render quantum.
const sourceOffset = 0.5;

// Connect a constant source node to the zero-output AudioWorkletNode.
// Then verify if it captures the data correctly.
audit.define('setup-worklet', (task, should) => {
  const context =
      new OfflineAudioContext(channelCount, renderLength, sampleRate);

  context.audioWorklet.addModule(filePath).then(() => {
    let testSource =
        new ConstantSourceNode(context, { offset: sourceOffset });
    let zeroOutputWorkletNode =
        new AudioWorkletNode(context, 'zero-output-processor', {
          numberOfInputs: 1,
          numberOfOutputs: 0,
          processorOptions: {
            bufferLength: renderLength,
            // NOTE(review): 'channeCount' (sic) is the exact key the
            // zero-output-processor reads; renaming only one side would
            // break the pairing.
            channeCount: channelCount
          }
        });

    // Start the source and stop at the first render quantum.
    testSource.connect(zeroOutputWorkletNode);
    testSource.start();
    testSource.stop(RENDER_QUANTUM_FRAMES/sampleRate);

    zeroOutputWorkletNode.port.onmessage = (event) => {
      // The |capturedBuffer| can be multichannel. Iterate through it.
      for (let i = 0; i < event.data.capturedBuffer.length; ++i) {
        let buffer = event.data.capturedBuffer[i];
        // Split the captured buffer in half for the easier test:
        // quantum 1 has the constant, quantum 2 is silence (source
        // stopped).
        should(buffer.subarray(0, RENDER_QUANTUM_FRAMES),
            'The first half of the captured buffer')
            .beConstantValueOf(sourceOffset);
        should(buffer.subarray(RENDER_QUANTUM_FRAMES, renderLength),
            'The second half of the captured buffer')
            .beConstantValueOf(0);
      }
      task.done();
    };

    // Starts the rendering, but we don't need the rendered buffer from
    // the context.
    context.startRendering();
  });
});

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,42 @@
/**
* @class ZeroOutputProcessor
* @extends AudioWorkletProcessor
*
* This processor accumulates the incoming buffer and send the buffered data
* to the main thread when it reaches the specified frame length. The processor
* only supports the single input.
*/
const kRenderQuantumFrames = 128;

// Fix: class name was misspelled "ZeroOuttputProcessor"; renamed to match
// the @class documentation above. The registered processor name string
// ('zero-output-processor') is unchanged, so callers are unaffected.
class ZeroOutputProcessor extends AudioWorkletProcessor {
  constructor(options) {
    super();

    // Total number of frames to capture before posting back.
    this._framesRequested = options.processorOptions.bufferLength;
    this._framesCaptured = 0;

    // One Float32Array per channel. NOTE(review): the option key
    // 'channeCount' (sic) is kept as-is because the instantiating test
    // page sends exactly that key.
    this._buffer = [];
    for (let i = 0; i < options.processorOptions.channeCount; ++i) {
      this._buffer[i] = new Float32Array(this._framesRequested);
    }
  }

  process(inputs) {
    let input = inputs[0];
    let startIndex = this._framesCaptured;
    let endIndex = startIndex + kRenderQuantumFrames;

    // Copy this render quantum into the capture buffer, per channel.
    for (let i = 0; i < this._buffer.length; ++i) {
      this._buffer[i].subarray(startIndex, endIndex).set(input[i]);
    }
    this._framesCaptured = endIndex;

    if (this._framesCaptured >= this._framesRequested) {
      // Enough frames collected: hand the audio to the main thread and
      // return false so the processor can be garbage collected.
      this.port.postMessage({ capturedBuffer: this._buffer });
      return false;
    } else {
      return true;
    }
  }
}

registerProcessor('zero-output-processor', ZeroOutputProcessor);

View file

@ -0,0 +1,42 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-allpass.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

audit.define(
    {label: 'test', description: 'Biquad allpass filter'},
    function(task, should) {
      // Create offline audio context. |sampleRate| and
      // |renderLengthSeconds| come from the included biquad-testing.js —
      // TODO confirm.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // One entry per filter instance; presumably |cutoff| is a
      // normalized frequency in [0, 1] — verify against
      // biquad-testing.js.
      let filterParameters = [
        {cutoff: 0, q: 10, gain: 1},
        {cutoff: 1, q: 10, gain: 1},
        {cutoff: .5, q: 0, gain: 1},
        {cutoff: 0.25, q: 10, gain: 1},
      ];
      createTestAndRun(context, 'allpass', {
        should: should,
        threshold: 3.9337e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,406 @@
<!DOCTYPE html>
<html>
<head>
<title>
Biquad Automation Test
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/audioparam-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
// Don't need to run these tests at high sampling rate, so just use a low
// one to reduce memory usage and complexity.
let sampleRate = 16000;

// How long to render for each test, in seconds.
let renderDuration = 0.25;

// Where to end the automations. Fairly arbitrary, but must end before
// the renderDuration.
let automationEndTime = renderDuration / 2;

let audit = Audit.createTaskRunner();
// Linear-ramp automation function: interpolates between |v0| (at time
// |t0|) and |v1| (at time |t1|), evaluated at time |t|.
function linearRamp(t, v0, v1, t0, t1) {
  const delta = v1 - v0;
  const progress = t - t0;
  const span = t1 - t0;
  return v0 + delta * progress / span;
}
// Generate the filter coefficients for the specified filter using the
// given parameters for the given duration. |filterTypeFunction| is a
// function that returns the filter coefficients for one set of
// parameters. |parameters| is a property bag that contains the start and
// end values (as an array) for each of the biquad attributes. The
// properties are |freq|, |Q|, |gain|, and |detune|. |duration| is the
// number of seconds for which the coefficients are generated.
//
// A property bag with properties |b0|, |b1|, |b2|, |a1|, |a2|. Each
// propery is an array consisting of the coefficients for the time-varying
// biquad filter.
function generateFilterCoefficients(
    filterTypeFunction, parameters, duration) {
  // Arrays are sized for the full render so the verifier can compare
  // every rendered frame; only the first |endFrame|+1 entries vary.
  let renderEndFrame = Math.ceil(renderDuration * sampleRate);
  let endFrame = Math.ceil(duration * sampleRate);
  let nCoef = renderEndFrame;
  let b0 = new Float64Array(nCoef);
  let b1 = new Float64Array(nCoef);
  let b2 = new Float64Array(nCoef);
  let a1 = new Float64Array(nCoef);
  let a2 = new Float64Array(nCoef);
  // |k| is the write index; it always mirrors |frame| below.
  let k = 0;
  // If the property is not given, use the defaults.
  let freqs = parameters.freq || [350, 350];
  let qs = parameters.Q || [1, 1];
  let gains = parameters.gain || [0, 0];
  let detunes = parameters.detune || [0, 0];

  for (let frame = 0; frame <= endFrame; ++frame) {
    // Apply linear ramp at frame |frame| to each automated parameter.
    let f =
        linearRamp(frame / sampleRate, freqs[0], freqs[1], 0, duration);
    let q = linearRamp(frame / sampleRate, qs[0], qs[1], 0, duration);
    let g =
        linearRamp(frame / sampleRate, gains[0], gains[1], 0, duration);
    let d = linearRamp(
        frame / sampleRate, detunes[0], detunes[1], 0, duration);

    // Compute actual frequency parameter (detune is in cents).
    f = f * Math.pow(2, d / 1200);

    // Compute filter coefficients; frequency is normalized to Nyquist.
    let coef = filterTypeFunction(f / (sampleRate / 2), q, g);
    b0[k] = coef.b0;
    b1[k] = coef.b1;
    b2[k] = coef.b2;
    a1[k] = coef.a1;
    a2[k] = coef.a2;
    ++k;
  }

  // Fill the rest of the arrays with the constant value to the end of
  // the rendering duration. (Assumes duration < renderDuration, which
  // holds for automationEndTime — TODO confirm for other callers.)
  b0.fill(b0[endFrame], endFrame + 1);
  b1.fill(b1[endFrame], endFrame + 1);
  b2.fill(b2[endFrame], endFrame + 1);
  a1.fill(a1[endFrame], endFrame + 1);
  a2.fill(a2[endFrame], endFrame + 1);

  return {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2};
}
// Apply the given time-varying biquad filter to the given signal,
// |signal|. |coef| should be the time-varying coefficients of the
// filter, as returned by |generateFilterCoefficients|.
// Runs |signal| through the time-varying biquad described by |coef| (as
// returned by |generateFilterCoefficients|) using the direct-form I
// difference equation, and returns the single-precision result.
function timeVaryingFilter(signal, coef) {
  const {b0, b1, b2, a1, a2} = coef;
  const length = signal.length;

  // Accumulate in double precision.
  const y = new Float64Array(length);

  // The first two outputs assume x[-1] = x[-2] = y[-1] = y[-2] = 0.
  // (Requires signal.length >= 2.)
  y[0] = b0[0] * signal[0];
  y[1] = b0[1] * signal[1] + b1[1] * signal[0] - a1[1] * y[0];

  for (let n = 2; n < length; ++n) {
    y[n] = b0[n] * signal[n] + b1[n] * signal[n - 1] +
        b2[n] * signal[n - 2];
    y[n] -= a1[n] * y[n - 1] + a2[n] * y[n - 2];
  }

  // Round to single precision to match what the audio engine produces.
  return y.map(Math.fround);
}
// Configure the audio graph using |context|. Returns the biquad filter
// node and the AudioBuffer used for the source.
// Builds the test graph on |context|: a sine-tone buffer source feeding
// a BiquadFilterNode that feeds the destination, and starts the source.
// Returns the filter node and the source AudioBuffer (so callers can
// read the input samples).
function configureGraph(context, toneFrequency) {
  // The source is just a simple sine wave.
  const src = context.createBufferSource();
  const buffer =
      context.createBuffer(1, renderDuration * sampleRate, sampleRate);
  const samples = buffer.getChannelData(0);
  const omega = 2 * Math.PI * toneFrequency / sampleRate;
  for (let k = 0; k < samples.length; ++k) {
    samples[k] = Math.sin(omega * k);
  }
  src.buffer = buffer;

  const biquad = context.createBiquadFilter();
  src.connect(biquad);
  biquad.connect(context.destination);
  src.start();

  return {filter: biquad, source: buffer};
}
// Returns a completion handler for startRendering() that recomputes the
// expected output of the time-varying filter and compares the rendered
// result against it.
//
// |filterCreator| produces coefficients for one parameter set (from
// biquad-filters.js — TODO confirm), |threshold| is the allowed absolute
// error, |parameters| holds the automation start/end values, |input| is
// the source samples, and |message| labels the assertion.
function createFilterVerifier(
    should, filterCreator, threshold, parameters, input, message) {
  return function(resultBuffer) {
    let actual = resultBuffer.getChannelData(0);
    let coefs = generateFilterCoefficients(
        filterCreator, parameters, automationEndTime);

    // Fix: |reference| was assigned without a declaration, creating an
    // implicit global (a ReferenceError under 'use strict').
    let reference = timeVaryingFilter(input, coefs);

    should(actual, message).beCloseToArray(reference, {
      absoluteThreshold: threshold
    });
  };
}
// Automate just the frequency parameter. A bandpass filter is used where
// the center frequency is swept across the source (which is a simple
// tone).
audit.define('automate-freq', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);

  // Center frequency of bandpass filter and also the frequency of the
  // test tone.
  let centerFreq = 10 * 440;

  // Sweep the frequency +/- 5*440 Hz from the center. This should cause
  // the output to be low at the beginning and end of the test where the
  // tone is outside the pass band of the filter, but high in the middle
  // of the automation time where the tone is near the center of the pass
  // band. Make sure the frequency sweep stays inside the Nyquist
  // frequency.
  let parameters = {freq: [centerFreq - 5 * 440, centerFreq + 5 * 440]};
  let graph = configureGraph(context, centerFreq);
  let f = graph.filter;
  let b = graph.source;

  f.type = 'bandpass';
  f.frequency.setValueAtTime(parameters.freq[0], 0);
  f.frequency.linearRampToValueAtTime(
      parameters.freq[1], automationEndTime);

  // createBandpassFilter comes from biquad-filters.js — TODO confirm.
  context.startRendering()
      .then(createFilterVerifier(
          should, createBandpassFilter, 4.6455e-6, parameters,
          b.getChannelData(0),
          'Output of bandpass filter with frequency automation'))
      .then(() => task.done());
});
// Automate just the Q parameter. A bandpass filter is used where the Q
// of the filter is swept.
audit.define('automate-q', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);

  // The frequency of the test tone.
  let centerFreq = 440;

  // Sweep the Q parameter between 1 and 200. This will cause the output
  // of the filter to pass most of the tone at the beginning to passing
  // less of the tone at the end. This is because we set center frequency
  // of the bandpass filter to be slightly off from the actual tone.
  let parameters = {
    Q: [1, 200],
    // Center frequency of the bandpass filter is just 25 Hz above the
    // tone frequency.
    freq: [centerFreq + 25, centerFreq + 25]
  };
  let graph = configureGraph(context, centerFreq);
  let f = graph.filter;
  let b = graph.source;

  f.type = 'bandpass';
  f.frequency.value = parameters.freq[0];
  f.Q.setValueAtTime(parameters.Q[0], 0);
  f.Q.linearRampToValueAtTime(parameters.Q[1], automationEndTime);

  context.startRendering()
      .then(createFilterVerifier(
          should, createBandpassFilter, 9.8348e-7, parameters,
          b.getChannelData(0),
          'Output of bandpass filter with Q automation'))
      .then(() => task.done());
});
// Automate just the gain of the lowshelf filter. A test tone will be in
// the lowshelf part of the filter. The output will vary as the gain of
// the lowshelf is changed.
audit.define('automate-gain', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);

  // Frequency of the test tone.
  const toneFreq = 440;

  // Place the lowshelf cutoff well above the tone, and ramp the shelf gain
  // from +20 dB down to -20 dB.  (Going from 20 to -20 makes it easy to
  // verify that the filter stayed stable.)
  const parameters = {freq: [3500, 3500], gain: [20, -20]};

  const {filter, source} = configureGraph(context, toneFreq);
  filter.type = 'lowshelf';
  filter.frequency.value = parameters.freq[0];
  filter.gain.setValueAtTime(parameters.gain[0], 0);
  filter.gain.linearRampToValueAtTime(parameters.gain[1], automationEndTime);

  context.startRendering()
      .then(createFilterVerifier(
          should, createLowShelfFilter, 2.7657e-5, parameters,
          source.getChannelData(0),
          'Output of lowshelf filter with gain automation'))
      .then(() => task.done());
});
// Automate just the detune parameter. Basically the same test as for the
// frequency parameter but we just use the detune parameter to modulate the
// frequency parameter.
audit.define('automate-detune', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);

  // Test tone frequency.  The detune sweep of +/- 12000 cents (10
  // octaves) modulates the filter's center frequency around this value.
  let centerFreq = 10 * 440;
  let parameters = {
    freq: [centerFreq, centerFreq],
    detune: [-10 * 1200, 10 * 1200]
  };
  let graph = configureGraph(context, centerFreq);
  let f = graph.filter;
  let b = graph.source;

  f.type = 'bandpass';
  f.frequency.value = parameters.freq[0];
  f.detune.setValueAtTime(parameters.detune[0], 0);
  f.detune.linearRampToValueAtTime(
      parameters.detune[1], automationEndTime);

  // Render and compare against a reference bandpass filter driven by the
  // same detune automation; the threshold was determined experimentally.
  context.startRendering()
      .then(createFilterVerifier(
          should, createBandpassFilter, 3.1471e-5, parameters,
          b.getChannelData(0),
          'Output of bandpass filter with detune automation'))
      .then(() => task.done());
});
// Automate all of the filter parameters at once. This is a basic check
// that everything is working. A peaking filter is used because it uses
// all of the parameters.
audit.define('automate-all', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
  let graph = configureGraph(context, 10 * 440);
  let f = graph.filter;
  let b = graph.source;

  // Sweep all of the filter parameters. These are pretty much arbitrary.
  let parameters = {
    freq: [8000, 100],
    Q: [f.Q.value, .0001],
    gain: [f.gain.value, 20],
    detune: [2400, -2400]
  };

  f.type = 'peaking';
  // Set starting points for all parameters of the filter. Start at 10
  // kHz for the center frequency, and the defaults for Q and gain.
  f.frequency.setValueAtTime(parameters.freq[0], 0);
  f.Q.setValueAtTime(parameters.Q[0], 0);
  f.gain.setValueAtTime(parameters.gain[0], 0);
  f.detune.setValueAtTime(parameters.detune[0], 0);

  // Linear ramp each parameter
  f.frequency.linearRampToValueAtTime(
      parameters.freq[1], automationEndTime);
  f.Q.linearRampToValueAtTime(parameters.Q[1], automationEndTime);
  f.gain.linearRampToValueAtTime(parameters.gain[1], automationEndTime);
  f.detune.linearRampToValueAtTime(
      parameters.detune[1], automationEndTime);

  // Render and compare against a reference peaking filter with all four
  // parameters ramped; the threshold was determined experimentally.
  context.startRendering()
      .then(createFilterVerifier(
          should, createPeakingFilter, 6.2907e-4, parameters,
          b.getChannelData(0),
          'Output of peaking filter with automation of all parameters'))
      .then(() => task.done());
});
// Test that modulation of the frequency parameter of the filter works. A
// sinusoid of 440 Hz is the test signal that is applied to a bandpass
// biquad filter. The frequency parameter of the filter is modulated by a
// sinusoid at 103 Hz, and the frequency modulation varies from 116 to 412
// Hz. (This test was taken from the description in
// https://github.com/WebAudio/web-audio-api/issues/509#issuecomment-94731355)
audit.define('modulation', (task, should) => {
  let context =
      new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);

  // Create a graph with the sinusoidal source at 440 Hz as the input to a
  // biquad filter.
  let graph = configureGraph(context, 440);
  let f = graph.filter;
  let b = graph.source;

  f.type = 'bandpass';
  f.Q.value = 5;
  f.frequency.value = 264;

  // Create the modulation source, a sinusoid with frequency 103 Hz and
  // amplitude 148. (The amplitude of 148 is added to the filter's
  // frequency value of 264 to produce a sinusoidal modulation of the
  // frequency parameter from 116 to 412 Hz.)
  let mod = context.createBufferSource();
  let mbuffer =
      context.createBuffer(1, renderDuration * sampleRate, sampleRate);
  let d = mbuffer.getChannelData(0);
  let omega = 2 * Math.PI * 103 / sampleRate;
  for (let k = 0; k < d.length; ++k) {
    d[k] = 148 * Math.sin(omega * k);
  }
  mod.buffer = mbuffer;

  mod.connect(f.frequency);

  mod.start();
  context.startRendering()
      .then(function(resultBuffer) {
        let actual = resultBuffer.getChannelData(0);

        // Compute the filter coefficients using the mod sine wave.
        let endFrame = Math.ceil(renderDuration * sampleRate);
        let nCoef = endFrame;
        let b0 = new Float64Array(nCoef);
        let b1 = new Float64Array(nCoef);
        let b2 = new Float64Array(nCoef);
        let a1 = new Float64Array(nCoef);
        let a2 = new Float64Array(nCoef);

        // Generate the filter coefficients when the frequency varies from
        // 116 to 412 Hz using the 103 Hz sinusoid.
        for (let k = 0; k < nCoef; ++k) {
          let freq = f.frequency.value + d[k];
          let c = createBandpassFilter(
              freq / (sampleRate / 2), f.Q.value, f.gain.value);
          b0[k] = c.b0;
          b1[k] = c.b1;
          b2[k] = c.b2;
          a1[k] = c.a1;
          a2[k] = c.a2;
        }
        // Fix: |reference| was previously assigned without a declaration,
        // creating an accidental global (and a ReferenceError in strict
        // mode).  Declare it locally.
        let reference = timeVaryingFilter(
            b.getChannelData(0),
            {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2});

        should(
            actual,
            'Output of bandpass filter with sinusoidal modulation of bandpass center frequency')
            .beCloseToArray(reference, {absoluteThreshold: 3.9787e-5});
      })
      .then(() => task.done());
});

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,44 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-bandpass.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a bandpass BiquadFilterNode against the reference JS
// implementation in biquad-filters.js.  sampleRate, renderLengthSeconds,
// and createTestAndRun come from biquad-testing.js.
audit.define(
    {label: 'test', description: 'Biquad bandpass filter.'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test.  |cutoff| is a normalized frequency
      // (1 = Nyquist), covering both edge cases (0 and 1) and interior
      // values.
      let filterParameters = [
        {cutoff: 0, q: 0, gain: 1},
        {cutoff: 1, q: 0, gain: 1},
        {cutoff: 0.5, q: 0, gain: 1},
        {cutoff: 0.25, q: 1, gain: 1},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'bandpass', {
        should: should,
        threshold: 2.2501e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,134 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Basic BiquadFilterNode Properties
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let sampleRate = 48000;
// Only a short render is needed; these tasks test properties, not audio.
let testFrames = 100;

// Global context that can be used by the individual tasks. It must be
// defined by the initialize task.
let context;

let audit = Audit.createTaskRunner();

// Create the OfflineAudioContext shared by the remaining tasks.
audit.define('initialize', (task, should) => {
  should(() => {
    context = new OfflineAudioContext(1, testFrames, sampleRate);
  }, 'Initialize context for testing').notThrow();

  task.done();
});

// The factory method must exist on the context.
audit.define('existence', (task, should) => {
  should(context.createBiquadFilter, 'context.createBiquadFilter')
      .exist();
  task.done();
});
// Verify the basic node properties of a default BiquadFilterNode.
audit.define('parameters', (task, should) => {
  // createBiquadFilter() takes no arguments.  The original passed two
  // coefficient Float32Arrays — a copy-paste leftover from the
  // IIRFilterNode test — which were silently ignored; they are removed
  // here.
  let f = context.createBiquadFilter();

  should(f.numberOfInputs, 'numberOfInputs').beEqualTo(1);
  should(f.numberOfOutputs, 'numberOfOutputs').beEqualTo(1);
  should(f.channelCountMode, 'channelCountMode').beEqualTo('max');
  should(f.channelInterpretation, 'channelInterpretation')
      .beEqualTo('speakers');

  task.done();
});
audit.define('exceptions-createBiquadFilter', (task, should) => {
  should(function() {
    // createBiquadFilter takes no arguments, so a bare call must not
    // throw.  (The previous comment claiming "two args are required" was
    // left over from the IIRFilterNode test this was adapted from.)
    context.createBiquadFilter();
  }, 'createBiquadFilter()').notThrow();

  task.done();
});
// Verify that getFrequencyResponse() validates its three array arguments:
// null arrays throw TypeError; length mismatches throw InvalidAccessError.
audit.define('exceptions-getFrequencyData', (task, should) => {
  // Construct a filter to probe getFrequencyResponse().
  // NOTE(review): createBiquadFilter() takes no parameters, so the two
  // Float32Array arguments below are ignored — this looks copied from
  // the IIRFilterNode test.
  let coef = Float32Array.from([1]);
  let f = context.createBiquadFilter(coef, coef);

  should(
      function() {
        // frequencyHz can't be null.
        f.getFrequencyResponse(
            null, new Float32Array(1), new Float32Array(1));
      },
      'getFrequencyResponse(' +
          'null, ' +
          'new Float32Array(1), ' +
          'new Float32Array(1))')
      .throw('TypeError');
  should(
      function() {
        // magResponse can't be null.
        f.getFrequencyResponse(
            new Float32Array(1), null, new Float32Array(1));
      },
      'getFrequencyResponse(' +
          'new Float32Array(1), ' +
          'null, ' +
          'new Float32Array(1))')
      .throw('TypeError');
  should(
      function() {
        // phaseResponse can't be null.
        f.getFrequencyResponse(
            new Float32Array(1), new Float32Array(1), null);
      },
      'getFrequencyResponse(' +
          'new Float32Array(1), ' +
          'new Float32Array(1), ' +
          'null)')
      .throw('TypeError');
  should(
      function() {
        // magResponse array must the same length as frequencyHz
        f.getFrequencyResponse(
            new Float32Array(10), new Float32Array(1),
            new Float32Array(20));
      },
      'getFrequencyResponse(' +
          'new Float32Array(10), ' +
          'new Float32Array(1), ' +
          'new Float32Array(20))')
      .throw('InvalidAccessError');
  should(
      function() {
        // phaseResponse array must be the same length as frequencyHz
        f.getFrequencyResponse(
            new Float32Array(10), new Float32Array(20),
            new Float32Array(1));
      },
      'getFrequencyResponse(' +
          'new Float32Array(10), ' +
          'new Float32Array(20), ' +
          'new Float32Array(1))')
      .throw('InvalidAccessError');

  task.done();
});

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,335 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test BiquadFilter getFrequencyResponse() functionality
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Test the frequency response of a biquad filter. We compute the
// frequency response for a simple peaking biquad filter and compare it
// with the expected frequency response. The actual filter used doesn't
// matter since we're testing getFrequencyResponse and not the actual
// filter output. The filters are extensively tested in other biquad
// tests.

// The magnitude response of the biquad filter.
let magResponse;

// The phase response of the biquad filter.
let phaseResponse;

// Number of frequency samples to take.
let numberOfFrequencies = 1000;

// The filter parameters.
let filterCutoff = 1000;  // Hz.
let filterQ = 1;
let filterGain = 5;  // Decibels.

// The maximum allowed error in the magnitude response.
// (Thresholds determined experimentally.)
let maxAllowedMagError = 9.775e-7;

// The maximum allowed error in the phase response.
let maxAllowedPhaseError = 5.4187e-8;

// The magnitudes and phases of the reference frequency response.
// Assigned by compareResponses() below.
let expectedMagnitudes;
let expectedPhases;
// Convert a frequency in Hz to a normalized frequency in [0, 1], where 1
// corresponds to the Nyquist frequency (half the sample rate).
function normalizedFrequency(freqHz, sampleRate) {
  return freqHz / (sampleRate / 2);
}
// Evaluate the transfer function of the biquad with coefficients |coef|
// at the normalized frequency |f| (1 = Nyquist).
//
//   H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2)
//
// evaluated at z = exp(i * omega), omega = pi * f.  JavaScript has no
// native complex numbers, so the numerator and denominator are split into
// real and imaginary parts:
//
//   numerator:   b0 + b1*cos(omega) + b2*cos(2*omega)
//                  - i*(b1*sin(omega) + b2*sin(2*omega))
//   denominator: 1 + a1*cos(omega) + a2*cos(2*omega)
//                  - i*(a1*sin(omega) + a2*sin(2*omega))
//
// Returns {magnitude, phase} with the phase wrapped to (-pi, pi).
function getResponseAt(coef, f) {
  const {b0, b1, b2, a1, a2} = coef;

  const omega = Math.PI * f;
  const cos1 = Math.cos(omega);
  const cos2 = Math.cos(2 * omega);
  const sin1 = Math.sin(omega);
  const sin2 = Math.sin(2 * omega);

  const numRe = b0 + b1 * cos1 + b2 * cos2;
  const numIm = -(b1 * sin1 + b2 * sin2);
  const denRe = 1 + a1 * cos1 + a2 * cos2;
  const denIm = -(a1 * sin1 + a2 * sin2);

  const magnitude = Math.sqrt(
      (numRe * numRe + numIm * numIm) / (denRe * denRe + denIm * denIm));

  let phase = Math.atan2(numIm, numRe) - Math.atan2(denIm, denRe);
  // Wrap the phase back into (-pi, pi).
  if (phase >= Math.PI) {
    phase -= 2 * Math.PI;
  } else if (phase <= -Math.PI) {
    phase += 2 * Math.PI;
  }

  return {magnitude: magnitude, phase: phase};
}
// Compute the reference frequency response for the biquad filter |filter|
// at the frequency samples given by |frequencies|.  Uses createFilter()
// (from biquad-filters.js) to derive the coefficients from the filter's
// current type, frequency, Q, and gain, then evaluates the transfer
// function at each sample.  Returns {magnitudes, phases} (plain arrays).
function frequencyResponseReference(filter, frequencies) {
  let sampleRate = filter.context.sampleRate;
  let normalizedFreq =
      normalizedFrequency(filter.frequency.value, sampleRate);
  let filterCoefficients = createFilter(
      filter.type, normalizedFreq, filter.Q.value, filter.gain.value);

  let magnitudes = [];
  let phases = [];

  for (let k = 0; k < frequencies.length; ++k) {
    let response = getResponseAt(
        filterCoefficients,
        normalizedFrequency(frequencies[k], sampleRate));
    magnitudes.push(response.magnitude);
    phases.push(response.phase);
  }

  return {magnitudes: magnitudes, phases: phases};
}
// Create |nFrequencies| linearly spaced frequencies starting at 0 Hz.
// The spacing is Nyquist / nFrequencies, so the Nyquist frequency itself
// is excluded.  Returns a Float32Array.
function createFrequencies(nFrequencies, sampleRate) {
  const step = (sampleRate / 2) / nFrequencies;
  return Float32Array.from({length: nFrequencies}, (_, k) => k * step);
}
// Convert a linear magnitude to decibels.  For a zero (or NaN, which is
// also falsy) input — where the true value would be -Infinity or NaN —
// return a large negative dB value instead.
function linearToDecibels(x) {
  return x ? 20 * Math.log(x) / Math.LN10 : -1000;
}
// Look through the array and find any NaN or infinity. Returns the index
// of the first occurrence or -1 if none.  (isValidNumber comes from
// audit-util.js.)
function findBadNumber(signal) {
  for (let k = 0; k < signal.length; ++k) {
    if (!isValidNumber(signal[k])) {
      return k;
    }
  }
  return -1;
}
// Compute the absolute difference between two phase angles, accounting
// for wrap-around: a raw difference greater than pi is mapped back into
// [0, pi] via 2*pi - diff.
function absolutePhaseDifference(x, y) {
  const raw = Math.abs(x - y);
  return raw > Math.PI ? 2 * Math.PI - raw : raw;
}
// Compare the frequency response (|magResponse|, |phaseResponse|) as
// returned by getFrequencyResponse() against the reference response
// computed by frequencyResponseReference() at the given |frequencies|.
// Asserts that both responses are finite and that the maximum magnitude
// (dB) and phase (rad) errors stay under the global thresholds.
function compareResponses(
    should, filter, frequencies, magResponse, phaseResponse) {
  let expectedResponse = frequencyResponseReference(filter, frequencies);

  // Stored in file-level globals for easier debugging.
  expectedMagnitudes = expectedResponse.magnitudes;
  expectedPhases = expectedResponse.phases;

  let n = magResponse.length;
  let badResponse = false;

  let maxMagError = -1;
  let maxMagErrorIndex = -1;

  let k;
  let hasBadNumber;

  // Bug fix: the original reassigned |badResponse| on every check, so
  // only the last finiteness check contributed to the summary assertion
  // below.  Accumulate failures instead.
  hasBadNumber = findBadNumber(magResponse);
  badResponse = badResponse ||
      !should(
           hasBadNumber >= 0 ? 1 : 0,
           'Number of non-finite values in magnitude response')
           .beEqualTo(0);

  hasBadNumber = findBadNumber(phaseResponse);
  badResponse = badResponse ||
      !should(
           hasBadNumber >= 0 ? 1 : 0,
           'Number of non-finite values in phase response')
           .beEqualTo(0);

  // These aren't testing the implementation itself. Instead, these are
  // sanity checks on the reference. Failure here does not imply an error
  // in the implementation.
  hasBadNumber = findBadNumber(expectedMagnitudes);
  badResponse = badResponse ||
      !should(
           hasBadNumber >= 0 ? 1 : 0,
           'Number of non-finite values in the expected magnitude response')
           .beEqualTo(0);

  hasBadNumber = findBadNumber(expectedPhases);
  badResponse = badResponse ||
      !should(
           hasBadNumber >= 0 ? 1 : 0,
           'Number of non-finite values in expected phase response')
           .beEqualTo(0);

  // If we found a NaN or infinity, the following tests aren't very
  // helpful, especially for NaN. We run them anyway, after printing a
  // warning message.
  should(
      !badResponse,
      'Actual and expected results contained only finite values')
      .beTrue();

  // Find the largest magnitude error, in dB.
  for (k = 0; k < n; ++k) {
    let error = Math.abs(
        linearToDecibels(magResponse[k]) -
        linearToDecibels(expectedMagnitudes[k]));
    if (error > maxMagError) {
      maxMagError = error;
      maxMagErrorIndex = k;
    }
  }

  should(
      linearToDecibels(maxMagError),
      'Max error (' + linearToDecibels(maxMagError) +
          ' dB) of magnitude response at frequency ' +
          frequencies[maxMagErrorIndex] + ' Hz')
      .beLessThanOrEqualTo(linearToDecibels(maxAllowedMagError));

  // Find the largest phase error, accounting for phase wrap-around.
  let maxPhaseError = -1;
  let maxPhaseErrorIndex = -1;

  for (k = 0; k < n; ++k) {
    let error =
        absolutePhaseDifference(phaseResponse[k], expectedPhases[k]);
    if (error > maxPhaseError) {
      maxPhaseError = error;
      maxPhaseErrorIndex = k;
    }
  }

  should(
      radToDegree(maxPhaseError),
      'Max error (' + radToDegree(maxPhaseError) +
          ' deg) in phase response at frequency ' +
          frequencies[maxPhaseErrorIndex] + ' Hz')
      .beLessThanOrEqualTo(radToDegree(maxAllowedPhaseError));
}
// Convert an angle from radians to degrees.
function radToDegree(radians) {
  return radians * 180 / Math.PI;
}
// Compare getFrequencyResponse() of a peaking filter against the JS
// reference implementation.
audit.define(
    {label: 'test', description: 'Biquad frequency response'},
    function(task, should) {
      // |context| and |filter| are globals declared in biquad-testing.js.
      context = new AudioContext();

      filter = context.createBiquadFilter();

      // Arbitrarily test a peaking filter, but any kind of filter can be
      // tested.
      filter.type = 'peaking';
      filter.frequency.value = filterCutoff;
      filter.Q.value = filterQ;
      filter.gain.value = filterGain;

      let frequencies =
          createFrequencies(numberOfFrequencies, context.sampleRate);
      magResponse = new Float32Array(numberOfFrequencies);
      phaseResponse = new Float32Array(numberOfFrequencies);
      filter.getFrequencyResponse(
          frequencies, magResponse, phaseResponse);
      compareResponses(
          should, filter, frequencies, magResponse, phaseResponse);

      task.done();
    });

audit.define(
    {
      label: 'getFrequencyResponse',
      description: 'Test out-of-bounds frequency values'
    },
    (task, should) => {
      let context = new OfflineAudioContext(1, 1, sampleRate);
      let filter = new BiquadFilterNode(context);

      // Frequencies to test. These are all outside the valid range of
      // frequencies of 0 to Nyquist.
      let freq = new Float32Array(2);
      freq[0] = -1;
      freq[1] = context.sampleRate / 2 + 1;

      let mag = new Float32Array(freq.length);
      let phase = new Float32Array(freq.length);
      filter.getFrequencyResponse(freq, mag, phase);

      // Verify that the returned magnitude and phase entries are all NaN
      // since the frequencies are outside the valid range
      for (let k = 0; k < mag.length; ++k) {
        should(mag[k],
               'Magnitude response at frequency ' + freq[k])
            .beNaN();
      }

      for (let k = 0; k < phase.length; ++k) {
        should(phase[k],
               'Phase response at frequency ' + freq[k])
            .beNaN();
      }

      task.done();
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,42 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-highpass.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a highpass BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad highpass filter'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist).
      let filterParameters = [
        {cutoff: 0, q: 1, gain: 1},
        {cutoff: 1, q: 1, gain: 1},
        {cutoff: 0.25, q: 1, gain: 1},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'highpass', {
        should: should,
        threshold: 1.5487e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-highshelf.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a highshelf BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad highshelf filter'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist).
      let filterParameters = [
        {cutoff: 0, q: 10, gain: 10},
        {cutoff: 1, q: 10, gain: 10},
        {cutoff: 0.25, q: 10, gain: 10},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'highshelf', {
        should: should,
        threshold: 6.2577e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-lowpass.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a lowpass BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad lowpass filter'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist)
      // and |detune| is in cents.
      let filterParameters = [
        {cutoff: 0, q: 1, gain: 1},
        {cutoff: 1, q: 1, gain: 1},
        {cutoff: 0.25, q: 1, gain: 1},
        {cutoff: 0.25, q: 1, gain: 1, detune: 100},
        {cutoff: 0.01, q: 1, gain: 1, detune: -200},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'lowpass', {
        should: should,
        threshold: 9.7869e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-lowshelf.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a lowshelf BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad lowshelf filter'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist).
      let filterParameters = [
        {cutoff: 0, q: 10, gain: 10},
        {cutoff: 1, q: 10, gain: 10},
        {cutoff: 0.25, q: 10, gain: 10},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'lowshelf', {
        should: should,
        threshold: 3.8349e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-notch.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a notch BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad notch filter'},
    function(task, should) {
      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist).
      let filterParameters = [
        {cutoff: 0, q: 10, gain: 1},
        {cutoff: 1, q: 10, gain: 1},
        {cutoff: .5, q: 0, gain: 1},
        {cutoff: 0.25, q: 10, gain: 1},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'notch', {
        should: should,
        threshold: 1.9669e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,46 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-peaking.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
<script src="/webaudio/resources/biquad-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Compare a peaking BiquadFilterNode against the reference JS
// implementation (helpers come from biquad-testing.js).
audit.define(
    {label: 'test', description: 'Biquad peaking filter'},
    function(task, should) {
      // Removed leftover "window.jsTestIsAsync = true;" — a flag from the
      // legacy Blink LayoutTests harness with no effect under the WPT
      // testharness; none of the sibling biquad tests set it.

      // Create offline audio context.
      let context = new OfflineAudioContext(
          2, sampleRate * renderLengthSeconds, sampleRate);

      // The filters we want to test; |cutoff| is normalized (1 = Nyquist).
      let filterParameters = [
        {cutoff: 0, q: 10, gain: 10},
        {cutoff: 1, q: 10, gain: 10},
        {cutoff: .5, q: 0, gain: 10},
        {cutoff: 0.25, q: 10, gain: 10},
      ];

      // Threshold was determined experimentally.
      createTestAndRun(context, 'peaking', {
        should: should,
        threshold: 5.8234e-8,
        filterParameters: filterParameters
      }).then(task.done.bind(task));
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,71 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Biquad Tail Output
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// A high sample rate shows the issue more clearly.
let sampleRate = 192000;
// Some short duration because we don't need to run the test for very
// long.
let testDurationSec = 0.5;
let testDurationFrames = testDurationSec * sampleRate;

// Amplitude experimentally determined to give a biquad output close to 1.
// (No attempt was made to produce exactly 1; it's not needed.)
let sourceAmplitude = 100;

// The output of the biquad filter should not change by more than this
// much between output samples. Threshold was determined experimentally.
let glitchThreshold = 0.012968;

// Test that a Biquad filter doesn't have its output terminated because
// the input has gone away. Generally, when a source node is finished, it
// disconnects itself from any downstream nodes. This is the correct
// behavior. Nodes that have no inputs (disconnected) are generally
// assumed to output zeroes. This is also desired behavior. However,
// biquad filters have memory so they should not suddenly output zeroes
// when the input is disconnected. This test checks to see if the output
// doesn't suddenly change to zero.
audit.define(
    {label: 'test', description: 'Biquad Tail Output'},
    function(task, should) {
      let context =
          new OfflineAudioContext(1, testDurationFrames, sampleRate);

      // Create an impulse source.
      let buffer = context.createBuffer(1, 1, context.sampleRate);
      buffer.getChannelData(0)[0] = sourceAmplitude;
      let source = context.createBufferSource();
      source.buffer = buffer;

      // Create the biquad filter. It doesn't really matter what kind, so
      // the default filter type and parameters is fine. Connect the
      // source to it.
      let biquad = context.createBiquadFilter();
      source.connect(biquad);
      biquad.connect(context.destination);

      source.start();

      context.startRendering().then(function(result) {
        // There should be no large discontinuities in the output
        should(result.getChannelData(0), 'Biquad output')
            .notGlitch(glitchThreshold);
        task.done();
      })
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,64 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquadfilternode-basic.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// Check the default property values of a BiquadFilterNode and that every
// legal filter type string can be assigned.
audit.define(
    {label: 'test', description: 'Basic tests for BiquadFilterNode'},
    function(task, should) {
      let context = new AudioContext();
      let filter = context.createBiquadFilter();

      // Defaults per the Web Audio API spec.
      should(filter.numberOfInputs, 'Number of inputs').beEqualTo(1);

      should(filter.numberOfOutputs, 'Number of outputs').beEqualTo(1);

      should(filter.type, 'Default filter type').beEqualTo('lowpass');

      should(filter.frequency.value, 'Default frequency value')
          .beEqualTo(350);

      should(filter.Q.value, 'Default Q value').beEqualTo(1);

      should(filter.gain.value, 'Default gain value').beEqualTo(0);

      // Check that all legal filter types can be set.
      let filterTypeArray = [
        {type: 'lowpass'}, {type: 'highpass'}, {type: 'bandpass'},
        {type: 'lowshelf'}, {type: 'highshelf'}, {type: 'peaking'},
        {type: 'notch'}, {type: 'allpass'}
      ];

      for (let i = 0; i < filterTypeArray.length; ++i) {
        should(
            () => filter.type = filterTypeArray[i].type,
            'Setting filter.type to ' + filterTypeArray[i].type)
            .notThrow();
        should(filter.type, 'Filter type is')
            .beEqualTo(filterTypeArray[i].type);
      }

      // Check that numerical values are no longer supported.  Assigning
      // an invalid value to an enum attribute is ignored, so |type|
      // should retain its previous string value.
      filter.type = 99;
      should(filter.type, 'Setting filter.type to (invalid) 99')
          .notBeEqualTo(99);

      task.done();
    });

audit.run();
</script>
</body>
</html>

View file

@ -0,0 +1,288 @@
<!DOCTYPE html>
<html>
<head>
<title>
biquad-bandpass.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/biquad-filters.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();

// In the tests below, the initial values are not important, except that
// we wanted them to be all different so that the output contains
// different values for the first few samples. Otherwise, the actual
// values don't really matter. A peaking filter is used because the
// frequency, Q, gain, and detune parameters are used by this filter.
//
// Also, for the changeList option, the times and new values aren't really
// important. They just need to change so that we can verify that the
// outputs from the .value setter still matches the output from the
// corresponding setValueAtTime.
//
// NOTE(review): doTest() is defined later in this file; |quantum| values
// presumably index render quanta — confirm against doTest().
audit.define(
    {label: 'Test 0', description: 'No dezippering for frequency'},
    (task, should) => {
      doTest(should, {
        paramName: 'frequency',
        initializer: {type: 'peaking', Q: 1, gain: 5},
        changeList:
            [{quantum: 2, newValue: 800}, {quantum: 7, newValue: 200}],
        threshold: 3.0399e-6
      }).then(() => task.done());
    });

audit.define(
    {label: 'Test 1', description: 'No dezippering for detune'},
    (task, should) => {
      doTest(should, {
        paramName: 'detune',
        initializer:
            {type: 'peaking', frequency: 400, Q: 3, detune: 33, gain: 10},
        changeList:
            [{quantum: 2, newValue: 1000}, {quantum: 5, newValue: -400}],
        threshold: 4.0532e-6
      }).then(() => task.done());
    });

audit.define(
    {label: 'Test 2', description: 'No dezippering for Q'},
    (task, should) => {
      doTest(should, {
        paramName: 'Q',
        initializer: {type: 'peaking', Q: 5},
        changeList:
            [{quantum: 2, newValue: 10}, {quantum: 8, newValue: -10}]
      }).then(() => task.done());
    });

audit.define(
    {label: 'Test 3', description: 'No dezippering for gain'},
    (task, should) => {
      doTest(should, {
        paramName: 'gain',
        initializer: {type: 'peaking', gain: 1},
        changeList:
            [{quantum: 2, newValue: 5}, {quantum: 6, newValue: -.3}],
        threshold: 1.9074e-6
      }).then(() => task.done());
    });
// This test compares the filter output against a JS implementation of the
// filter. We're only testing a change in the frequency for a lowpass
// filter. This assumes we don't need to test other AudioParam changes
// with JS code because any mistakes would be exposed in the tests above.
audit.define(
    {
      label: 'Test 4',
      description: 'No dezippering of frequency vs JS filter'
    },
    (task, should) => {
      // Channel 0 is the source, channel 1 is the filtered output.
      let context = new OfflineAudioContext(2, 2048, 16384);

      // Merge the raw and the filtered signal into the two output
      // channels so both are captured in one rendering.
      let merger = new ChannelMergerNode(
          context, {numberOfInputs: context.destination.channelCount});
      merger.connect(context.destination);

      let src = new OscillatorNode(context);
      let f = new BiquadFilterNode(context, {type: 'lowpass'});

      // Remember the initial filter parameters so the reference JS
      // filter can reproduce the pre-change coefficients.
      let initialFilter = {
        type: f.type,
        frequency: f.frequency.value,
        gain: f.gain.value,
        detune: f.detune.value,
        Q: f.Q.value
      };

      src.connect(merger, 0, 0);
      src.connect(f).connect(merger, 0, 1);

      // Apply the filter change at frame |changeFrame| with a new
      // frequency value of |newValue|. The change is made while the
      // context is suspended so it lands exactly on a render-quantum
      // boundary.
      let changeFrame = 2 * RENDER_QUANTUM_FRAMES;
      let newValue = 750;

      context.suspend(changeFrame / context.sampleRate)
          .then(() => f.frequency.value = newValue)
          .then(() => context.resume());

      src.start();
      context.startRendering()
          .then(audio => {
            let signal = audio.getChannelData(0);
            let actual = audio.getChannelData(1);

            // Get initial filter coefficients and updated coefficients.
            // createFilter (from biquad-filters.js) takes the cutoff as
            // a fraction of the Nyquist frequency.
            let nyquistFreq = context.sampleRate / 2;
            let initialCoef = createFilter(
                initialFilter.type, initialFilter.frequency / nyquistFreq,
                initialFilter.Q, initialFilter.gain);

            let finalCoef = createFilter(
                f.type, f.frequency.value / nyquistFreq, f.Q.value,
                f.gain.value);

            let expected = new Float32Array(signal.length);

            // Filter the initial part of the signal. The first two
            // samples are special-cased because the filter history
            // (x[n-1], x[n-2], y[n-1], y[n-2]) starts out as zero.
            expected[0] =
                filterSample(signal[0], initialCoef, 0, 0, 0, 0);
            expected[1] = filterSample(
                signal[1], initialCoef, expected[0], 0, signal[0], 0);

            for (let k = 2; k < changeFrame; ++k) {
              expected[k] = filterSample(
                  signal[k], initialCoef, expected[k - 1],
                  expected[k - 2], signal[k - 1], signal[k - 2]);
            }

            // Filter the rest of the input with the new coefficients.
            // Note that the filter memory carries over the coefficient
            // switch, exactly as an un-dezippered filter would behave.
            for (let k = changeFrame; k < signal.length; ++k) {
              expected[k] = filterSample(
                  signal[k], finalCoef, expected[k - 1], expected[k - 2],
                  signal[k - 1], signal[k - 2]);
            }

            // The JS filter should match the actual output.
            let match =
                should(actual, 'Output from ' + f.type + ' filter')
                    .beCloseToArray(
                        expected, {absoluteThreshold: 4.7684e-7});

            should(match, 'Output matches JS filter results').beTrue();
          })
          .then(() => task.done());
    });
audit.define(
    {label: 'Test 5', description: 'Test with modulation'},
    (task, should) => {
      // Same comparison as Test 0, but with the AudioParam additionally
      // modulated by an audio-rate input (doTest's modulation option).
      const testOptions = {
        prefix: 'Modulation: ',
        paramName: 'frequency',
        initializer: {type: 'peaking', Q: 5, gain: 5},
        modulation: true,
        changeList: [
          {quantum: 2, newValue: 10},
          {quantum: 8, newValue: -10}
        ]
      };
      doTest(should, testOptions).then(() => task.done());
    });

// Start running all of the tasks defined above.
audit.run();
// Run test, returning the promise from startRendering. |options|
// specifies the parameters for the test. |options.paramName| is the name
// of the AudioParam of the filter that is being tested.
// |options.initializer| is the initial value to be used in constructing
// the filter. |options.changeList| is an array consisting of dictionaries
// with two members: |quantum| is the rendering quantum at which time we
// want to change the AudioParam value, and |newValue| is the value to be
// used. Optional members: |options.prefix| prefixes every test message,
// |options.threshold| is the allowed absolute error when comparing the
// two outputs, and |options.modulation| enables audio-rate modulation of
// the tested AudioParam.
function doTest(should, options) {
  let paramName = options.paramName;
  let newValue = options.newValue;
  let prefix = options.prefix || '';

  // Create offline audio context. The sample rate should be a power of
  // two to eliminate any round-off errors in computing the time at which
  // to suspend the context for the parameter change. The length is
  // fairly arbitrary as long as it's big enough to cover the changeList
  // values. There are two channels: channel 0 is output for the filter
  // under test, and channel 1 is the output of the reference filter.
  let context = new OfflineAudioContext(2, 2048, 16384);

  let merger = new ChannelMergerNode(
      context, {numberOfInputs: context.destination.channelCount});
  merger.connect(context.destination);

  let src = new OscillatorNode(context);

  // |f0| is the filter under test that will have its AudioParam value
  // changed. |f1| is the reference filter that uses setValueAtTime to
  // update the AudioParam value.
  let f0 = new BiquadFilterNode(context, options.initializer);
  let f1 = new BiquadFilterNode(context, options.initializer);

  src.connect(f0).connect(merger, 0, 0);
  src.connect(f1).connect(merger, 0, 1);

  // Modulate the AudioParam with an input signal, if requested.
  if (options.modulation) {
    // The modulation signal is a sine wave with amplitude 1/3 the cutoff
    // frequency of the test filter. The amplitude is fairly arbitrary,
    // but we want it to be a significant fraction of the cutoff so that
    // the cutoff varies quite a bit in the test.
    // NOTE(review): the oscillator below is actually a sawtooth, not a
    // sine — the waveform choice doesn't affect what the test verifies.
    let mod =
        new OscillatorNode(context, {type: 'sawtooth', frequency: 1000});
    let modGain = new GainNode(context, {gain: f0.frequency.value / 3});
    mod.connect(modGain);
    // The same modulation signal feeds the tested param on both filters
    // so their inputs stay identical.
    modGain.connect(f0[paramName]);
    modGain.connect(f1[paramName]);
    mod.start();
  }

  // Output a message showing where we're starting from. (The assertion
  // is trivially true; it exists only to log the initial value.)
  should(f0[paramName].value, prefix + `At time 0, ${paramName}`)
      .beEqualTo(f0[paramName].value);

  // Schedule all of the desired changes from |changeList|.
  options.changeList.forEach(change => {
    let changeTime =
        change.quantum * RENDER_QUANTUM_FRAMES / context.sampleRate;
    let value = change.newValue;

    // Just output a message to show what we're doing.
    should(value, prefix + `At time ${changeTime}, ${paramName}`)
        .beEqualTo(value);

    // Update the AudioParam value of each filter using setValueAtTime or
    // the value setter. The setter change happens while the context is
    // suspended at |changeTime| so both changes take effect on the same
    // render quantum.
    f1[paramName].setValueAtTime(value, changeTime);
    context.suspend(changeTime)
        .then(() => f0[paramName].value = value)
        .then(() => context.resume());
  });

  src.start();

  return context.startRendering().then(audio => {
    let actual = audio.getChannelData(0);
    let expected = audio.getChannelData(1);

    // The output from both filters MUST match exactly if dezippering has
    // been properly removed.
    let match = should(actual, `${prefix}Output from ${paramName} setter`)
                    .beCloseToArray(
                        expected, {absoluteThreshold: options.threshold});

    // Just an extra message saying what we're comparing, to make the
    // output clearer. (Not really necessary, but nice.)
    should(
        match,
        `${prefix}Output from ${
                                paramName
                              } setter matches setValueAtTime output`)
        .beTrue();
  });
}
// Compute one output sample of a biquad filter (Direct Form I):
//
//   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
//
// |x| is the current input x[n]; |xn1| and |xn2| are the previous two
// inputs x[n-1] and x[n-2]; |yn1| and |yn2| are the previous two
// outputs y[n-1] and y[n-2]. |coef| is a dictionary holding the filter
// coefficients |b0|, |b1|, |b2|, |a1|, and |a2|.
function filterSample(x, coef, yn1, yn2, xn1, xn2) {
  const {b0, b1, b2, a1, a2} = coef;
  // Keep the same left-to-right association as the reference formula so
  // the floating-point result is bit-identical.
  return b0 * x + b1 * xn1 + b2 * xn2 - a1 * yn1 - a2 * yn2;
}
</script>
</body>
</html>

View file

@ -1,156 +0,0 @@
<!DOCTYPE html>
<html class="a">
<head>
<title>DelayNode IDL Test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/idlharness.js"></script>
<script src="/resources/WebIDLParser.js"></script>
<script src="/webaudio/js/helpers.js"></script>
<style type="text/css">
#event-target-idl,
#base-audio-context-idl,
#audio-node-idl,
#audio-param-idl
{ visibility:hidden; height: 0px;}
</style>
</head>
<body class="a">
<pre id="event-target-idl">interface EventTarget {
void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
boolean dispatchEvent(Event event);
};
/*
callback interface EventListener {
void handleEvent(Event event);
};
*/
// Callback interfaces are not supported yet, but that's ok
interface EventListener {};
</pre>
<pre id="base-audio-context-idl">callback DecodeErrorCallback = void (DOMException error);
callback DecodeSuccessCallback = void (AudioBuffer decodedData);
interface BaseAudioContext : EventTarget {
readonly attribute AudioDestinationNode destination;
readonly attribute float sampleRate;
readonly attribute double currentTime;
readonly attribute AudioListener listener;
readonly attribute AudioContextState state;
readonly attribute double baseLatency;
Promise<void> resume ();
attribute EventHandler onstatechange;
AudioBuffer createBuffer (unsigned long numberOfChannels, unsigned long length, float sampleRate);
Promise<AudioBuffer> decodeAudioData (ArrayBuffer audioData, optional DecodeSuccessCallback successCallback, optional DecodeErrorCallback errorCallback);
AudioBufferSourceNode createBufferSource ();
ConstantSourceNode createConstantSource ();
ScriptProcessorNode createScriptProcessor (optional unsigned long bufferSize = 0
, optional unsigned long numberOfInputChannels = 2
, optional unsigned long numberOfOutputChannels = 2
);
AnalyserNode createAnalyser ();
GainNode createGain ();
DelayNode createDelay (optional double maxDelayTime);
BiquadFilterNode createBiquadFilter ();
IIRFilterNode createIIRFilter (sequence<double> feedforward, sequence<double> feedback);
WaveShaperNode createWaveShaper ();
PannerNode createPanner ();
StereoPannerNode createStereoPanner ();
ConvolverNode createConvolver ();
ChannelSplitterNode createChannelSplitter (optional unsigned long numberOfOutputs = 6
);
ChannelMergerNode createChannelMerger (optional unsigned long numberOfInputs = 6
);
DynamicsCompressorNode createDynamicsCompressor ();
OscillatorNode createOscillator ();
PeriodicWave createPeriodicWave (Float32Array real, Float32Array imag, optional PeriodicWaveConstraints constraints);
};</pre>
<pre id="audio-node-idl">enum ChannelCountMode {
"max",
"clamped-max",
"explicit"
};
enum ChannelInterpretation {
"speakers",
"discrete"
};
interface AudioNode : EventTarget {
void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
void connect(AudioParam destination, optional unsigned long output = 0);
void disconnect(optional unsigned long output = 0);
readonly attribute BaseAudioContext context;
readonly attribute unsigned long numberOfInputs;
readonly attribute unsigned long numberOfOutputs;
// Channel up-mixing and down-mixing rules for all inputs.
attribute unsigned long channelCount;
attribute ChannelCountMode channelCountMode;
attribute ChannelInterpretation channelInterpretation;
};</pre>
<pre id="audio-param-idl">interface AudioParam {
attribute float value;
readonly attribute float defaultValue;
readonly attribute float minValue;
readonly attribute float maxValue;
// Parameter automation.
void setValueAtTime(float value, double startTime);
void linearRampToValueAtTime(float value, double endTime);
void exponentialRampToValueAtTime(float value, double endTime);
// Exponentially approach the target value with a rate having the given time constant.
void setTargetAtTime(float target, double startTime, double timeConstant);
// Sets an array of arbitrary parameter values starting at time for the given duration.
// The number of values will be scaled to fit into the desired duration.
void setValueCurveAtTime(Float32Array values, double startTime, double duration);
// Cancels all scheduled parameter changes with times greater than or equal to startTime.
void cancelScheduledValues(double startTime);
};</pre>
<pre id="delay-node-idl">dictionary DelayOptions : AudioNodeOptions {
double maxDelayTime = 1;
double delayTime = 0;
};
[Constructor(BaseAudioContext context, optional DelayOptions options)]
interface DelayNode : AudioNode {
readonly attribute AudioParam delayTime;
};</pre>
<div id="log"></div>
<script>
(function() {
  // Build an idlharness test suite for DelayNode. The dependency IDL
  // fragments (EventTarget, BaseAudioContext, AudioNode, AudioParam) are
  // registered as untested; only the DelayNode IDL itself is under test.
  var idl_array = new IdlArray();
  idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("base-audio-context-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("audio-node-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("audio-param-idl").textContent);
  idl_array.add_idls(document.getElementById("delay-node-idl").textContent);
  // Assigned without var/let so the instance is a global: add_objects
  // below names it by the string "delay_node", which idlharness
  // presumably resolves in global scope — TODO confirm against
  // idlharness.js before "fixing" this.
  delay_node = (new AudioContext).createDelay();
  idl_array.add_objects({DelayNode: ["delay_node"]});
  idl_array.test();
})();
</script>
</body>
</html>

View file

@ -1,154 +0,0 @@
<!DOCTYPE html>
<html class="a">
<head>
<title>GainNode IDL Test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/idlharness.js"></script>
<script src="/resources/WebIDLParser.js"></script>
<script src="/webaudio/js/helpers.js"></script>
<style type="text/css">
#event-target-idl,
#base-audio-context-idl,
#audio-node-idl,
#audio-param-idl
{ visibility:hidden; height: 0px;}
</style>
</head>
<body class="a">
<pre id="event-target-idl">interface EventTarget {
void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
boolean dispatchEvent(Event event);
};
/*
callback interface EventListener {
void handleEvent(Event event);
};
*/
// Callback interfaces are not supported yet, but that's ok
interface EventListener {};
</pre>
<pre id="base-audio-context-idl">callback DecodeErrorCallback = void (DOMException error);
callback DecodeSuccessCallback = void (AudioBuffer decodedData);
interface BaseAudioContext : EventTarget {
readonly attribute AudioDestinationNode destination;
readonly attribute float sampleRate;
readonly attribute double currentTime;
readonly attribute AudioListener listener;
readonly attribute AudioContextState state;
readonly attribute double baseLatency;
Promise<void> resume ();
attribute EventHandler onstatechange;
AudioBuffer createBuffer (unsigned long numberOfChannels, unsigned long length, float sampleRate);
Promise<AudioBuffer> decodeAudioData (ArrayBuffer audioData, optional DecodeSuccessCallback successCallback, optional DecodeErrorCallback errorCallback);
AudioBufferSourceNode createBufferSource ();
ConstantSourceNode createConstantSource ();
ScriptProcessorNode createScriptProcessor (optional unsigned long bufferSize = 0
, optional unsigned long numberOfInputChannels = 2
, optional unsigned long numberOfOutputChannels = 2
);
AnalyserNode createAnalyser ();
GainNode createGain ();
DelayNode createDelay (optional double maxDelayTime);
BiquadFilterNode createBiquadFilter ();
IIRFilterNode createIIRFilter (sequence<double> feedforward, sequence<double> feedback);
WaveShaperNode createWaveShaper ();
PannerNode createPanner ();
StereoPannerNode createStereoPanner ();
ConvolverNode createConvolver ();
ChannelSplitterNode createChannelSplitter (optional unsigned long numberOfOutputs = 6
);
ChannelMergerNode createChannelMerger (optional unsigned long numberOfInputs = 6
);
DynamicsCompressorNode createDynamicsCompressor ();
OscillatorNode createOscillator ();
PeriodicWave createPeriodicWave (Float32Array real, Float32Array imag, optional PeriodicWaveConstraints constraints);
};</pre>
<pre id="audio-node-idl">enum ChannelCountMode {
"max",
"clamped-max",
"explicit"
};
enum ChannelInterpretation {
"speakers",
"discrete"
};
interface AudioNode : EventTarget {
void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
void connect(AudioParam destination, optional unsigned long output = 0);
void disconnect(optional unsigned long output = 0);
readonly attribute BaseAudioContext context;
readonly attribute unsigned long numberOfInputs;
readonly attribute unsigned long numberOfOutputs;
// Channel up-mixing and down-mixing rules for all inputs.
attribute unsigned long channelCount;
attribute ChannelCountMode channelCountMode;
attribute ChannelInterpretation channelInterpretation;
};</pre>
<pre id="audio-param-idl">interface AudioParam {
attribute float value;
readonly attribute float defaultValue;
readonly attribute float minValue;
readonly attribute float maxValue;
// Parameter automation.
void setValueAtTime(float value, double startTime);
void linearRampToValueAtTime(float value, double endTime);
void exponentialRampToValueAtTime(float value, double endTime);
// Exponentially approach the target value with a rate having the given time constant.
void setTargetAtTime(float target, double startTime, double timeConstant);
// Sets an array of arbitrary parameter values starting at time for the given duration.
// The number of values will be scaled to fit into the desired duration.
void setValueCurveAtTime(Float32Array values, double startTime, double duration);
// Cancels all scheduled parameter changes with times greater than or equal to startTime.
void cancelScheduledValues(double startTime);
};</pre>
<pre id="gain-node-idl">dictionary GainOptions : AudioNodeOptions {
float gain = 1.0;
};
[Constructor(BaseAudioContext context, optional GainOptions options)]
interface GainNode : AudioNode {
readonly attribute AudioParam gain;
};</pre>
<div id="log"></div>
<script>
(function() {
  // Build an idlharness test suite for GainNode. The dependency IDL
  // fragments (EventTarget, BaseAudioContext, AudioNode, AudioParam) are
  // registered as untested; only the GainNode IDL itself is under test.
  var idl_array = new IdlArray();
  idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("base-audio-context-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("audio-node-idl").textContent);
  idl_array.add_untested_idls(document.getElementById("audio-param-idl").textContent);
  idl_array.add_idls(document.getElementById("gain-node-idl").textContent);
  // Assigned without var/let so the instance is a global: add_objects
  // below names it by the string "gain_node", which idlharness
  // presumably resolves in global scope — TODO confirm against
  // idlharness.js before "fixing" this.
  gain_node = (new AudioContext).createGain();
  idl_array.add_objects({GainNode: ["gain_node"]});
  idl_array.test();
})();
</script>
</body>
</html>