Update web-platform-tests to revision 7a6f5673ff5d146ca5c09c6a1b42b7706cfee328

WPT Sync Bot 2018-06-28 21:05:13 -04:00
parent e2fca1b228
commit 4787b28da3
261 changed files with 8195 additions and 4689 deletions

View file

@@ -0,0 +1,102 @@
function createTestBuffer(context, sampleFrameLength) {
let audioBuffer =
context.createBuffer(1, sampleFrameLength, context.sampleRate);
let channelData = audioBuffer.getChannelData(0);
// Create a simple linear ramp starting at zero, with each value in the buffer
// equal to its index position.
for (let i = 0; i < sampleFrameLength; ++i)
channelData[i] = i;
return audioBuffer;
}
function checkSingleTest(renderedBuffer, i, should) {
let renderedData = renderedBuffer.getChannelData(0);
let offsetFrame = i * testSpacingFrames;
let test = tests[i];
let expected = test.expected;
let description;
if (test.description) {
description = test.description;
} else {
// No description given, so create a basic one from the given test
// parameters.
description =
'loop from ' + test.loopStartFrame + ' -> ' + test.loopEndFrame;
if (test.offsetFrame)
description += ' with offset ' + test.offsetFrame;
if (test.playbackRate && test.playbackRate != 1)
description += ' with playbackRate of ' + test.playbackRate;
}
let framesToTest;
if (test.renderFrames)
framesToTest = test.renderFrames;
else if (test.durationFrames)
framesToTest = test.durationFrames;
// Verify that the output matches
let prefix = 'Case ' + i + ': ';
should(
renderedData.slice(offsetFrame, offsetFrame + framesToTest),
prefix + description)
.beEqualToArray(expected);
// Verify that we get all zeroes after the buffer (or duration) has passed.
should(
renderedData.slice(
offsetFrame + framesToTest, offsetFrame + testSpacingFrames),
prefix + description + ': tail')
.beConstantValueOf(0);
}
function checkAllTests(renderedBuffer, should) {
for (let i = 0; i < tests.length; ++i)
checkSingleTest(renderedBuffer, i, should);
}
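// Note: |tests| and |testSpacingFrames| are globals that the including test
// (e.g. audiobuffersource-start.html in this change) is expected to define.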
// Create the actual result by modulating the playbackRate or detune AudioParam
// of an ABSN. |modTarget| is the name of the AudioParam to modulate,
// |modOffset| is the offset (anchor) point of the modulation, and |modRange|
// is the range of the modulation.
//
// createSawtoothWithModulation(context, 'detune', 440, 1200);
//
// The above modulates detune over the range [-1200, 1200] cents on a 440 Hz
// sawtooth waveform.
function createSawtoothWithModulation(context, modTarget, modOffset, modRange) {
let lfo = context.createOscillator();
let amp = context.createGain();
// Create a sawtooth generator with the signal range of [0, 1].
let phasor = context.createBufferSource();
let phasorBuffer = context.createBuffer(1, sampleRate, sampleRate);
let phasorArray = phasorBuffer.getChannelData(0);
let phase = 0, phaseStep = 1 / sampleRate;
for (let i = 0; i < phasorArray.length; i++) {
phasorArray[i] = phase % 1.0;
phase += phaseStep;
}
phasor.buffer = phasorBuffer;
phasor.loop = true;
// 1Hz for audible (human-perceivable) parameter modulation by LFO.
lfo.frequency.value = 1.0;
amp.gain.value = modRange;
phasor.playbackRate.value = modOffset;
// The oscillator output should be amplified accordingly to drive the
// modulation within the desired range.
lfo.connect(amp);
amp.connect(phasor[modTarget]);
phasor.connect(context.destination);
lfo.start();
phasor.start();
}
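For reference, a minimal sketch of how a test might drive the modulation helper above; the offline context, render length, and parameter values here are illustrative assumptions, not part of this change:
let sampleRate = 44100;
let context = new OfflineAudioContext(1, 4 * sampleRate, sampleRate);
// A 440 Hz sawtooth whose playbackRate is swept between 220 and 660 by the
// 1 Hz LFO.
createSawtoothWithModulation(context, 'playbackRate', 440, 220);
context.startRendering().then(renderedBuffer => {
// Inspect renderedBuffer.getChannelData(0) for the modulated waveform.
});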

View file

@@ -33,6 +33,12 @@ window.Audit = (function() {
'use strict';
// NOTE: Moving this method (or any other code above) will change the location
// of 'CONSOLE ERROR...' message in the expected text files.
function _logError(message) {
console.error('[audit.js] ' + message);
}
function _logPassed(message) {
test(function(arg) {
assert_true(true);
@@ -70,13 +76,9 @@ window.Audit = (function() {
String(target.slice(0, options.numberOfArrayElements)) + '...';
targetString = '[' + arrayElements + ']';
} else if (target === null) {
// null is an object, so we need to handle this specially.
targetString = String(target);
} else {
// We're expecting String() to return something like "[object Foo]",
// so we split the string to get the object type "Foo". This is
// pretty fragile.
targetString = '' + String(targetString).split(/[\s\]]/)[1];
targetString = '' + String(target).split(/[\s\]]/)[1];
}
break;
default:
@@ -350,7 +352,7 @@ window.Audit = (function() {
*
* @example
* should('My promise', promise).beResolved().then((result) => {
* // log(result);
* log(result);
* });
*
* @result
@@ -1288,6 +1290,56 @@ window.Audit = (function() {
}
}
/**
* Load file from a given URL and pass ArrayBuffer to the following promise.
* @param {String} fileUrl file URL.
* @return {Promise}
*
* @example
* Audit.loadFileFromUrl('resources/my-sound.ogg').then((response) => {
* audioContext.decodeAudioData(response).then((audioBuffer) => {
* // Do something with AudioBuffer.
* });
* });
*/
function loadFileFromUrl(fileUrl) {
return new Promise((resolve, reject) => {
let xhr = new XMLHttpRequest();
xhr.open('GET', fileUrl, true);
xhr.responseType = 'arraybuffer';
xhr.onload = () => {
// |status = 0| is a workaround for the run_web_tests.py server, which
// appears to end the transaction prematurely without completing the
// request.
if (xhr.status === 200 || xhr.status === 0) {
resolve(xhr.response);
} else {
let errorMessage = 'loadFile: Request failed when loading ' +
fileUrl + '. ' + xhr.statusText + '. (status = ' + xhr.status +
')';
if (reject) {
reject(errorMessage);
} else {
throw new Error(errorMessage);
}
}
};
xhr.onerror = (event) => {
let errorMessage =
'loadFile: Network failure when loading ' + fileUrl + '.';
if (reject) {
reject(errorMessage);
} else {
throw new Error(errorMessage);
}
};
xhr.send();
});
}
/**
* @class Audit
* @description A WebAudio layout test task manager.
@@ -1313,12 +1365,18 @@ window.Audit = (function() {
if (options && options.requireResultFile == true) {
_logError(
'this test requires the explicit comparison with the ' +
'expected result when it runs with run-webkit-tests.');
'expected result when it runs with run_web_tests.py.');
}
return new TaskRunner();
},
/**
* Load file from a given URL and pass ArrayBuffer to the following promise.
* See |loadFileFromUrl| method for the detail.
*/
loadFileFromUrl: loadFileFromUrl
};
})();

View file

@@ -0,0 +1,159 @@
let sampleRate = 44100.0;
// How many grains to play.
let numberOfTests = 100;
// Duration of each grain to be played.
let duration = 0.01;
// Time step between the start of each grain. We need to add a little
// bit of silence so we can detect grain boundaries.
let timeStep = duration + .005;
// Step between the source-buffer offsets used for successive grains.
let grainOffsetStep = 0.001;
// How long to render to cover all of the grains.
let renderTime = (numberOfTests + 1) * timeStep;
let context;
let renderedData;
// Create a buffer containing the data that we want. The function f
// returns the desired value at sample frame k.
function createSignalBuffer(context, f) {
// Make sure the buffer has enough data for all of the possible
// grain offsets and durations. The additional 1 is for any
// round-off errors.
let signalLength =
Math.floor(1 + sampleRate * (numberOfTests * grainOffsetStep + duration));
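// With the values above this is floor(1 + 44100 * (100 * 0.001 + 0.01)),
// i.e. 4852 sample frames.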
let buffer = context.createBuffer(2, signalLength, sampleRate);
let data = buffer.getChannelData(0);
for (let k = 0; k < signalLength; ++k) {
data[k] = f(k);
}
return buffer;
}
// From the data array, find the start and end sample frame for each
// grain. This depends on the data having 0's between grains, and on
// each grain being strictly non-zero.
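// For example, data of [0, 0, 3, 4, 0, 0, 7, 0] yields
// {start: [2, 6], end: [4, 7]}.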
function findStartAndEndSamples(data) {
let nSamples = data.length;
let startTime = [];
let endTime = [];
let lookForStart = true;
// Look through the rendered data to find the start and stop
// times of each grain.
for (let k = 0; k < nSamples; ++k) {
if (lookForStart) {
// Find a non-zero point and record the start. We're not
// concerned with the value in this test, only that the
// grain started here.
if (data[k]) {
startTime.push(k);
lookForStart = false;
}
} else {
// Find a zero and record the end of the grain.
if (!data[k]) {
endTime.push(k);
lookForStart = true;
}
}
}
return {start: startTime, end: endTime};
}
function playGrain(context, source, time, offset, duration) {
let bufferSource = context.createBufferSource();
bufferSource.buffer = source;
bufferSource.connect(context.destination);
bufferSource.start(time, offset, duration);
}
// Play out all grains. Returns an object containing two arrays, one
// for the start times and one for the grain offset times.
function playAllGrains(context, source, numberOfNotes) {
let startTimes = new Array(numberOfNotes);
let offsets = new Array(numberOfNotes);
for (let k = 0; k < numberOfNotes; ++k) {
let timeOffset = k * timeStep;
let grainOffset = k * grainOffsetStep;
playGrain(context, source, timeOffset, grainOffset, duration);
startTimes[k] = timeOffset;
offsets[k] = grainOffset;
}
return {startTimes: startTimes, grainOffsetTimes: offsets};
}
// Verify that the start and end frames for each grain match our
// expected start and end frames.
function verifyStartAndEndFrames(startEndFrames, should) {
let startFrames = startEndFrames.start;
let endFrames = startEndFrames.end;
// Count of how many grains started at the incorrect time.
let errorCountStart = 0;
// Count of how many grains ended at the incorrect time.
let errorCountEnd = 0;
should(
startFrames.length == endFrames.length, 'Found all grain starts and ends')
.beTrue();
should(startFrames.length, 'Number of start frames').beEqualTo(numberOfTests);
should(endFrames.length, 'Number of end frames').beEqualTo(numberOfTests);
// Examine the start and stop times to see if they match our
// expectations.
for (let k = 0; k < startFrames.length; ++k) {
let expectedStart = timeToSampleFrame(k * timeStep, sampleRate);
// The expected end is the start frame plus the grain length in frames.
let expectedEnd = expectedStart +
grainLengthInSampleFrames(k * grainOffsetStep, duration, sampleRate);
if (startFrames[k] != expectedStart)
++errorCountStart;
if (endFrames[k] != expectedEnd)
++errorCountEnd;
should([startFrames[k], endFrames[k]], 'Pulse ' + k + ' boundary')
.beEqualToArray([expectedStart, expectedEnd]);
}
// Check that all the grains started or ended at the correct time.
if (!errorCountStart) {
should(
startFrames.length, 'Number of grains that started at the correct time')
.beEqualTo(numberOfTests);
} else {
should(
errorCountStart,
'Number of grains out of ' + numberOfTests +
' that started at the wrong time')
.beEqualTo(0);
}
if (!errorCountEnd) {
should(endFrames.length, 'Number of grains that ended at the correct time')
.beEqualTo(numberOfTests);
} else {
should(
errorCountEnd,
'Number of grains out of ' + numberOfTests +
' that ended at the wrong time')
.beEqualTo(0);
}
}
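The expectations above rely on timeToSampleFrame and grainLengthInSampleFrames from /webaudio/resources/audit-util.js, which is not part of this hunk. A rough sketch of the assumed behavior, for reading the checks above (the exact rounding lives in the real helper):
// Assumed behavior only; the canonical definitions are in audit-util.js.
function timeToSampleFrame(time, sampleRate) {
// Convert a time in seconds to the nearest sample frame.
return Math.floor(0.5 + time * sampleRate);
}
function grainLengthInSampleFrames(grainOffset, duration, sampleRate) {
// Number of frames covered by a grain of |duration| seconds starting at
// |grainOffset| seconds.
let startFrame = timeToSampleFrame(grainOffset, sampleRate);
let endFrame = timeToSampleFrame(grainOffset + duration, sampleRate);
return endFrame - startFrame;
}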

View file

@@ -0,0 +1,37 @@
<!doctype html>
<html>
<head>
<title>
Basic Test of AudioBufferSourceNode
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/start-stop-exceptions.js"></script>
</head>
<script id="layout-test-code">
let sampleRate = 44100;
let renderLengthSeconds = 0.25;
let oscTypes = ['sine', 'square', 'sawtooth', 'triangle', 'custom'];
let audit = Audit.createTaskRunner();
audit.define('start/stop exceptions', (task, should) => {
// We're not going to render anything, so make it simple
let context = new OfflineAudioContext(1, 1, sampleRate);
let node = new AudioBufferSourceNode(context);
testStartStop(should, node, [
{args: [0, -1], errorType: 'RangeError'},
{args: [0, 0, -1], errorType: 'RangeError'}
]);
task.done();
});
audit.run();
</script>
<body>
</body>
</html>
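The testStartStop helper comes from start-stop-exceptions.js, which is not shown in this change. Judging from the call site above, each {args, errorType} entry is presumably exercised roughly like this sketch (the real helper may cover additional cases):
function testStartStop(should, node, options) {
options.forEach(option => {
should(
() => node.start(...option.args),
'start(' + option.args.join(', ') + ')')
.throw(option.errorType);
});
}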

View file

@@ -0,0 +1,97 @@
<!DOCTYPE html>
<html>
<head>
<title>
audiobuffersource-channels.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context;
let source;
audit.define(
{
label: 'validate .buffer',
description:
'Validation of AudioBuffer in .buffer attribute setter'
},
function(task, should) {
context = new AudioContext();
source = context.createBufferSource();
// Make sure we can't set to something which isn't an AudioBuffer.
should(function() {
source.buffer = 57;
}, 'source.buffer = 57').throw('TypeError');
// It's ok to set the buffer to null.
should(function() {
source.buffer = null;
}, 'source.buffer = null').notThrow();
// Set the buffer to a valid AudioBuffer
let buffer =
new AudioBuffer({length: 128, sampleRate: context.sampleRate});
should(function() {
source.buffer = buffer;
}, 'source.buffer = buffer').notThrow();
// The buffer has been set; we can't set it again.
should(function() {
source.buffer =
new AudioBuffer({length: 128, sampleRate: context.sampleRate})
}, 'source.buffer = new buffer').throw('InvalidStateError');
// The buffer has been set; it's ok to set it to null.
should(function() {
source.buffer = null;
}, 'source.buffer = null again').notThrow();
// The buffer was already set (and set to null). Can't set it
// again.
should(function() {
source.buffer = buffer;
}, 'source.buffer = buffer again').throw('InvalidStateError');
// But setting to null is ok.
should(function() {
source.buffer = null;
}, 'source.buffer = null after setting to null').notThrow();
// Check that mono buffer can be set.
should(function() {
let monoBuffer =
context.createBuffer(1, 1024, context.sampleRate);
let testSource = context.createBufferSource();
testSource.buffer = monoBuffer;
}, 'Setting source with mono buffer').notThrow();
// Check that stereo buffer can be set.
should(function() {
let stereoBuffer =
context.createBuffer(2, 1024, context.sampleRate);
let testSource = context.createBufferSource();
testSource.buffer = stereoBuffer;
}, 'Setting source with stereo buffer').notThrow();
// Check buffers with more than two channels.
for (let i = 3; i < 10; ++i) {
should(function() {
let buffer = context.createBuffer(i, 1024, context.sampleRate);
let testSource = context.createBufferSource();
testSource.buffer = buffer;
}, 'Setting source with ' + i + ' channels buffer').notThrow();
}
task.done();
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,40 @@
<!DOCTYPE html>
<html>
<head>
<title>
audiobuffersource-ended.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/audiobuffersource-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context;
let source;
audit.define(
'AudioBufferSourceNode calls its onended EventListener',
function(task, should) {
let sampleRate = 44100.0;
let numberOfFrames = 32;
context = new OfflineAudioContext(1, numberOfFrames, sampleRate);
source = context.createBufferSource();
source.buffer = createTestBuffer(context, numberOfFrames);
source.connect(context.destination);
source.onended = function() {
should(true, 'source.onended called').beTrue();
task.done();
};
source.start(0);
context.startRendering();
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,71 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Start Grain with Delayed Buffer Setting
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context;
let source;
let buffer;
let renderedData;
let sampleRate = 44100;
let testDurationSec = 1;
let testDurationSamples = testDurationSec * sampleRate;
let startTime = 0.9 * testDurationSec;
audit.define(
'Test setting the source buffer after starting the grain',
function(task, should) {
context =
new OfflineAudioContext(1, testDurationSamples, sampleRate);
buffer = createConstantBuffer(context, testDurationSamples, 1);
source = context.createBufferSource();
source.connect(context.destination);
// Start the source BEFORE we set the buffer. The grain offset and
// duration aren't important, as long as we specify some offset.
source.start(startTime, .1);
source.buffer = buffer;
// Render it!
context.startRendering()
.then(function(buffer) {
checkResult(buffer, should);
})
.then(task.done.bind(task));
});
function checkResult(buffer, should) {
let success = false;
renderedData = buffer.getChannelData(0);
// Check that the rendered data is not all zeroes. Any non-zero data
// means the test passed.
let startFrame = Math.round(startTime * sampleRate);
for (let k = 0; k < renderedData.length; ++k) {
if (renderedData[k]) {
success = true;
break;
}
}
should(success, 'Buffer was played').beTrue();
}
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,78 @@
<!DOCTYPE html>
<!--
Tests that AudioBufferSourceNode supports 5.1-channel buffers.
-->
<html>
<head>
<title>
audiobuffersource-multi-channels.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/mix-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context;
let expectedAudio;
audit.define('initialize', (task, should) => {
// Create offline audio context
let sampleRate = 44100.0;
should(() => {
context = new OfflineAudioContext(
6, sampleRate * toneLengthSeconds, sampleRate);
}, 'Creating context for testing').notThrow();
should(
Audit
.loadFileFromUrl(
'audiobuffersource-multi-channels-expected.wav')
.then(arrayBuffer => {
context.decodeAudioData(arrayBuffer).then(audioBuffer => {
expectedAudio = audioBuffer;
task.done();
});
}),
'Fetching expected audio')
.beResolved();
});
audit.define(
{label: 'test', description: 'AudioBufferSource with 5.1 buffer'},
(task, should) => {
let toneBuffer =
createToneBuffer(context, 440, toneLengthSeconds, 6);
let source = context.createBufferSource();
source.buffer = toneBuffer;
source.connect(context.destination);
source.start(0);
context.startRendering()
.then(renderedAudio => {
// Compute a threshold based on the maximum error, |maxUlp|,
// in ULP. This is experimentally determined. Assuming that
// the reference file is a 16-bit wav file, the max values in
// the wave file are +/- 32768.
let maxUlp = 1;
let threshold = maxUlp / 32768;
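// With maxUlp = 1 this is 1 / 32768, roughly 3.05e-5 (one 16-bit
// quantization step).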
for (let k = 0; k < renderedAudio.numberOfChannels; ++k) {
should(
renderedAudio.getChannelData(k),
'Rendered audio for channel ' + k)
.beCloseToArray(
expectedAudio.getChannelData(k),
{absoluteThreshold: threshold});
}
})
.then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,47 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test AudioBufferSourceNode With Looping a Single-Sample Buffer
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let sampleRate = 44100;
let testDurationSamples = 1000;
audit.define('one-sample-loop', function(task, should) {
// Create the offline context for the test.
let context =
new OfflineAudioContext(1, testDurationSamples, sampleRate);
// Create the single sample buffer
let buffer = createConstantBuffer(context, 1, 1);
// Create the source and connect it to the destination
let source = context.createBufferSource();
source.buffer = buffer;
source.loop = true;
source.connect(context.destination);
source.start();
// Render it!
context.startRendering()
.then(function(audioBuffer) {
should(audioBuffer.getChannelData(0), 'Rendered data')
.beConstantValueOf(1);
})
.then(task.done.bind(task));
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,80 @@
<!DOCTYPE html>
<html>
<head>
<title>
audiobuffersource-playbackrate-zero.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
// The sample rate should be a multiple of 128 so that the change of an
// AudioParam can be observed at the beginning of a render quantum
// (playbackRate is k-rate). This is the minimum sample rate in the valid
// sample rate range.
let sampleRate = 8192;
// The render duration in seconds, and the length in samples.
let renderDuration = 1.0;
let renderLength = renderDuration * sampleRate;
let context = new OfflineAudioContext(1, renderLength, sampleRate);
let audit = Audit.createTaskRunner();
// Task: Render the actual buffer and compare with the reference.
audit.define('synthesize-verify', (task, should) => {
let ramp = context.createBufferSource();
let rampBuffer = createLinearRampBuffer(context, renderLength);
ramp.buffer = rampBuffer;
ramp.connect(context.destination);
ramp.start();
// Leave the playbackRate at 1 for the first half, then change it to zero
// exactly at the halfway point. A zero playback rate should hold the
// sample value at the current buffer index (sample-and-hold).
ramp.playbackRate.setValueAtTime(1.0, 0.0);
ramp.playbackRate.setValueAtTime(0.0, renderDuration / 2);
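// renderDuration / 2 is frame 4096 = 32 * 128, so the change to zero lands
// exactly on a render quantum boundary for the k-rate playbackRate.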
context.startRendering()
.then(function(renderedBuffer) {
let data = renderedBuffer.getChannelData(0);
let rampData = rampBuffer.getChannelData(0);
let half = rampData.length / 2;
let passed = true;
let i;
for (i = 1; i < rampData.length; i++) {
if (i < half) {
// Before the half position, the actual should match with the
// original ramp data.
if (data[i] !== rampData[i]) {
passed = false;
break;
}
} else {
// From the half position, the actual value should not change.
if (data[i] !== rampData[half]) {
passed = false;
break;
}
}
}
should(passed, 'The zero playbackRate')
.message(
'held the sample value correctly',
'should hold the sample value. ' +
'Expected ' + rampData[half] + ' but got ' + data[i] +
' at the index ' + i);
})
.then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,174 @@
<!DOCTYPE html>
<html>
<head>
<title>
audiobuffersource-start.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/audiobuffersource-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
// The following test cases assume an AudioBuffer of length 8 whose PCM
// data is a linear ramp, 0, 1, 2, 3,...
let tests = [
{
description:
'start(when): implicitly play whole buffer from beginning to end',
offsetFrame: 'none',
durationFrames: 'none',
renderFrames: 16,
playbackRate: 1,
expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 0): play whole buffer from beginning to end explicitly giving offset of 0',
offsetFrame: 0,
durationFrames: 'none',
renderFrames: 16,
playbackRate: 1,
expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 0, 8_frames): play whole buffer from beginning to end explicitly giving offset of 0 and duration of 8 frames',
offsetFrame: 0,
durationFrames: 8,
renderFrames: 16,
playbackRate: 1,
expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 4_frames): play with explicit non-zero offset',
offsetFrame: 4,
durationFrames: 'none',
renderFrames: 16,
playbackRate: 1,
expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 4_frames, 4_frames): play with explicit non-zero offset and duration',
offsetFrame: 4,
durationFrames: 4,
renderFrames: 16,
playbackRate: 1,
expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 7_frames): play with explicit non-zero offset near end of buffer',
offsetFrame: 7,
durationFrames: 1,
renderFrames: 16,
playbackRate: 1,
expected: [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 8_frames): play with explicit offset at end of buffer',
offsetFrame: 8,
durationFrames: 0,
renderFrames: 16,
playbackRate: 1,
expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
{
description:
'start(when, 9_frames): play with explicit offset past end of buffer',
offsetFrame: 8,
durationFrames: 0,
renderFrames: 16,
playbackRate: 1,
expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
// When the duration exceeds the buffer, just play to the end of the
// buffer. (This is different from the case when we're looping, which is
// tested in loop-comprehensive.)
{
description:
'start(when, 0, 15_frames): play with whole buffer, with long duration (clipped)',
offsetFrame: 0,
durationFrames: 15,
renderFrames: 16,
playbackRate: 1,
expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
},
// Enable test when AudioBufferSourceNode hack is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=77224
// {
//   description: 'start(when, 3_frames, 3_frames): play a middle section
//       with explicit offset and duration',
//   offsetFrame: 3, durationFrames: 3, renderFrames: 16, playbackRate: 1,
//   expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
// },
];
let sampleRate = 44100;
let buffer;
let bufferFrameLength = 8;
let testSpacingFrames = 32;
let testSpacingSeconds = testSpacingFrames / sampleRate;
let totalRenderLengthFrames = tests.length * testSpacingFrames;
function runLoopTest(context, testNumber, test) {
let source = context.createBufferSource();
source.buffer = buffer;
source.playbackRate.value = test.playbackRate;
source.connect(context.destination);
// Render each test one after the other, spaced apart by
// testSpacingSeconds.
let startTime = testNumber * testSpacingSeconds;
if (test.offsetFrame == 'none' && test.durationFrames == 'none') {
source.start(startTime);
} else if (test.durationFrames == 'none') {
let offset = test.offsetFrame / context.sampleRate;
source.start(startTime, offset);
} else {
let offset = test.offsetFrame / context.sampleRate;
let duration = test.durationFrames / context.sampleRate;
source.start(startTime, offset, duration);
}
}
audit.define(
'Tests AudioBufferSourceNode start()', function(task, should) {
// Create offline audio context.
let context =
new OfflineAudioContext(1, totalRenderLengthFrames, sampleRate);
buffer = createTestBuffer(context, bufferFrameLength);
for (let i = 0; i < tests.length; ++i)
runLoopTest(context, i, tests[i]);
context.startRendering().then(function(audioBuffer) {
checkAllTests(audioBuffer, should);
task.done();
});
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,101 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Onended Event Listener
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let sampleRate = 44100;
let renderLengthSeconds = 1;
let renderLengthFrames = renderLengthSeconds * sampleRate;
// Length of the source buffer. Anything less than the render length is
// fine.
let sourceBufferLengthFrames = renderLengthFrames / 8;
// When to stop the oscillator. Anything less than the render time is
// fine.
let stopTime = renderLengthSeconds / 8;
let audit = Audit.createTaskRunner();
audit.define('absn-set-onended', (task, should) => {
// Test that the onended event for an AudioBufferSourceNode is fired
// when it is set directly.
let context =
new OfflineAudioContext(1, renderLengthFrames, sampleRate);
let buffer = context.createBuffer(
1, sourceBufferLengthFrames, context.sampleRate);
let source = context.createBufferSource();
source.buffer = buffer;
source.connect(context.destination);
source.onended = function(e) {
should(
true, 'AudioBufferSource.onended called when ended set directly')
.beEqualTo(true);
};
source.start();
context.startRendering().then(() => task.done());
});
audit.define('absn-add-listener', (task, should) => {
// Test that the onended event for an AudioBufferSourceNode is fired
// when addEventListener is used to set the handler.
let context =
new OfflineAudioContext(1, renderLengthFrames, sampleRate);
let buffer = context.createBuffer(
1, sourceBufferLengthFrames, context.sampleRate);
let source = context.createBufferSource();
source.buffer = buffer;
source.connect(context.destination);
source.addEventListener('ended', function(e) {
should(
true,
'AudioBufferSource.onended called when using addEventListener')
.beEqualTo(true);
});
source.start();
context.startRendering().then(() => task.done());
});
audit.define('osc-set-onended', (task, should) => {
// Test that the onended event for an OscillatorNode is fired when it is
// set directly.
let context =
new OfflineAudioContext(1, renderLengthFrames, sampleRate);
let source = context.createOscillator();
source.connect(context.destination);
source.onended = function(e) {
should(true, 'Oscillator.onended called when ended set directly')
.beEqualTo(true);
};
source.start();
source.stop(stopTime);
context.startRendering().then(() => task.done());
});
audit.define('osc-add-listener', (task, should) => {
// Test that the onended event for an OscillatorNode is fired when
// addEventListener is used to set the handler.
let context =
new OfflineAudioContext(1, renderLengthFrames, sampleRate);
let source = context.createOscillator();
source.connect(context.destination);
source.addEventListener('ended', function(e) {
should(true, 'Oscillator.onended called when using addEventListener')
.beEqualTo(true);
});
source.start();
source.stop(stopTime);
context.startRendering().then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test Scheduled Sources with Huge Time Limits
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/audioparam-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let sampleRate = 48000;
let renderFrames = 1000;
let audit = Audit.createTaskRunner();
audit.define('buffersource: huge stop time', (task, should) => {
// We only need to generate a small number of frames for this test.
let context = new OfflineAudioContext(1, renderFrames, sampleRate);
let src = context.createBufferSource();
// Constant source of amplitude 1, looping.
src.buffer = createConstantBuffer(context, 1, 1);
src.loop = true;
// Create the graph and go!
let endTime = 1e300;
src.connect(context.destination);
src.start();
src.stop(endTime);
context.startRendering()
.then(function(resultBuffer) {
let result = resultBuffer.getChannelData(0);
should(
result, 'Output from AudioBufferSource.stop(' + endTime + ')')
.beConstantValueOf(1);
})
.then(() => task.done());
});
audit.define('oscillator: huge stop time', (task, should) => {
// We only need to generate a small number of frames for this test.
let context = new OfflineAudioContext(1, renderFrames, sampleRate);
let src = context.createOscillator();
// Create the graph and go!
let endTime = 1e300;
src.connect(context.destination);
src.start();
src.stop(endTime);
context.startRendering()
.then(function(resultBuffer) {
let result = resultBuffer.getChannelData(0);
// The buffer should not be empty. Just find the max and verify
// that it's not zero.
let max = Math.max.apply(null, result);
should(
max, 'Peak amplitude from oscillator.stop(' + endTime + ')')
.beGreaterThan(0);
})
.then(() => task.done());
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,121 @@
<!DOCTYPE html>
<html>
<head>
<title>
note-grain-on-play.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/note-grain-on-testing.js"></script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
// To test noteGrainOn, a single ramp signal is created.
// Various sections of the ramp are rendered by noteGrainOn() at
// different times, and we verify that the actual output
// consists of the correct section of the ramp at the correct
// time.
let linearRampBuffer;
// Array of the grain offset used for each ramp played.
let grainOffsetTime = [];
// Verify the received signal is a ramp from the correct section
// of our ramp signal.
function verifyGrain(renderedData, startFrame, endFrame, grainIndex) {
let grainOffsetFrame =
timeToSampleFrame(grainOffsetTime[grainIndex], sampleRate);
let grainFrameLength = endFrame - startFrame;
let ramp = linearRampBuffer.getChannelData(0);
let isCorrect = true;
let expected;
let actual;
let frame;
for (let k = 0; k < grainFrameLength; ++k) {
if (renderedData[startFrame + k] != ramp[grainOffsetFrame + k]) {
expected = ramp[grainOffsetFrame + k];
actual = renderedData[startFrame + k];
frame = startFrame + k;
isCorrect = false;
break;
}
}
return {
verified: isCorrect,
expected: expected,
actual: actual,
frame: frame
};
}
function checkResult(buffer, should) {
renderedData = buffer.getChannelData(0);
let nSamples = renderedData.length;
// Number of grains that we found that have incorrect data.
let invalidGrainDataCount = 0;
let startEndFrames = findStartAndEndSamples(renderedData);
// Verify the start and stop times. Not strictly needed for
// this test, but it's useful to know if the ramp data
// appears to be incorrect.
verifyStartAndEndFrames(startEndFrames, should);
// Loop through each of the rendered grains and check that
// each grain contains our expected ramp.
for (let k = 0; k < startEndFrames.start.length; ++k) {
// Verify that the rendered data matches the expected
// section of our ramp signal.
let result = verifyGrain(
renderedData, startEndFrames.start[k], startEndFrames.end[k], k);
should(result.verified, 'Pulse ' + k + ' contained the expected data')
.beTrue();
if (!result.verified)
++invalidGrainDataCount;
}
should(
invalidGrainDataCount,
'Number of grains that did not contain the expected data')
.beEqualTo(0);
}
audit.define(
{
label: 'note-grain-on-play',
description: 'Test noteGrainOn offset rendering'
},
function(task, should) {
// Create offline audio context.
context =
new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
// Create a linear ramp for testing noteGrainOn.
linearRampBuffer = createSignalBuffer(context, function(k) {
// Want the ramp to start
// with 1, not 0.
return k + 1;
});
let grainInfo =
playAllGrains(context, linearRampBuffer, numberOfTests);
grainOffsetTime = grainInfo.grainOffsetTimes;
context.startRendering().then(function(audioBuffer) {
checkResult(audioBuffer, should);
task.done();
});
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,47 @@
<!DOCTYPE html>
<html>
<head>
<title>
note-grain-on-timing.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/note-grain-on-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let squarePulseBuffer;
function checkResult(buffer, should) {
renderedData = buffer.getChannelData(0);
let nSamples = renderedData.length;
let startEndFrames = findStartAndEndSamples(renderedData);
verifyStartAndEndFrames(startEndFrames, should);
}
audit.define('Test timing of noteGrainOn', function(task, should) {
// Create offline audio context.
context =
new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
squarePulseBuffer = createSignalBuffer(context, function(k) {
return 1;
});
playAllGrains(context, squarePulseBuffer, numberOfTests);
context.startRendering().then(function(audioBuffer) {
checkResult(audioBuffer, should);
task.done();
});
});
audit.run();
</script>
</body>
</html>

View file

@@ -0,0 +1,110 @@
<!DOCTYPE html>
<!--
Tests that we are able to schedule a series of notes to playback with sample-accuracy.
We use an impulse so we can tell exactly where the rendering is happening.
-->
<html>
<head>
<title>
sample-accurate-scheduling.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
<script src="/webaudio/resources/buffer-loader.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let sampleRate = 44100.0;
let lengthInSeconds = 4;
let context = 0;
let bufferLoader = 0;
let impulse;
// See if we can render at exactly these sample offsets.
let sampleOffsets = [0, 3, 512, 517, 1000, 1005, 20000, 21234, 37590];
function createImpulse() {
// An impulse has a value of 1 at time 0, and is otherwise 0.
impulse = context.createBuffer(2, 512, sampleRate);
let sampleDataL = impulse.getChannelData(0);
let sampleDataR = impulse.getChannelData(1);
sampleDataL[0] = 1.0;
sampleDataR[0] = 1.0;
}
function playNote(time) {
let bufferSource = context.createBufferSource();
bufferSource.buffer = impulse;
bufferSource.connect(context.destination);
bufferSource.start(time);
}
function checkSampleAccuracy(buffer, should) {
let bufferDataL = buffer.getChannelData(0);
let bufferDataR = buffer.getChannelData(1);
let impulseCount = 0;
let badOffsetCount = 0;
// Left and right channels must be the same.
should(bufferDataL, 'Content of left and right channels match and')
.beEqualToArray(bufferDataR);
// Go through every sample and make sure it's 0, except at positions in
// sampleOffsets.
for (let i = 0; i < buffer.length; ++i) {
if (bufferDataL[i] != 0) {
// Make sure this index is in sampleOffsets
let found = false;
for (let j = 0; j < sampleOffsets.length; ++j) {
if (sampleOffsets[j] == i) {
found = true;
break;
}
}
++impulseCount;
should(found, 'Non-zero sample found at sample offset ' + i)
.beTrue();
if (!found) {
++badOffsetCount;
}
}
}
should(impulseCount, 'Number of impulses found')
.beEqualTo(sampleOffsets.length);
if (impulseCount == sampleOffsets.length) {
should(badOffsetCount, 'bad offset').beEqualTo(0);
}
}
audit.define(
{label: 'test', description: 'Test sample-accurate scheduling'},
function(task, should) {
// Create offline audio context.
context = new OfflineAudioContext(
2, sampleRate * lengthInSeconds, sampleRate);
createImpulse();
for (let i = 0; i < sampleOffsets.length; ++i) {
let timeInSeconds = sampleOffsets[i] / sampleRate;
playNote(timeInSeconds);
}
context.startRendering().then(function(buffer) {
checkSampleAccuracy(buffer, should);
task.done();
});
});
audit.run();
</script>
</body>
</html>

View file

@@ -19,9 +19,10 @@
should(timestamp.contextTime, 'timestamp.contextTime').exist();
should(timestamp.performanceTime, 'timestamp.performanceTime').exist();
should(timestamp.contextTime, 'timestamp.contextTime').beEqualTo(0);
should(timestamp.contextTime, 'timestamp.contextTime')
.beGreaterThanOrEqualTo(0);
should(timestamp.performanceTime, 'timestamp.performanceTime')
.beEqualTo(0);
.beGreaterThanOrEqualTo(0);
task.done();
});

View file

@@ -0,0 +1,51 @@
<!DOCTYPE html>
<html>
<head>
<title>
convolver-setBuffer-already-has-value.html
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
audit.define('test', (task, should) => {
let context = new AudioContext();
let audioBuffer = new AudioBuffer(
{numberOfChannels: 1, length: 1, sampleRate: context.sampleRate});
let convolver = context.createConvolver();
should(() => {
convolver.buffer = null;
}, 'Set buffer to null before setting it to non-null').notThrow();
should(() => {
convolver.buffer = audioBuffer;
}, 'Set buffer first normally').notThrow();
should(() => {
convolver.buffer = audioBuffer;
}, 'Set buffer a second time').throw('InvalidStateError');
should(() => {
convolver.buffer = null;
}, 'Set buffer to null').notThrow();
should(() => {
convolver.buffer = null;
}, 'Set buffer to null again, to make sure').notThrow();
should(() => {
convolver.buffer = audioBuffer;
}, 'Set buffer to non-null to verify it throws an error')
.throw('InvalidStateError');
task.done();
});
audit.run();
</script>
</body>
</html>