Update web-platform-tests to revision 00fa50687cab43b660296389acad6cc48717f1d1

This commit is contained in:
WPT Sync Bot 2019-01-28 20:36:36 -05:00
parent 07d53e32c4
commit 28bbe1473c
58 changed files with 2119 additions and 360 deletions

View file

@ -5,15 +5,16 @@ let signal;
let renderedBuffer;
let renderedData;
let sampleRate = 44100.0;
// Use a power of two to eliminate round-off in converting frame to time
let sampleRate = 32768;
let pulseLengthFrames = .1 * sampleRate;
// Maximum allowed error for the test to succeed. Experimentally determined.
let maxAllowedError = 5.9e-8;
// This must be large enough so that the filtered result is
// essentially zero. See comments for createTestAndRun.
let timeStep = .1;
// This must be large enough so that the filtered result is essentially zero.
// See comments for createTestAndRun. This must be a whole number of frames.
let timeStep = Math.ceil(.1 * sampleRate) / sampleRate;
// Maximum number of filters we can process (mostly for setting the
// render length correctly).

View file

@ -1,10 +1,13 @@
let sampleRate = 44100.0;
// Use a power of two to eliminate round-off when converting frames to time and
// vice versa.
let sampleRate = 32768;
// How many panner nodes to create for the test.
let nodesToCreate = 100;
// Time step when each panner node starts.
let timeStep = 0.001;
// Time step when each panner node starts. Make sure it starts on a frame
// boundary.
let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
// Make sure we render long enough to get all of our nodes.
let renderLengthSeconds = timeStep * (nodesToCreate + 1);
@ -134,7 +137,7 @@ function checkDistanceResult(renderedBuffer, model, should) {
// The max allowed error between the actual gain and the expected
// value. This is determined experimentally. Set to 0 to see
// what the actual errors are.
let maxAllowedError = 3.3e-6;
let maxAllowedError = 2.2720e-6;
let success = true;

View file

@ -1,17 +1,23 @@
let sampleRate = 44100.0;
// Use a power of two to eliminate round-off converting from frames to time.
let sampleRate = 32768;
// How many grains to play.
let numberOfTests = 100;
// Duration of each grain to be played
let duration = 0.01;
// Duration of each grain to be played. Make a whole number of frames
let duration = Math.floor(0.01 * sampleRate) / sampleRate;
// A little extra bit of silence between grain boundaries. Must be a whole
// number of frames.
let grainGap = Math.floor(0.005 * sampleRate) / sampleRate;
// Time step between the start of each grain. We need to add a little
// bit of silence so we can detect grain boundaries
let timeStep = duration + .005;
let timeStep = duration + grainGap;
// Time step between the start for each grain.
let grainOffsetStep = 0.001;
// Time step between the start for each grain. Must be a whole number of
// frames.
let grainOffsetStep = Math.floor(0.001 * sampleRate) / sampleRate;
// How long to render to cover all of the grains.
let renderTime = (numberOfTests + 1) * timeStep;

View file

@ -1,9 +1,12 @@
let sampleRate = 44100.0;
// Use a power of two to eliminate round-off when converting frames to time and
// vice versa.
let sampleRate = 32768;
let numberOfChannels = 1;
// Time step when each panner node starts.
let timeStep = 0.001;
// Time step when each panner node starts. Make sure it starts on a frame
// boundary.
let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
// Length of the impulse signal.
let pulseLengthFrames = Math.round(timeStep * sampleRate);
@ -114,7 +117,7 @@ function checkResult(renderedBuffer, should) {
// The max error we allow between the rendered impulse and the
// expected value. This value is experimentally determined. Set
// to 0 to make the test fail to see what the actual error is.
let maxAllowedError = 1.3e-6;
let maxAllowedError = 1.1597e-6;
let success = true;

View file

@ -3,10 +3,12 @@ let StereoPannerTest = (function() {
// Constants
let PI_OVER_TWO = Math.PI * 0.5;
let gSampleRate = 44100;
// Use a power of two to eliminate any round-off when converting frames to
// time.
let gSampleRate = 32768;
// Time step when each panner node starts.
let gTimeStep = 0.001;
// Time step when each panner node starts. Make sure this is on a frame boundary.
let gTimeStep = Math.floor(0.001 * gSampleRate) / gSampleRate;
// How many panner nodes to create for the test
let gNodesToCreate = 100;
@ -77,7 +79,7 @@ let StereoPannerTest = (function() {
// The max error we allow between the rendered impulse and the
// expected value. This value is experimentally determined. Set
// to 0 to make the test fail to see what the actual error is.
this.maxAllowedError = 1.3e-6;
this.maxAllowedError = 9.8015e-8;
// Max (absolute) error and the index of the maxima for the left
// and right channels.

View file

@ -74,6 +74,42 @@
.then(() => task.done());
});
// Verify that a source started between frame boundaries with
// playbackRate 0 never advances past its first sample: output is
// silent before the (rounded-up) start frame and constant at the
// buffer's first sample value from then on.
audit.define('subsample start with playback rate 0', (task, should) => {
let context = new OfflineAudioContext(1, renderLength, sampleRate);
// Ramp starting at |startValue| so the first buffer sample is easy to
// distinguish from silence.
let rampBuffer = new AudioBuffer(
{length: renderLength, sampleRate: context.sampleRate});
let data = new Float32Array(renderLength);
let startValue = 5;
for (let k = 0; k < data.length; ++k) {
data[k] = k + startValue;
}
rampBuffer.copyToChannel(data, 0);
let src = new AudioBufferSourceNode(
context, {buffer: rampBuffer, playbackRate: 0});
src.connect(context.destination);
// Purposely start the source between frame boundaries
let startFrame = 27.3;
src.start(startFrame / context.sampleRate);
context.startRendering()
.then(audioBuffer => {
// First frame at which the output may be non-zero.
let actualStartFrame = Math.ceil(startFrame);
let audio = audioBuffer.getChannelData(0);
should(
audio.slice(0, actualStartFrame),
`output[0:${actualStartFrame - 1}]`)
.beConstantValueOf(0);
should(
audio.slice(actualStartFrame), `output[${actualStartFrame}:]`)
.beConstantValueOf(startValue);
})
.then(() => task.done());
});
audit.run();
</script>
</body>

View file

@ -0,0 +1,423 @@
<!doctype html>
<html>
<head>
<title>
Test Sub-Sample Accurate Scheduling for ABSN
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script>
// Power of two so there's no roundoff converting from integer frames to
// time.
let sampleRate = 32768;
let audit = Audit.createTaskRunner();
// Verify that an AudioBufferSourceNode started between frame
// boundaries (sub-sample accurate start) begins with correctly
// interpolated output rather than output rounded to a frame boundary.
audit.define('sub-sample accurate start', (task, should) => {
// There are two channels, one for each source. Only a short render is
// needed for this test.
let context = new OfflineAudioContext(
{numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
// Merge each source into its own output channel so both can be
// checked from a single rendering.
let merger = new ChannelMergerNode(
context, {numberOfInputs: context.destination.channelCount});
merger.connect(context.destination);
// Use a simple linear ramp for the sources with integer steps starting
// at 1 to make it easy to verify and test that we have sub-sample
// accurate start. Ramp starts at 1 so we can easily tell when the
// source starts.
let rampBuffer = new AudioBuffer(
{length: context.length, sampleRate: context.sampleRate});
let r = rampBuffer.getChannelData(0);
for (let k = 0; k < r.length; ++k) {
r[k] = k + 1;
}
const src0 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
const src1 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
// Frame where sources should start. This is pretty arbitrary, but one
// should be close to an integer and the other should be close to the
// next integer. We do this to catch the case where rounding of the
// start frame is being done. Rounding is incorrect.
const startFrame = 33;
const startFrame0 = startFrame + 0.1;
const startFrame1 = startFrame + 0.9;
src0.connect(merger, 0, 0);
src1.connect(merger, 0, 1);
src0.start(startFrame0 / context.sampleRate);
src1.start(startFrame1 / context.sampleRate);
context.startRendering()
.then(audioBuffer => {
const output0 = audioBuffer.getChannelData(0);
const output1 = audioBuffer.getChannelData(1);
// Compute the expected output by interpolating the ramp buffer of
// the sources if they started at the given frame.
const ramp = rampBuffer.getChannelData(0);
const expected0 = interpolateRamp(ramp, startFrame0);
const expected1 = interpolateRamp(ramp, startFrame1);
// Verify output0 has the correct values
// For information only
should(startFrame0, 'src0 start frame').beEqualTo(startFrame0);
// Output must be zero before the source start frame, and it must
// be interpolated correctly after the start frame. The
// absoluteThreshold below is currently set for Chrome which does
// linear interpolation. This needs to be updated eventually if
// other browsers do not use interpolation.
should(
output0.slice(0, startFrame + 1), `output0[0:${startFrame}]`)
.beConstantValueOf(0);
should(
output0.slice(startFrame + 1, expected0.length),
`output0[${startFrame + 1}:${expected0.length - 1}]`)
.beCloseToArray(
expected0.slice(startFrame + 1), {absoluteThreshold: 0});
// Verify output1 has the correct values. Same approach as for
// output0.
should(startFrame1, 'src1 start frame').beEqualTo(startFrame1);
should(
output1.slice(0, startFrame + 1), `output1[0:${startFrame}]`)
.beConstantValueOf(0);
should(
output1.slice(startFrame + 1, expected1.length),
`output1[${startFrame + 1}:${expected1.length - 1}]`)
.beCloseToArray(
expected1.slice(startFrame + 1), {absoluteThreshold: 0});
})
.then(() => task.done());
});
// Verify that an AudioBufferSourceNode stopped between frame
// boundaries (sub-sample accurate stop) produces output up to, but not
// past, the requested stop point, without rounding the stop frame.
audit.define('sub-sample accurate stop', (task, should) => {
// There are three channels, one for each source. Only a short render
// is needed for this test.
let context = new OfflineAudioContext(
{numberOfChannels: 3, length: 128, sampleRate: sampleRate});
let merger = new ChannelMergerNode(
context, {numberOfInputs: context.destination.channelCount});
merger.connect(context.destination);
// The source can be a simple constant for this test.
let buffer = new AudioBuffer(
{length: context.length, sampleRate: context.sampleRate});
buffer.getChannelData(0).fill(1);
const src0 = new AudioBufferSourceNode(context, {buffer: buffer});
const src1 = new AudioBufferSourceNode(context, {buffer: buffer});
const src2 = new AudioBufferSourceNode(context, {buffer: buffer});
// Frame where sources should stop. This is pretty arbitrary, but one
// should be an integer, one should be close to an integer and the other
// should be close to the next integer. This is to catch the case where
// rounding is used for the end frame. Rounding is incorrect.
const endFrame = 33;
const endFrame1 = endFrame + 0.1;
const endFrame2 = endFrame + 0.9;
src0.connect(merger, 0, 0);
src1.connect(merger, 0, 1);
src2.connect(merger, 0, 2);
src0.start(0);
src1.start(0);
src2.start(0);
src0.stop(endFrame / context.sampleRate);
src1.stop(endFrame1 / context.sampleRate);
src2.stop(endFrame2 / context.sampleRate);
context.startRendering()
.then(audioBuffer => {
let actual0 = audioBuffer.getChannelData(0);
let actual1 = audioBuffer.getChannelData(1);
let actual2 = audioBuffer.getChannelData(2);
// Just verify that we stopped at the right time.
// This is the case where the end frame is an integer. Since the
// first output ends on an exact frame, the output must be zero at
// that frame number. We print the end frame for information only;
// it makes interpretation of the rest easier.
should(endFrame - 1, 'src0 end frame')
.beEqualTo(endFrame - 1);
should(actual0[endFrame - 1], `output0[${endFrame - 1}]`)
.notBeEqualTo(0);
should(actual0.slice(endFrame),
`output0[${endFrame}:]`)
.beConstantValueOf(0);
// The case where the end frame is just a little above an integer.
// The output must not be zero just before the end and must be zero
// after.
should(endFrame1, 'src1 end frame')
.beEqualTo(endFrame1);
should(actual1[endFrame], `output1[${endFrame}]`)
.notBeEqualTo(0);
should(actual1.slice(endFrame + 1),
`output1[${endFrame + 1}:]`)
.beConstantValueOf(0);
// The case where the end frame is just a little below an integer.
// The output must not be zero just before the end and must be zero
// after.
should(endFrame2, 'src2 end frame')
.beEqualTo(endFrame2);
should(actual2[endFrame], `output2[${endFrame}]`)
.notBeEqualTo(0);
should(actual2.slice(endFrame + 1),
`output2[${endFrame + 1}:]`)
.beConstantValueOf(0);
})
.then(() => task.done());
});
// Verify sub-sample accurate scheduling of a grain: a source started
// with a duration whose start and end both fall between frame
// boundaries must be non-zero on exactly the interior frames.
audit.define('sub-sample-grain', (task, should) => {
  let context = new OfflineAudioContext(
      {numberOfChannels: 2, length: 128, sampleRate: sampleRate});
  let merger = new ChannelMergerNode(
      context, {numberOfInputs: context.destination.channelCount});
  merger.connect(context.destination);
  // The source can be a simple constant for this test.
  let buffer = new AudioBuffer(
      {length: context.length, sampleRate: context.sampleRate});
  buffer.getChannelData(0).fill(1);
  let src0 = new AudioBufferSourceNode(context, {buffer: buffer});
  let src1 = new AudioBufferSourceNode(context, {buffer: buffer});
  src0.connect(merger, 0, 0);
  src1.connect(merger, 0, 1);
  // Start short grains.  The start and end frames are arbitrary, but
  // must not be integers so the sub-sample accurate path is exercised.
  const src0StartGrain = 3.1;
  const src0EndGrain = 37.2;
  src0.start(
      src0StartGrain / context.sampleRate, 0,
      (src0EndGrain - src0StartGrain) / context.sampleRate);
  const src1StartGrain = 5.8;
  const src1EndGrain = 43.9;
  src1.start(
      src1StartGrain / context.sampleRate, 0,
      (src1EndGrain - src1StartGrain) / context.sampleRate);
  context.startRendering()
      .then(audioBuffer => {
        let output0 = audioBuffer.getChannelData(0);
        let output1 = audioBuffer.getChannelData(1);
        // verifyGrain computes the expected signal for each grain and
        // checks the rendered output against it.  (The duplicate,
        // unused computation of an |expected| array that previously
        // lived here has been removed; verifyGrain already does it.)
        verifyGrain(should, output0, {
          startGrain: src0StartGrain,
          endGrain: src0EndGrain,
          sourceName: 'src0',
          outputName: 'output0'
        });
        verifyGrain(should, output1, {
          startGrain: src1StartGrain,
          endGrain: src1EndGrain,
          sourceName: 'src1',
          outputName: 'output1'
        });
      })
      .then(() => task.done());
});
// Verify sub-sample accurate start when the source's playbackRate is
// not 1: the interpolated first output sample must account for the
// playback rate, not just the fractional start frame.
audit.define(
'sub-sample accurate start with playbackRate', (task, should) => {
// There are two channels, one for each source. Only a short
// render is needed for this test.
let context = new OfflineAudioContext(
{numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
let merger = new ChannelMergerNode(
context, {numberOfInputs: context.destination.channelCount});
merger.connect(context.destination);
// Use a simple linear ramp for the sources with integer steps
// starting at 1 to make it easy to verify and test that we have
// sub-sample accurate start. Ramp starts at 1 so we can easily
// tell when the source starts.
let buffer = new AudioBuffer(
{length: context.length, sampleRate: context.sampleRate});
let r = buffer.getChannelData(0);
for (let k = 0; k < r.length; ++k) {
r[k] = k + 1;
}
// Two sources with different playback rates
const src0 = new AudioBufferSourceNode(
context, {buffer: buffer, playbackRate: .25});
const src1 = new AudioBufferSourceNode(
context, {buffer: buffer, playbackRate: 4});
// Frame where sources start. Pretty arbitrary but should not be an
// integer.
const startFrame = 17.8;
src0.connect(merger, 0, 0);
src1.connect(merger, 0, 1);
src0.start(startFrame / context.sampleRate);
src1.start(startFrame / context.sampleRate);
context.startRendering()
.then(audioBuffer => {
const output0 = audioBuffer.getChannelData(0);
const output1 = audioBuffer.getChannelData(1);
const frameBefore = Math.floor(startFrame);
const frameAfter = frameBefore + 1;
// Informative message so we know what the following output
// indices really mean.
should(startFrame, 'Source start frame')
.beEqualTo(startFrame);
// Verify the output
// With a startFrame of 17.8, the first output is at frame 18,
// but the actual start is at 17.8. So we would interpolate
// the output 0.2 fraction of the way between 17.8 and 18, for
// an output of 1.2 for our ramp. But the playback rate is
// 0.25, so we're really only 1/4 as far along as we think so
// the output is .2*0.25 of the way between 1 and 2 or 1.05.
const ramp0 = buffer.getChannelData(0)[0];
const ramp1 = buffer.getChannelData(0)[1];
const src0Output = ramp0 +
(ramp1 - ramp0) * (frameAfter - startFrame) *
src0.playbackRate.value;
let playbackMessage =
`With playbackRate ${src0.playbackRate.value}:`;
should(
output0[frameBefore],
`${playbackMessage} output0[${frameBefore}]`)
.beEqualTo(0);
should(
output0[frameAfter],
`${playbackMessage} output0[${frameAfter}]`)
.beCloseTo(src0Output, {threshold: 4.542e-8});
// Same expected-value computation for src1, whose playback
// rate is 4 instead of 0.25.
const src1Output = ramp0 +
(ramp1 - ramp0) * (frameAfter - startFrame) *
src1.playbackRate.value;
playbackMessage =
`With playbackRate ${src1.playbackRate.value}:`;
should(
output1[frameBefore],
`${playbackMessage} output1[${frameBefore}]`)
.beEqualTo(0);
should(
output1[frameAfter],
`${playbackMessage} output1[${frameAfter}]`)
.beCloseTo(src1Output, {threshold: 4.542e-8});
})
.then(() => task.done());
});
audit.run();
// Given an input ramp in |rampBuffer|, interpolate the signal assuming
// this ramp is used for an ABSN that starts at frame |startFrame|, which
// is not necessarily an integer. For simplicity we just use linear
// interpolation here. The interpolation is not part of the spec but
// this should be pretty close to whatever interpolation is being done.
// Given an input ramp in |rampBuffer|, compute the signal an ABSN
// playing this ramp would produce if started at frame |startFrame|,
// which need not be an integer.  Linear interpolation is used for
// simplicity; the interpolation method is not part of the spec, but
// this should be pretty close to whatever a browser actually does.
function interpolateRamp(rampBuffer, startFrame) {
  // Frames up to and including floor(startFrame) precede the start of
  // the source, so they stay at the Float32Array default of 0.
  const lastSilentFrame = Math.floor(startFrame);
  // The result is one frame shorter than the ramp because the final
  // ramp sample has no successor to interpolate toward.
  const result = new Float32Array(rampBuffer.length - 1);
  // Walk the output frames past the start point; |frame| tracks the
  // (fractional) source position corresponding to output frame |k|,
  // and |i| indexes the ramp sample just after that position.
  for (let k = lastSilentFrame + 1, i = 1, frame = startFrame;
       k < result.length; ++k, ++i, ++frame) {
    const next = rampBuffer[i];
    const prev = rampBuffer[i - 1];
    // frame - k is (fraction - 1), so this lands (1 - fraction) of the
    // way from |prev| to |next|.
    result[k] = prev - (frame - k) * (next - prev);
  }
  return result;
}
// Check that |output| contains exactly the grain described by
// |options|: silence up to the (sub-sample) grain start, a constant 1
// inside the grain, and silence from the grain end onward.  All results
// are reported through |should|.
function verifyGrain(should, output, options) {
  const {startGrain, endGrain, sourceName, outputName} = options;
  // Last all-zero frame before the grain, and last non-zero frame of
  // the grain, respectively.
  const startFloor = Math.floor(startGrain);
  const endFloor = Math.floor(endGrain);
  // Build the expected signal: Float32Array entries default to 0, so
  // only the interior frames [floor(start)+1, ceil(end)) are set to 1.
  const expected = new Float32Array(output.length);
  const firstSilentFrame = Math.min(Math.ceil(endGrain), expected.length);
  for (let k = startFloor + 1; k < firstSilentFrame; ++k) {
    expected[k] = 1;
  }
  // Informational output of the grain parameters.
  should(startGrain, `${sourceName} grain start`).beEqualTo(startGrain);
  should(endGrain - startGrain, `${sourceName} grain duration`)
      .beEqualTo(endGrain - startGrain);
  should(endGrain, `${sourceName} grain end`).beEqualTo(endGrain);
  // The whole rendered channel must match, and the on/off transitions
  // must fall on exactly the right frames.
  should(output, outputName).beEqualToArray(expected);
  should(output[startFloor], `${outputName}[${startFloor}]`)
      .beEqualTo(0);
  should(output[1 + startFloor], `${outputName}[${1 + startFloor}]`)
      .notBeEqualTo(0);
  should(output[endFloor], `${outputName}[${endFloor}]`)
      .notBeEqualTo(0);
  should(output[1 + endFloor], `${outputName}[${1 + endFloor}]`)
      .beEqualTo(0);
}
</script>
</body>
</html>

View file

@ -14,7 +14,8 @@
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
let context = 0;
let sampleRate = 44100;
// Use a power of two to eliminate round-off converting frames to time.
let sampleRate = 32768;
let renderNumberOfChannels = 8;
let singleTestFrameLength = 8;
let testBuffers;

View file

@ -131,16 +131,20 @@
// Set listener properties to "random" values so that motion on one of
// the attributes actually changes things relative to the panner
// location.
// location. And the up and forward directions should have a simple
// relationship between them.
listener.positionX.value = -1;
listener.positionY.value = 1;
listener.positionZ.value = -1;
listener.forwardX.value = -1;
listener.forwardY.value = 1;
listener.forwardZ.value = -1;
// Make the up vector not parallel or perpendicular to the forward and
// position vectors so that automations of the up vector produce
// noticeable differences.
listener.upX.value = 1;
listener.upY.value = 1;
listener.upZ.value = 1;
listener.upZ.value = 2;
let audioParam = listener[options.param];
audioParam.automationRate = 'k-rate';

View file

@ -34,7 +34,7 @@
createTestAndRun(context, 'lowpass', {
should: should,
threshold: 9.7869e-8,
threshold: 4.6943e-8,
filterParameters: filterParameters
}).then(task.done.bind(task));
});

View file

@ -18,11 +18,19 @@
let audit = Audit.createTaskRunner();
let sampleRate = 44100.0;
let bufferDurationSeconds = 0.125;
// Use a power of two to eliminate any round-off when converting frame to
// time.
let sampleRate = 32768;
// Make sure the buffer duration and spacing are all exact frame lengths
// so that the note spacing is also on frame boundaries to eliminate
// sub-sample accurate start of an ABSN.
let bufferDurationSeconds = Math.floor(0.125 * sampleRate) / sampleRate;
let numberOfNotes = 11;
let noteSpacing = bufferDurationSeconds +
0.020; // leave 20ms of silence between each "note"
// Leave about 20ms of silence, being sure this is an exact frame
// duration.
let noteSilence = Math.floor(0.020 * sampleRate) / sampleRate;
let noteSpacing = bufferDurationSeconds + noteSilence;
let lengthInSeconds = numberOfNotes * noteSpacing;
let context = 0;
@ -131,18 +139,18 @@
// Verify the channels are close to the reference.
should(actual0, 'Left output from gain node')
.beCloseToArray(
reference0, {relativeThreshold: 1.1908e-7});
reference0, {relativeThreshold: 1.1877e-7});
should(actual1, 'Right output from gain node')
.beCloseToArray(
reference1, {relativeThreshold: 1.1908e-7});
reference1, {relativeThreshold: 1.1877e-7});
// Test the SNR too for both channels.
let snr0 = 10 * Math.log10(computeSNR(actual0, reference0));
let snr1 = 10 * Math.log10(computeSNR(actual1, reference1));
should(snr0, 'Left SNR (in dB)')
.beGreaterThanOrEqualTo(148.69);
.beGreaterThanOrEqualTo(148.71);
should(snr1, 'Right SNR (in dB)')
.beGreaterThanOrEqualTo(148.69);
.beGreaterThanOrEqualTo(148.71);
})
.then(() => task.done());
;

View file

@ -0,0 +1,51 @@
<!DOCTYPE html>
<html>
<head>
<title>Test Panner Azimuth Calculation</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="../../resources/audit.js"></script>
</head>
<body>
<script>
const audit = Audit.createTaskRunner();
// Fairly arbitrary sample rate
const sampleRate = 16000;
// Verify the panner azimuth calculation for a source directly above
// the listener: with equal-power panning the signal must appear
// identically in both channels, scaled by cos(pi/4) = 1/sqrt(2).
audit.define('Azimuth calculation', (task, should) => {
  // Two channels for the context so we can see each channel of the
  // panner node.
  let context = new OfflineAudioContext(2, sampleRate, sampleRate);
  let src = new ConstantSourceNode(context);
  let panner = new PannerNode(context);
  src.connect(panner).connect(context.destination);
  // Place the source directly above the listener.  The audio should be
  // the same in both the left and right channels.
  panner.positionY.value = 1;
  src.start();
  context.startRendering()
      .then(audioBuffer => {
        // The left and right channels should contain the same signal.
        let c0 = audioBuffer.getChannelData(0);
        let c1 = audioBuffer.getChannelData(1);
        // Rendered samples are float32, so round the expected value.
        let expected = Math.fround(Math.SQRT1_2);
        should(c0, 'Left channel').beConstantValueOf(expected);
        // Fixed result label: was misspelled 'Righteft channel'.
        should(c1, 'Right channel').beConstantValueOf(expected);
      })
      .then(() => task.done());
});
audit.run();
</script>
</body>
</html>