Update web-platform-tests to revision 719721f655102bcd24d45eba91339eb2d7dbc591

This commit is contained in:
WPT Sync Bot 2019-07-14 10:26:29 +00:00
parent 2b84348372
commit bc8af9cf87
70 changed files with 1271 additions and 294 deletions

View file

@ -35,7 +35,7 @@
`context.sampleRate (${context.sampleRate} Hz)`).beGreaterThan(0);
defaultLatency = context.baseLatency;
should(defaultLatency, 'default baseLatency').beGreaterThan(0);
should(defaultLatency, 'default baseLatency').beGreaterThanOrEqualTo(0);
// Verify that an AudioContext can be created with the expected
// latency types.
@ -130,7 +130,7 @@
should(context1.baseLatency, 'high latency context baseLatency')
.beEqualTo(context2.baseLatency);
should(context1.baseLatency, 'high latency context baseLatency')
.beGreaterThan(interactiveLatency);
.beGreaterThanOrEqualTo(interactiveLatency);
closingPromises.push(context1.close());
closingPromises.push(context2.close());

View file

@ -0,0 +1,54 @@
/**
 * @class ActiveProcessingTester
 * @extends AudioWorkletProcessor
 *
 * This processor class sends a message to its AudioWorkletNode whenever the
 * number of channels on the input changes. The message includes the actual
 * number of channels, the context time at which this occurred, and whether
 * we're done processing or not.
 */
class ActiveProcessingTester extends AudioWorkletProcessor {
  constructor(options) {
    super(options);

    // Channel count sent in the previous message; 0 means nothing has been
    // reported yet, so the first process() call always posts a message.
    this._lastChannelCount = 0;

    // Test duration in seconds. Use the user-specified
    // |processorOptions.testDuration| when present; otherwise default to 5.
    // Nullish coalescing also covers an explicitly null/undefined value,
    // which the previous hasOwnProperty check let through and which produced
    // a NaN end time that never finished.
    this._testDuration = options?.processorOptions?.testDuration ?? 5;

    // Context time (seconds) at which we'll signal we're done, based on the
    // requested |testDuration|.
    this._endTime = currentTime + this._testDuration;
  }

  /**
   * Copy the first input to the first output and report channel-count
   * changes to the node via the message port.
   *
   * @param {Float32Array[][]} inputs - per-input arrays of channel data.
   * @param {Float32Array[][]} outputs - per-output arrays of channel data.
   * @return {boolean} false once |_endTime| has passed so this processor
   *     no longer needs to be called; true while the test is running.
   */
  process(inputs, outputs) {
    const input = inputs[0];
    const output = outputs[0];
    const inputChannelCount = input.length;
    const isFinished = currentTime > this._endTime;

    // Send a message if we're done or the channel count changed.
    if (isFinished || inputChannelCount !== this._lastChannelCount) {
      this.port.postMessage({
        channelCount: inputChannelCount,
        finished: isFinished,
        time: currentTime
      });
      this._lastChannelCount = inputChannelCount;
    }

    // Pass the audio through unmodified so downstream nodes see the input.
    for (let channel = 0; channel < input.length; ++channel) {
      output[channel].set(input[channel]);
    }

    // When we're finished, this method no longer needs to be called.
    return !isFinished;
  }
}

registerProcessor('active-processing-tester', ActiveProcessingTester);

View file

@ -6,101 +6,86 @@
</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let audit = Audit.createTaskRunner();
// The sample rate MUST be a power of two to eliminate round-off when
// computing render boundaries but is otherwise arbitrary. And we only need
// a few blocks for rendering to see if things are working.
let sampleRate = 8192;
let renderLength = 10 * RENDER_QUANTUM_FRAMES;
// Number of inputs for the ChannelMergerNode. Pretty arbitrary, but
// should not be 1.
let numberOfInputs = 7;
// How many frames the source should run. Arbitrary but should be more
// than a render quantum.
let sourceDurationFrames = 131;
// Frame at which to connect the source to the merger
let connectFrame = 2 * RENDER_QUANTUM_FRAMES;
// AudioProcessor that counts the number of channels on its single input.
// AudioProcessor that sends a message to its AudioWorkletNode whenever the
// number of channels on its input changes.
let filePath =
'../the-audioworklet-interface/processors/input-count-processor.js';
'../the-audioworklet-interface/processors/active-processing.js';
audit.define(
{
label: 'Test',
description: 'Active processing for ChannelMergerNode'
},
async (task, should) => {
const context = new OfflineAudioContext({
numberOfChannels: numberOfInputs,
length: renderLength,
sampleRate: sampleRate
const audit = Audit.createTaskRunner();
let context;
audit.define('initialize', (task, should) => {
// Create context and load the module
context = new AudioContext();
should(
context.audioWorklet.addModule(filePath),
'AudioWorklet module loading')
.beResolved()
.then(() => task.done());
});
audit.define('test', (task, should) => {
const src = new OscillatorNode(context);
// Number of inputs for the ChannelMergerNode. Pretty arbitrary, but
// should not be 1.
const numberOfInputs = 7;
const merger =
new ChannelMergerNode(context, {numberOfInputs: numberOfInputs});
const testerNode =
new AudioWorkletNode(context, 'active-processing-tester', {
// Use as short a duration as possible to keep the test from
// taking too much time.
processorOptions: {testDuration: .5},
});
// Don't mix the inputs to the destination!
context.destination.channelInterpretation = 'discrete';
// Expected number of output channels from the merger node. We should
// start with the number of inputs, because the source (oscillator) is
// actively processing. When the source stops, the number of channels
// should change to 1.
const expectedValues = [numberOfInputs, 1];
let index = 0;
await context.audioWorklet.addModule(filePath);
testerNode.port.onmessage = event => {
let count = event.data.channelCount;
let finished = event.data.finished;
let src = new ConstantSourceNode(context);
let merger = new ChannelMergerNode(
context, {numberOfInputs: numberOfInputs});
let counter = new AudioWorkletNode(context, 'counter');
// Just to print a message that we created the graph with a
// convolver in it.
should(
() => {
merger.connect(counter).connect(context.destination);
},
`Construction of graph with ChannelMergerNode with ${
merger.numberOfInputs} inputs`)
.notThrow()
// Connect the source now and start it and let it run for
// |sourceDurationFrames| frames.
context.suspend(connectFrame / context.sampleRate)
.then(() => {
src.connect(merger, 0, 0);
src.start();
src.stop(
context.currentTime +
sourceDurationFrames / context.sampleRate);
})
.then(() => context.resume());
const renderedBuffer = await context.startRendering();
// The expected output is something like:
//
// 1, 1, 1,..., 7, 7, 7.,,,, 1, 1, 1
//
// When the merger has no inputs, it's not actively processing
// so it must output mono silence. After connecting a source,
// the number of channels of the output should be the same as
// the number of inputs to the merger. Finally, when the
// source stops, the merger is not actively processing anymore
// and should output mono silence again. For this test, we
// don't care too much how many different values there are.
// There just has to be at least one of each value, in the
// order given.
const output = renderedBuffer.getChannelData(0);
should(output, 'Number of output channels').containValues([
1, numberOfInputs, 1
]);
// If we're finished, end testing.
if (finished) {
// Verify that we got the expected number of changes.
should(index, 'Number of distinct values')
.beEqualTo(expectedValues.length);
task.done();
});
return;
}
if (index < expectedValues.length) {
// Verify that the number of channels matches the expected number of
// channels.
should(count, `Test ${index}: Number of convolver output channels`)
.beEqualTo(expectedValues[index]);
}
++index;
};
// Create the graph and go
src.connect(merger).connect(testerNode).connect(context.destination);
src.start();
// Stop the source after a short time so we can test that the channel
// merger changes to not actively processing and thus produces a single
// channel of silence.
src.stop(context.currentTime + .1);
});
audit.run();
</script>