Update webgpu cts

This commit is contained in:
sagudev 2023-05-24 17:32:04 +02:00
parent 886e88d4ea
commit e378bea56d
580 changed files with 127367 additions and 9404 deletions

File diff suppressed because it is too large. Load diff

View file

@ -0,0 +1 @@
9ab2eade6a818ed58ac1a7b36b706858f3ba5eb3

View file

@ -0,0 +1,89 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
/** DataCache is an interface to a data store used to hold cached data */
export class DataCache {
  /** setStore() sets the backing data store used by the data cache */
  setStore(dataStore) {
    this.dataStore = dataStore;
  }
  /** setDebugLogger() sets the verbose logger */
  setDebugLogger(logger) {
    this.debugLogger = logger;
  }
  /**
   * fetch() retrieves cacheable data from the data cache, first checking the
   * in-memory cache, then the data store (if specified), then resorting to
   * building the data and storing it in the cache.
   */
  async fetch(cacheable) {
    // First check the in-memory cache.
    let data = this.cache.get(cacheable.path);
    if (data !== undefined) {
      this.log('in-memory cache hit');
      // Already inside an async function, so no need for Promise.resolve().
      return data;
    }
    this.log('in-memory cache miss');
    // On in-memory cache miss, try the data store (if one was set), unless a
    // previous load of this path already failed.
    if (this.dataStore !== null && !this.unavailableFiles.has(cacheable.path)) {
      let serialized;
      try {
        serialized = await this.dataStore.load(cacheable.path);
        this.log('loaded serialized');
      } catch (err) {
        // Not found in the data store; remember that so we never retry this path.
        this.log(`failed to load (${cacheable.path}): ${err}`);
        this.unavailableFiles.add(cacheable.path);
      }
      if (serialized !== undefined) {
        this.log(`deserializing`);
        data = cacheable.deserialize(serialized);
        this.cache.set(cacheable.path, data);
        return data;
      }
    }
    // Not found anywhere. Build the data, and cache for future lookup.
    this.log(`cache: building (${cacheable.path})`);
    data = await cacheable.build();
    this.cache.set(cacheable.path, data);
    return data;
  }
  /** Logs a message via the debug logger, if one was set. */
  log(msg) {
    if (this.debugLogger !== null) {
      this.debugLogger(`DataCache: ${msg}`);
    }
  }
  /** In-memory cache of built/deserialized data, keyed by cacheable path. */
  cache = new Map();
  /** Paths that previously failed to load from the data store (never retried). */
  unavailableFiles = new Set();
  dataStore = null;
  debugLogger = null;
}
/** The data cache: process-wide singleton DataCache instance. */
export const dataCache = new DataCache();
/** Tracks whether the current process is building the data cache. */
let cacheBuildMode = false;

/** @returns true if the data cache is currently being built */
export function getIsBuildingDataCache() {
  return cacheBuildMode;
}

/** Sets whether the data cache is currently being built (default: true). */
export function setIsBuildingDataCache(value = true) {
  cacheBuildMode = value;
}
/**
* Cacheable is the interface to something that can be stored into the
* DataCache.
* The 'npm run gen_cache' tool will look for module-scope variables of this
* interface, with the name `d`.
*/

View file

@ -1,64 +1,178 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { assert } from './util/util.js';
**/ import { assert, unreachable } from '../util/util.js';
export class SkipTestCase extends Error {}
export class UnexpectedPassError extends Error {}
// A Fixture is a class used to instantiate each test case at run time.
// A new instance of the Fixture is created for every single test case
// (i.e. every time the test function is run).
export class Fixture {
constructor(rec, params) {
_defineProperty(this, 'params', void 0);
_defineProperty(this, 'rec', void 0);
_defineProperty(this, 'eventualExpectations', []);
_defineProperty(this, 'numOutstandingAsyncExpectations', 0);
this.rec = rec;
export { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
/**
 * Shared state for a batch of subcases: one instance is shared between the
 * subcases within the same testcase.
 */
export class SubcaseBatchState {
  constructor(recorder, params) {
    /** Interface for recording logs and test status. */
    this.recorder = recorder;
    /** The case parameters for this test fixture shared state. Subcase params are not included. */
    this.params = params;
  }

  /**
   * Runs before the `.before()` function.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async init() {}

  /**
   * Runs between the `.before()` function and the subcases.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async postInit() {}

  /**
   * Runs after all subcases finish.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async finalize() {}
}
/**
* A Fixture is a class used to instantiate each test sub/case at run time.
* A new instance of the Fixture is created for every single test subcase
* (i.e. every time the test function is run).
*/
export class Fixture {
/**
* Interface for recording logs and test status.
*
* @internal
*/
// NOTE(review): in the TypeScript source the comment above documents the `rec`
// recorder member; the field declaration itself is erased in this JS build.
// Promises registered via eventualAsyncExpectation(); drained in finalize().
eventualExpectations = [];
// Count of in-flight immediateAsyncExpectation() calls; asserted 0 in finalize().
numOutstandingAsyncExpectations = 0;
// Objects registered via trackForCleanup()/tryTrackForCleanup(); disposed in finalize().
objectsToCleanUp = [];
/** Constructs the per-testcase shared state; subclasses may override to return a SubcaseBatchState subclass. */
static MakeSharedState(recorder, params) {
return new SubcaseBatchState(recorder, params);
}
/** @internal */
constructor(sharedState, rec, params) {
this._sharedState = sharedState;
this.rec = rec;
this._params = params;
}
/**
* Returns the (case+subcase) parameters for this test function invocation.
*/
get params() {
return this._params;
}
/**
* Gets the test fixture's shared state. This object is shared between subcases
* within the same testcase.
*/
get sharedState() {
return this._sharedState;
}
/**
* Override this to do additional pre-test-function work in a derived fixture.
* This has to be a member function instead of an async `createFixture` function, because
* we need to be able to ergonomically override it in subclasses.
*
* @internal MAINTENANCE_TODO: Make this not visible to test code?
*/
async init() {}
/**
* Override this to do additional post-test-function work in a derived fixture.
*
* Called even if init was unsuccessful.
*
* @internal MAINTENANCE_TODO: Make this not visible to test code?
*/
async finalize() {
assert(
this.numOutstandingAsyncExpectations === 0,
'there were outstanding immediateAsyncExpectations (e.g. expectUncapturedError) at the end of the test'
);
// Loop to exhaust the eventualExpectations in case they chain off each other.
while (this.eventualExpectations.length) {
const p = this.eventualExpectations.shift();
try {
await p;
} catch (ex) {
// Record rather than rethrow, so remaining expectations and cleanup still run.
this.rec.threw(ex);
}
}
// And clean up any objects now that they're done being used.
for (const o of this.objectsToCleanUp) {
if ('getExtension' in o) {
// Presumably a WebGL(2) context: release it via WEBGL_lose_context if available.
const WEBGL_lose_context = o.getExtension('WEBGL_lose_context');
if (WEBGL_lose_context) WEBGL_lose_context.loseContext();
} else if ('destroy' in o) {
o.destroy();
} else {
o.close();
}
}
}
/**
* Tracks an object to be cleaned up after the test finishes.
*
* MAINTENANCE_TODO: Use this in more places. (Will be easier once .destroy() is allowed on
* invalid objects.)
*/
trackForCleanup(o) {
this.objectsToCleanUp.push(o);
return o;
}
/** Tracks an object, if it's destroyable, to be cleaned up after the test finishes. */
tryTrackForCleanup(o) {
if (typeof o === 'object' && o !== null) {
if (
// Only track objects finalize() knows how to dispose of:
// destroy()able, close()able, or a WebGL(2) context.
'destroy' in o ||
'close' in o ||
o instanceof WebGLRenderingContext ||
o instanceof WebGL2RenderingContext
) {
this.objectsToCleanUp.push(o);
}
}
return o;
}
/** Log a debug message. */
debug(msg) {
this.rec.debug(new Error(msg));
}
/** Throws an exception marking the subcase as skipped. */
skip(msg) {
throw new SkipTestCase(msg);
}
async finalize() {
assert(
this.numOutstandingAsyncExpectations === 0,
'there were outstanding asynchronous expectations (e.g. shouldReject) at the end of the test'
);
await Promise.all(this.eventualExpectations);
}
/** Log a warning and increase the result status to "Warn". */
warn(msg) {
// Wrapped in an Error so the recorder can capture a stack at the warn site.
this.rec.warn(new Error(msg));
}
/** Log an error and increase the result status to "ExpectFailed". */
fail(msg) {
// Wrapped in an Error so the recorder can capture a stack at the failure site.
this.rec.expectationFailed(new Error(msg));
}
/**
* Wraps an async function. Tracks its status to fail if the test tries to report a test status
* before the async work has finished.
*/
async immediateAsyncExpectation(fn) {
this.numOutstandingAsyncExpectations++;
const ret = await fn();
@ -66,28 +180,32 @@ export class Fixture {
return ret;
}
/**
* Wraps an async function, passing it an `Error` object recording the original stack trace.
* The async work will be implicitly waited upon before reporting a test status.
*/
eventualAsyncExpectation(fn) {
// The Error is created here so its stack points at the expectation site,
// not at wherever the promise eventually settles.
const promise = fn(new Error());
// Registered for awaiting during finalize(); intentionally not awaited here.
this.eventualExpectations.push(promise);
return promise;
}
expectErrorValue(expectedName, ex, niceStack) {
expectErrorValue(expectedError, ex, niceStack) {
if (!(ex instanceof Error)) {
niceStack.message = `THREW non-error value, of type ${typeof ex}: ${ex}`;
this.rec.expectationFailed(niceStack);
return;
}
const actualName = ex.name;
if (actualName !== expectedName) {
niceStack.message = `THREW ${actualName}, instead of ${expectedName}: ${ex}`;
if (expectedError !== true && actualName !== expectedError) {
niceStack.message = `THREW ${actualName}, instead of ${expectedError}: ${ex}`;
this.rec.expectationFailed(niceStack);
} else {
niceStack.message = `OK: threw ${actualName}${ex.message}`;
niceStack.message = `OK: threw ${actualName}: ${ex.message}`;
this.rec.debug(niceStack);
}
}
/** Expect that the provided promise resolves (fulfills). */
shouldResolve(p, msg) {
this.eventualAsyncExpectation(async niceStack => {
const m = msg ? ': ' + msg : '';
@ -95,12 +213,16 @@ export class Fixture {
await p;
niceStack.message = 'resolved as expected' + m;
} catch (ex) {
niceStack.message = `REJECTED${m}\n${ex.message}`;
niceStack.message = `REJECTED${m}`;
if (ex instanceof Error) {
niceStack.message += '\n' + ex.message;
}
this.rec.expectationFailed(niceStack);
}
});
}
/** Expect that the provided promise rejects, with the provided exception name. */
shouldReject(expectedName, p, msg) {
this.eventualAsyncExpectation(async niceStack => {
const m = msg ? ': ' + msg : '';
@ -115,16 +237,31 @@ export class Fixture {
});
}
shouldThrow(expectedName, fn, msg) {
/**
* Expect that the provided function throws (if `true` or `string`) or not (if `false`).
* If a string is provided, expect that the throw exception has that name.
*
* MAINTENANCE_TODO: Change to `string | false` so the exception name is always checked.
*/
shouldThrow(expectedError, fn, msg) {
const m = msg ? ': ' + msg : '';
try {
fn();
this.rec.expectationFailed(new Error('DID NOT THROW' + m));
if (expectedError === false) {
this.rec.debug(new Error('did not throw, as expected' + m));
} else {
this.rec.expectationFailed(new Error('unexpectedly did not throw' + m));
}
} catch (ex) {
this.expectErrorValue(expectedName, ex, new Error(m));
if (expectedError === false) {
this.rec.expectationFailed(new Error('threw unexpectedly' + m));
} else {
this.expectErrorValue(expectedError, ex, new Error(m));
}
}
}
/** Expect that a condition is true. */
expect(cond, msg) {
if (cond) {
const m = msg ? ': ' + msg : '';
@ -134,4 +271,39 @@ export class Fixture {
}
return cond;
}
/**
* If the argument is an `Error`, fail (or warn). If it's `undefined`, no-op.
* If the argument is an array, apply the above behavior on each of elements.
*/
expectOK(error, { mode = 'fail', niceStack } = {}) {
const handleError = error => {
// (This parameter intentionally shadows the outer `error` argument.)
if (error instanceof Error) {
if (niceStack) {
// Replace the error's stack with the caller-provided one for nicer logs.
error.stack = niceStack.stack;
}
if (mode === 'fail') {
this.rec.expectationFailed(error);
} else if (mode === 'warn') {
this.rec.warn(error);
} else {
unreachable();
}
}
};
if (Array.isArray(error)) {
for (const e of error) {
handleError(e);
}
} else {
handleError(error);
}
}
/**
* Like expectOK(), but for an eventual (Promise) result: the promise is awaited
* via eventualAsyncExpectation() before the status is reported.
*/
eventualExpectOK(error, { mode = 'fail' } = {}) {
this.eventualAsyncExpectation(async niceStack => {
this.expectOK(await error, { mode, niceStack });
});
}
}

View file

@ -1,155 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
// Babel runtime helper: Object.defineProperty when `key` is already visible on
// `obj` (own or inherited), plain assignment otherwise. Returns `obj`.
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { assert, raceWithRejectOnTimeout, unreachable, assertReject } from '../util/util.js';
import { getGPU } from './implementation.js';
// Thrown when a test failed but the GPUDevice is still safe to reuse for later tests.
class TestFailedButDeviceReusable extends Error {}
// Thrown on unexpected OOM; the device is discarded and callers should attempt GC.
export class TestOOMedShouldAttemptGC extends Error {}
// How long to wait for popErrorScope() before assuming it hung (see release()).
const kPopErrorScopeTimeoutMS = 5000;
/**
* Lends out a single, lazily-created GPUDevice to one test at a time, wrapping
* each test in out-of-memory and validation error scopes.
*/
export class DevicePool {
constructor() {
// failed: device creation failed once; never retried (see acquire()).
// holder: current DeviceHolder ({ acquired, device, lostReason }).
_defineProperty(this, 'failed', false);
_defineProperty(this, 'holder', undefined);
} // undefined if "uninitialized" (not yet initialized, or lost)
/** Acquires the pooled device (creating it on first use) and opens the error scopes. */
async acquire() {
assert(!this.failed, 'WebGPU device previously failed to initialize; not retrying');
if (this.holder === undefined) {
try {
this.holder = await DevicePool.makeHolder();
} catch (ex) {
this.failed = true;
throw ex;
}
}
assert(!this.holder.acquired, 'Device was in use on DevicePool.acquire');
this.holder.acquired = true;
this.beginErrorScopes();
return this.holder.device;
}
// When a test is done using a device, it's released back into the pool.
// This waits for error scopes, checks their results, and checks for various error conditions.
async release(device) {
const holder = this.holder;
assert(holder !== undefined, 'trying to release a device while pool is uninitialized');
assert(holder.acquired, 'trying to release a device while already released');
assert(device === holder.device, 'Released device was the wrong device');
try {
// Time out if popErrorScope never completes. This could happen due to a browser bug - e.g.,
// as of this writing, on Chrome GPU process crash, popErrorScope just hangs.
await raceWithRejectOnTimeout(
this.endErrorScopes(),
kPopErrorScopeTimeoutMS,
'finalization popErrorScope timed out'
);
// (Hopefully if the device was lost, it has been reported by the time endErrorScopes()
// has finished (or timed out). If not, it could cause a finite number of extra test
// failures following this one (but should recover eventually).)
const lostReason = holder.lostReason;
if (lostReason !== undefined) {
// Fail the current test.
unreachable(`Device was lost: ${lostReason}`);
}
} catch (ex) {
// Any error that isn't explicitly TestFailedButDeviceReusable forces a new device to be
// created for the next test.
if (!(ex instanceof TestFailedButDeviceReusable)) {
this.holder = undefined;
}
throw ex;
} finally {
// TODO: device.destroy()
// Mark the holder as free. (This only has an effect if the pool still has the holder.)
// This could be done at the top but is done here to guard against async-races during release.
holder.acquired = false;
}
}
// Gets a device and creates a DeviceHolder.
// If the device is lost, DeviceHolder.lostReason gets set.
static async makeHolder() {
const gpu = getGPU();
const adapter = await gpu.requestAdapter();
assert(adapter !== null);
const device = await adapter.requestDevice();
assert(device !== null);
const holder = {
acquired: false,
device,
lostReason: undefined,
};
// Record the loss reason asynchronously; it is checked later during release().
holder.device.lost.then(ev => {
holder.lostReason = ev.message;
});
return holder;
}
// Create error scopes that wrap the entire test.
beginErrorScopes() {
assert(this.holder !== undefined);
// Pushed in this order so endErrorScopes() pops validation first, then OOM.
this.holder.device.pushErrorScope('out-of-memory');
this.holder.device.pushErrorScope('validation');
}
// End the whole-test error scopes. Check that there are no extra error scopes, and that no
// otherwise-uncaptured errors occurred during the test.
async endErrorScopes() {
assert(this.holder !== undefined);
let gpuValidationError;
let gpuOutOfMemoryError;
try {
// May reject if the device was lost.
gpuValidationError = await this.holder.device.popErrorScope();
gpuOutOfMemoryError = await this.holder.device.popErrorScope();
} catch (ex) {
assert(
this.holder.lostReason !== undefined,
"popErrorScope failed, but device.lost hasn't fired (yet)"
);
throw ex;
}
// One extra popErrorScope() must reject: the test must not leave scopes on the stack.
await assertReject(
this.holder.device.popErrorScope(),
'There was an extra error scope on the stack after a test'
);
if (gpuValidationError !== null) {
assert(gpuValidationError instanceof GPUValidationError);
// Allow the device to be reused.
throw new TestFailedButDeviceReusable(
`Unexpected validation error occurred: ${gpuValidationError.message}`
);
}
if (gpuOutOfMemoryError !== null) {
assert(gpuOutOfMemoryError instanceof GPUOutOfMemoryError);
// Don't allow the device to be reused; unexpected OOM could break the device.
throw new TestOOMedShouldAttemptGC('Unexpected out-of-memory error occurred');
}
}
}

View file

@ -1,19 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ /// <reference types="@webgpu/types" />
import { assert } from '../util/util.js';
/** Memoized GPU entry point; stays undefined until the first successful getGPU() call. */
let cachedGPU = undefined;

/**
 * Returns `navigator.gpu`, asserting that a WebGPU implementation is present.
 * The result is memoized, so subsequent calls return the cached object.
 */
export function getGPU() {
  if (cachedGPU) {
    return cachedGPU;
  }
  assert(
    typeof navigator !== 'undefined' && navigator.gpu !== undefined,
    'No WebGPU implementation found'
  );
  cachedGPU = navigator.gpu;
  return cachedGPU;
}

View file

@ -1,48 +0,0 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/
/**
 * Babel runtime helper: installs `value` under `key` on `obj` and returns `obj`.
 * When `key` is already visible on `obj` (own or inherited), it is (re)defined
 * as an enumerable, configurable, writable data property; otherwise a plain
 * assignment is used.
 */
function _defineProperty(obj, key, value) {
  if (key in obj) {
    Object.defineProperty(obj, key, {
      value,
      enumerable: true,
      configurable: true,
      writable: true,
    });
    return obj;
  }
  obj[key] = value;
  return obj;
}
import { extractImportantStackTrace } from '../util/stack.js';
export class LogMessageWithStack extends Error {
constructor(name, ex) {
super(ex.message);
_defineProperty(this, 'stackHidden', false);
_defineProperty(this, 'timesSeen', 1);
this.name = name;
this.stack = ex.stack;
}
/** Set a flag so the stack is not printed in toJSON(). */
setStackHidden() {
this.stackHidden = true;
}
/** Increment the "seen x times" counter. */
incrementTimesSeen() {
this.timesSeen++;
}
toJSON() {
let m = this.name;
if (this.message) m += ': ' + this.message;
if (!this.stackHidden && this.stack) {
m += '\n' + extractImportantStackTrace(this);
}
if (this.timesSeen > 1) {
m += `\n(seen ${this.timesSeen} times with identical stack)`;
}
return m;
}
}

View file

@ -1,35 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { version } from '../version.js';
import { TestCaseRecorder } from './test_case_recorder.js';
export class Logger {
constructor(debug) {
_defineProperty(this, 'debug', void 0);
_defineProperty(this, 'results', new Map());
this.debug = debug;
}
record(name) {
const result = { status: 'running', timems: -1 };
this.results.set(name, result);
return [new TestCaseRecorder(result, this.debug), result];
}
asJSON(space) {
return JSON.stringify({ version, results: Array.from(this.results) }, undefined, space);
}
}

View file

@ -1,139 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
// Babel runtime helper: Object.defineProperty when `key` is already visible on
// `obj` (own or inherited), plain assignment otherwise. Returns `obj`.
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { SkipTestCase } from '../fixture.js';
import { now, assert } from '../util/util.js';
import { LogMessageWithStack } from './log_message.js';
// Numeric log-severity enum (TypeScript enum compiled to JS); higher value = worse outcome.
var LogSeverity;
(function (LogSeverity) {
LogSeverity[(LogSeverity['Pass'] = 0)] = 'Pass';
LogSeverity[(LogSeverity['Skip'] = 1)] = 'Skip';
LogSeverity[(LogSeverity['Warn'] = 2)] = 'Warn';
LogSeverity[(LogSeverity['ExpectFailed'] = 3)] = 'ExpectFailed';
LogSeverity[(LogSeverity['ValidationFailed'] = 4)] = 'ValidationFailed';
LogSeverity[(LogSeverity['ThrewException'] = 5)] = 'ThrewException';
})(LogSeverity || (LogSeverity = {}));
// At most this many log entries at the current max severity keep their stacks (see logImpl()).
const kMaxLogStacks = 2;
/** Holds onto a LiveTestCaseResult owned by the Logger, and writes the results into it. */
export class TestCaseRecorder {
/** Used to dedup log messages which have identical stacks. */
constructor(result, debugging) {
_defineProperty(this, 'result', void 0);
// Worst severity seen so far; determines the final result status in finish().
_defineProperty(this, 'maxLogSeverity', LogSeverity.Pass);
// -1 until start() is called; also guards against recorder reuse.
_defineProperty(this, 'startTime', -1);
_defineProperty(this, 'logs', []);
_defineProperty(this, 'logLinesAtCurrentSeverity', 0);
_defineProperty(this, 'debugging', false);
_defineProperty(this, 'messagesForPreviouslySeenStacks', new Map());
this.result = result;
this.debugging = debugging;
}
/** Begins timing the test case. May be called only once per recorder. */
start() {
assert(this.startTime < 0, 'TestCaseRecorder cannot be reused');
this.startTime = now();
}
/** Stops timing and folds duration, final status, and logs into the result object. */
finish() {
assert(this.startTime >= 0, 'finish() before start()');
const timeMilliseconds = now() - this.startTime;
// Round to next microsecond to avoid storing useless .xxxx00000000000002 in results.
this.result.timems = Math.ceil(timeMilliseconds * 1000) / 1000;
// Convert numeric enum back to string (but expose 'exception' as 'fail')
this.result.status =
this.maxLogSeverity === LogSeverity.Pass
? 'pass'
: this.maxLogSeverity === LogSeverity.Skip
? 'skip'
: this.maxLogSeverity === LogSeverity.Warn
? 'warn'
: 'fail'; // Everything else is an error
this.result.logs = this.logs;
}
/** Overwrites this recorder's result fields with an externally-produced result. */
injectResult(injectedResult) {
Object.assign(this.result, injectedResult);
}
/** Records a debug message (only when debugging is enabled); its stack is always hidden. */
debug(ex) {
if (!this.debugging) {
return;
}
const logMessage = new LogMessageWithStack('DEBUG', ex);
logMessage.setStackHidden();
this.logImpl(LogSeverity.Pass, logMessage);
}
/** Records that the case was skipped. */
skipped(ex) {
this.logImpl(LogSeverity.Skip, new LogMessageWithStack('SKIP', ex));
}
/** Records a warning. */
warn(ex) {
this.logImpl(LogSeverity.Warn, new LogMessageWithStack('WARN', ex));
}
/** Records a failed test expectation. */
expectationFailed(ex) {
this.logImpl(LogSeverity.ExpectFailed, new LogMessageWithStack('EXPECTATION FAILED', ex));
}
/** Records a failed validation check. */
validationFailed(ex) {
this.logImpl(LogSeverity.ValidationFailed, new LogMessageWithStack('VALIDATION FAILED', ex));
}
/** Records a thrown exception; SkipTestCase is rerouted to skipped(). */
threw(ex) {
if (ex instanceof SkipTestCase) {
this.skipped(ex);
return;
}
this.logImpl(LogSeverity.ThrewException, new LogMessageWithStack('EXCEPTION', ex));
}
// Appends a log entry, deduplicating identical stacks and limiting how many
// entries keep a visible stack (only kMaxLogStacks at the highest severity).
logImpl(level, logMessage) {
// Deduplicate errors with the exact same stack
if (logMessage.stack) {
const seen = this.messagesForPreviouslySeenStacks.get(logMessage.stack);
if (seen) {
seen.incrementTimesSeen();
return;
}
this.messagesForPreviouslySeenStacks.set(logMessage.stack, logMessage);
}
// Mark printStack=false for all logs except 2 at the highest severity
if (level > this.maxLogSeverity) {
this.logLinesAtCurrentSeverity = 0;
this.maxLogSeverity = level;
if (!this.debugging) {
// Go back and turn off printStack for everything of a lower log level
for (const log of this.logs) {
log.setStackHidden();
}
}
}
if (level < this.maxLogSeverity || this.logLinesAtCurrentSeverity >= kMaxLogStacks) {
if (!this.debugging) {
logMessage.setStackHidden();
}
}
this.logs.push(logMessage);
this.logLinesAtCurrentSeverity++;
}
}

View file

@ -1,140 +1,213 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ let _Symbol$iterator;
function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
**/ import { mergeParams } from '../internal/params_utils.js';
import { stringifyPublicParams } from '../internal/query/stringify_params.js';
import { assert, mapLazy } from '../util/util.js';
// ================================================================
// "Public" ParamsBuilder API / Documentation
// ================================================================
/**
* Provides doc comments for the methods of CaseParamsBuilder and SubcaseParamsBuilder.
* (Also enforces rough interface match between them.)
*/
/**
* Base class for `CaseParamsBuilder` and `SubcaseParamsBuilder`.
*/
export class ParamsBuilderBase {
constructor(cases) {
this.cases = cases;
}
return obj;
}
import { publicParamsEquals } from './params_utils.js';
import { assert } from './util/util.js';
/** Forces a type to resolve its type definitions, to make it readable/debuggable. */
function typeAssert() {}
{
{
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
// Unexpected test results - hopefully okay to ignore these
typeAssert();
typeAssert();
}
/**
* Hidden from test files. Use `builderIterateCasesWithSubcases` to access this.
*/
}
export function poptions(name, values) {
const iter = makeReusableIterable(function* () {
for (const value of values) {
yield { [name]: value };
/**
* Calls the (normally hidden) `iterateCasesWithSubcases()` method.
*/
export function builderIterateCasesWithSubcases(builder) {
// Thin module-level accessor so framework code can iterate a builder's cases
// without iterateCasesWithSubcases() being part of the test-facing API surface.
return builder.iterateCasesWithSubcases();
}
/**
* Builder for combinatorial test **case** parameters.
*
* CaseParamsBuilder is immutable. Each method call returns a new, immutable object,
* modifying the list of cases according to the method called.
*
* This means, for example, that the `unit` passed into `TestBuilder.params()` can be reused.
*/
export class CaseParamsBuilder extends ParamsBuilderBase {
*iterateCasesWithSubcases() {
for (const a of this.cases()) {
yield [a, undefined];
}
});
return iter;
}
export function pbool(name) {
return poptions(name, [false, true]);
}
export function params() {
return new ParamsBuilder();
}
_Symbol$iterator = Symbol.iterator;
export class ParamsBuilder {
constructor() {
_defineProperty(this, 'paramSpecs', [{}]);
}
[_Symbol$iterator]() {
const iter = this.paramSpecs[Symbol.iterator]();
return iter;
}
combine(newParams) {
const paramSpecs = this.paramSpecs;
this.paramSpecs = makeReusableIterable(function* () {
for (const a of paramSpecs) {
for (const b of newParams) {
yield mergeParams(a, b);
}
[Symbol.iterator]() {
return this.cases();
}
/** @inheritDoc */
expandWithParams(expander) {
const newGenerator = expanderGenerator(this.cases, expander);
return new CaseParamsBuilder(() => newGenerator({}));
}
/** @inheritDoc */
expand(key, expander) {
return this.expandWithParams(function* (p) {
for (const value of expander(p)) {
yield { [key]: value };
}
});
return this;
}
expand(expander) {
const paramSpecs = this.paramSpecs;
this.paramSpecs = makeReusableIterable(function* () {
for (const a of paramSpecs) {
for (const b of expander(a)) {
yield mergeParams(a, b);
}
}
});
/** @inheritDoc */
combineWithParams(newParams) {
assertNotGenerator(newParams);
const seenValues = new Set();
for (const params of newParams) {
const paramsStr = stringifyPublicParams(params);
assert(!seenValues.has(paramsStr), `Duplicate entry in combine[WithParams]: ${paramsStr}`);
seenValues.add(paramsStr);
}
return this;
return this.expandWithParams(() => newParams);
}
/** @inheritDoc */
combine(key, values) {
assertNotGenerator(values);
const mapped = mapLazy(values, v => ({ [key]: v }));
return this.combineWithParams(mapped);
}
/** @inheritDoc */
filter(pred) {
const paramSpecs = this.paramSpecs;
this.paramSpecs = makeReusableIterable(function* () {
for (const p of paramSpecs) {
if (pred(p)) {
yield p;
}
}
});
return this;
const newGenerator = filterGenerator(this.cases, pred);
return new CaseParamsBuilder(() => newGenerator({}));
}
/** @inheritDoc */
unless(pred) {
return this.filter(x => !pred(x));
}
exclude(exclude) {
const excludeArray = Array.from(exclude);
const paramSpecs = this.paramSpecs;
this.paramSpecs = makeReusableIterable(function* () {
for (const p of paramSpecs) {
if (excludeArray.every(e => !publicParamsEquals(p, e))) {
yield p;
}
/**
* "Finalize" the list of cases and begin defining subcases.
* Returns a new SubcaseParamsBuilder. Methods called on SubcaseParamsBuilder
* generate new subcases instead of new cases.
*/
beginSubcases() {
return new SubcaseParamsBuilder(
() => this.cases(),
function* () {
yield {};
}
);
}
}
/**
* The unit CaseParamsBuilder, representing a single case with no params: `[ {} ]`.
*
* `punit` is passed to every `.params()`/`.paramsSubcasesOnly()` call, so `kUnitCaseParamsBuilder`
* is only explicitly needed if constructing a ParamsBuilder outside of a test builder.
*/
export const kUnitCaseParamsBuilder = new CaseParamsBuilder(function* () {
yield {};
});
/**
* Builder for combinatorial test _subcase_ parameters.
*
* SubcaseParamsBuilder is immutable. Each method call returns a new, immutable object,
* modifying the list of subcases according to the method called.
*/
export class SubcaseParamsBuilder extends ParamsBuilderBase {
  constructor(cases, generator) {
    super(cases);
    // Generator producing the subcase params for a given case's params.
    this.subcases = generator;
  }
  /** Yields [caseParams, subcaseParams[]] pairs; cases with zero subcases are dropped. */
  *iterateCasesWithSubcases() {
    for (const caseP of this.cases()) {
      const subcases = Array.from(this.subcases(caseP));
      if (subcases.length) {
        yield [caseP, subcases];
      }
    }
  }
  /** @inheritDoc */
  expandWithParams(expander) {
    return new SubcaseParamsBuilder(this.cases, expanderGenerator(this.subcases, expander));
  }
  /** @inheritDoc */
  expand(key, expander) {
    // Fix(review): removed a stray unreachable `return this;` left behind by a
    // merged diff; builders are immutable, so each method returns a NEW builder.
    return this.expandWithParams(function* (p) {
      for (const value of expander(p)) {
        // TypeScript doesn't know here that NewPKey is always a single literal string type.
        yield { [key]: value };
      }
    });
  }
  /** @inheritDoc */
  combineWithParams(newParams) {
    assertNotGenerator(newParams);
    return this.expandWithParams(() => newParams);
  }
  /** @inheritDoc */
  combine(key, values) {
    assertNotGenerator(values);
    return this.expand(key, () => values);
  }
  /** @inheritDoc */
  filter(pred) {
    return new SubcaseParamsBuilder(this.cases, filterGenerator(this.subcases, pred));
  }
  /** @inheritDoc */
  unless(pred) {
    return this.filter(x => !pred(x));
  }
}
// If you create an Iterable by calling a generator function (e.g. in IIFE), it is exhausted after
// one use. This just wraps a generator function in an object so it be iterated multiple times.
function makeReusableIterable(generatorFn) {
return { [Symbol.iterator]: generatorFn };
// Wraps `baseGenerator` so each produced item is expanded by `expander`:
// for every item `a` from the base generator and every `b` from
// expander(mergeParams(base, a)), yields mergeParams(a, b).
function expanderGenerator(baseGenerator, expander) {
return function* (base) {
for (const a of baseGenerator(base)) {
for (const b of expander(mergeParams(base, a))) {
yield mergeParams(a, b);
}
}
};
}
// (keyof A & keyof B) is not empty, so they overlapped
// Wraps `baseGenerator`, yielding only items for which `pred` returns true
// when evaluated on the merged (base + item) params.
function filterGenerator(baseGenerator, pred) {
return function* (base) {
for (const a of baseGenerator(base)) {
if (pred(mergeParams(base, a))) {
yield a;
}
}
};
}
function mergeParams(a, b) {
for (const key of Object.keys(a)) {
assert(!(key in b), 'Duplicate key: ' + key);
/** Assert an object is not a Generator (a thing returned from a generator function). */
function assertNotGenerator(x) {
  if ('constructor' in x) {
    // Compare against the Generator constructor obtained from a throwaway
    // generator instance (there is no global `Generator` binding).
    assert(
      x.constructor !== (function* () {})().constructor,
      'Argument must not be a generator, as generators are not reusable'
    );
  }
  // Fix(review): removed a stray `return { ...a, ...b };` (residue of a merged
  // diff with the old mergeParams function) which referenced undefined `a`/`b`
  // and would throw a ReferenceError on every call.
}

View file

@ -1,19 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert } from '../util/util.js';
// JSON can't represent `undefined` and by default stores it as `null`.
// Instead, store `undefined` as this magic string value in JSON.
const jsUndefinedMagicValue = '_undef_';

/** Stringifies a param value to JSON, encoding `undefined` as the magic token. */
export function stringifyParamValue(value) {
  const replacer = (k, v) => {
    // Make sure no one actually uses the magic value as a parameter.
    assert(v !== jsUndefinedMagicValue);
    if (v === undefined) {
      return jsUndefinedMagicValue;
    }
    return v;
  };
  return JSON.stringify(value, replacer);
}

/** Inverse of stringifyParamValue: decodes the magic token back to `undefined`. */
export function parseParamValue(s) {
  const reviver = (k, v) => (v === jsUndefinedMagicValue ? undefined : v);
  return JSON.parse(s, reviver);
}

View file

@ -1,127 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
// Babel runtime helper: Object.defineProperty when `key` is already visible on
// `obj` (own or inherited), plain assignment otherwise. Returns `obj`.
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { assert } from '../util/util.js';
import { encodeURIComponentSelectively } from './encode_selectively.js';
import { kBigSeparator, kPathSeparator, kWildcard, kParamSeparator } from './separators.js';
import { stringifyPublicParams } from './stringify_params.js';
/**
* Represents a test query of some level.
*
* TestQuery types are immutable.
*/
// SingleCase
/**
* A multi-file test query, like `s:*` or `s:a,b,*`.
*
* Immutable (makes copies of constructor args).
*/
export class TestQueryMultiFile {
constructor(suite, file) {
// level/isMultiFile identify the query depth (installed via the Babel class-field helper).
_defineProperty(this, 'level', 1);
_defineProperty(this, 'isMultiFile', true);
_defineProperty(this, 'suite', void 0);
_defineProperty(this, 'filePathParts', void 0);
this.suite = suite;
// Copied so this query stays immutable even if the caller mutates `file`.
this.filePathParts = [...file];
}
/** Encodes the query as a string, selectively URI-encoding the joined parts. */
toString() {
return encodeURIComponentSelectively(this.toStringHelper().join(kBigSeparator));
}
// Returns the parts to join with kBigSeparator; subclasses override to add deeper levels.
toStringHelper() {
return [this.suite, [...this.filePathParts, kWildcard].join(kPathSeparator)];
}
}
/**
* A multi-test test query, like `s:f:*` or `s:f:a,b,*`.
*
* Immutable (makes copies of constructor args).
*/
export class TestQueryMultiTest extends TestQueryMultiFile {
constructor(suite, file, test) {
super(suite, file);
// Deeper level: overrides the base class's isMultiFile.
_defineProperty(this, 'level', 2);
_defineProperty(this, 'isMultiFile', false);
_defineProperty(this, 'isMultiTest', true);
_defineProperty(this, 'testPathParts', void 0);
assert(file.length > 0, 'multi-test (or finer) query must have file-path');
this.testPathParts = [...test];
}
toStringHelper() {
return [
this.suite,
this.filePathParts.join(kPathSeparator),
[...this.testPathParts, kWildcard].join(kPathSeparator),
];
}
}
/**
* A multi-case test query, like `s:f:t:*` or `s:f:t:a,b,*`.
*
* Immutable (makes copies of constructor args), except for param values
* (which aren't normally supposed to change; they're marked readonly in CaseParams).
*/
export class TestQueryMultiCase extends TestQueryMultiTest {
constructor(suite, file, test, params) {
super(suite, file, test);
_defineProperty(this, 'level', 3);
_defineProperty(this, 'isMultiTest', false);
_defineProperty(this, 'isMultiCase', true);
_defineProperty(this, 'params', void 0);
assert(test.length > 0, 'multi-case (or finer) query must have test-path');
// Shallow copy; the param values themselves are shared with the caller.
this.params = { ...params };
}
toStringHelper() {
const paramsParts = stringifyPublicParams(this.params);
return [
this.suite,
this.filePathParts.join(kPathSeparator),
this.testPathParts.join(kPathSeparator),
[...paramsParts, kWildcard].join(kParamSeparator),
];
}
}
/**
* A multi-case test query, like `s:f:t:` or `s:f:t:a=1,b=1`.
*
* Immutable (makes copies of constructor args).
*/
export class TestQuerySingleCase extends TestQueryMultiCase {
constructor(...args) {
super(...args);
_defineProperty(this, 'level', 4);
_defineProperty(this, 'isMultiCase', false);
}
toStringHelper() {
const paramsParts = stringifyPublicParams(this.params);
return [
this.suite,
this.filePathParts.join(kPathSeparator),
this.testPathParts.join(kPathSeparator),
paramsParts.join(kParamSeparator),
];
}
}

View file

@ -1,27 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
import { assert } from '../util/util.js';
import { stringifyParamValue } from './json_param_value.js';
import { kParamKVSeparator } from './separators.js';
/** Stringify the public (non-underscore-prefixed) entries of `p` as `key=value` strings. */
export function stringifyPublicParams(p) {
  const out = [];
  for (const k of Object.keys(p)) {
    if (paramKeyIsPublic(k)) {
      out.push(stringifySingleParam(k, p[k]));
    }
  }
  return out;
}

/** Stringify one key/value pair using the param key-value separator. */
export function stringifySingleParam(k, v) {
  return k + kParamKVSeparator + stringifySingleParamValue(v);
}

/** Stringify a single value, checking it contains no characters that would break query parsing. */
function stringifySingleParamValue(v) {
  const s = stringifyParamValue(v);
  assert(
    !badParamValueChars.test(s),
    `JSON.stringified param value must not match ${badParamValueChars} - was ${s}`
  );
  return s;
}

View file

@ -0,0 +1,111 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ /**
* Base path for resources. The default value is correct for non-worker WPT, but standalone and
* workers must access resources using a different base path, so this is overridden in
* `test_worker-worker.ts` and `standalone.ts`.
*/ let baseResourcePath = './resources';
let crossOriginHost = '';
/** Resolve `path` to an absolute path, relative to the current page's directory if needed. */
function getAbsoluteBaseResourcePath(path) {
  // Already absolute: nothing to resolve.
  if (path[0] === '/') {
    return path;
  }
  // Relative: resolve against the directory of the current page.
  const resolved = window.location.pathname.split('/');
  resolved.pop();
  for (const part of path.split('/')) {
    if (part === '' || part === '.') {
      continue;
    }
    if (part === '..') {
      resolved.pop();
    } else {
      resolved.push(part);
    }
  }
  return resolved.join('/');
}
/** True if the page is being served from a local host (localhost, 127.0.0.1, or ::1). */
function runningOnLocalHost() {
  const kLocalHostNames = ['localhost', '127.0.0.1', '::1'];
  return kLocalHostNames.includes(window.location.hostname);
}
/**
 * Get a path to a resource in the `resources` directory, relative to the current execution context
 * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc., but served from a
 * cross-origin host. Provide `onlineUrl` if the case is running online.
 * @internal MAINTENANCE_TODO: Cases may run in a LAN environment (not localhost but no internet
 * access). We temporarily use `crossOriginHost` to configure the cross-origin host name in that
 * situation. Consider an auto-detection mechanism or other solutions.
 */
export function getCrossOriginResourcePath(pathRelativeToResourcesDir, onlineUrl = '') {
  // A cross-origin host has been configured explicitly; use it to load the resource.
  if (crossOriginHost !== '') {
    return (
      crossOriginHost +
      getAbsoluteBaseResourcePath(baseResourcePath) +
      '/' +
      pathRelativeToResourcesDir
    );
  }
  // Otherwise, use the 'localhost' / '127.0.0.1' trick: these are distinct origins, so when
  // running on one of them, loading from the other (same port, same server) is cross-origin.
  if (runningOnLocalHost()) {
    let crossOriginHostName = '';
    if (location.hostname === 'localhost') {
      crossOriginHostName = 'http://127.0.0.1';
    } else {
      crossOriginHostName = 'http://localhost';
    }
    return (
      crossOriginHostName +
      ':' +
      location.port +
      getAbsoluteBaseResourcePath(baseResourcePath) +
      '/' +
      pathRelativeToResourcesDir
    );
  }
  // Not on localhost and no configured cross-origin host: fall back to the provided online URL.
  return onlineUrl;
}
/**
 * Get a path to a resource in the `resources` directory, relative to the current execution context
 * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc. Set the cross-origin host
 * name (via setCrossOriginHost) if you want to load the resource from a cross-origin host.
 */
export function getResourcePath(pathRelativeToResourcesDir) {
  // Join with the (overridable) base resource path.
  return `${baseResourcePath}/${pathRelativeToResourcesDir}`;
}
/**
 * Set the base resource path (path to the `resources` directory relative to the current
 * execution context). Overrides the default './resources' used by getResourcePath() and
 * getCrossOriginResourcePath().
 */
export function setBaseResourcePath(path) {
  baseResourcePath = path;
}
/**
 * Set the cross-origin host; cases that load cross-origin resources (via
 * getCrossOriginResourcePath) will load them from the given host.
 */
export function setCrossOriginHost(host) {
  crossOriginHost = host;
}

View file

@ -0,0 +1,10 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
export const globalTestConfig = {
  // Limit on concurrently-pending subcases (presumably enforced by the runner — confirm there).
  maxSubcasesInFlight: 500,
  // Liveness hook; called by TestCaseRecorder.logImpl on every log. No-op by default.
  testHeartbeatCallback: () => {},
  // NOTE(review): the two flags below are defaults consumed elsewhere; semantics inferred
  // from their names — confirm against the harness code that reads them.
  noRaceWithRejectOnTimeout: false,
  unrollConstEvalLoops: false,
};

View file

@ -1,162 +1,3 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
import { extractPublicParams, publicParamsEquals } from './params_utils.js';
import { kPathSeparator } from './query/separators.js';
import { stringifyPublicParams } from './query/stringify_params.js';
import { validQueryPart } from './query/validQueryPart.js';
import { assert } from './util/util.js';
export function makeTestGroup(fixture) {
return new TestGroup(fixture);
}
// Interface for running tests
export function makeTestGroupForUnitTesting(fixture) {
return new TestGroup(fixture);
}
class TestGroup {
constructor(fixture) {
_defineProperty(this, 'fixture', void 0);
_defineProperty(this, 'seen', new Set());
_defineProperty(this, 'tests', []);
this.fixture = fixture;
}
*iterate() {
for (const test of this.tests) {
yield* test.iterate();
}
}
checkName(name) {
assert(
// Shouldn't happen due to the rule above. Just makes sure that treated
// unencoded strings as encoded strings is OK.
name === decodeURIComponent(name),
`Not decodeURIComponent-idempotent: ${name} !== ${decodeURIComponent(name)}`
);
assert(!this.seen.has(name), `Duplicate test name: ${name}`);
this.seen.add(name);
}
// TODO: This could take a fixture, too, to override the one for the group.
test(name) {
this.checkName(name);
const parts = name.split(kPathSeparator);
for (const p of parts) {
assert(validQueryPart.test(p), `Invalid test name part ${p}; must match ${validQueryPart}`);
}
const test = new TestBuilder(parts, this.fixture);
this.tests.push(test);
return test;
}
checkCaseNamesAndDuplicates() {
for (const test of this.tests) {
test.checkCaseNamesAndDuplicates();
}
}
}
class TestBuilder {
constructor(testPath, fixture) {
_defineProperty(this, 'testPath', void 0);
_defineProperty(this, 'fixture', void 0);
_defineProperty(this, 'testFn', void 0);
_defineProperty(this, 'cases', undefined);
this.testPath = testPath;
this.fixture = fixture;
}
fn(fn) {
this.testFn = fn;
}
checkCaseNamesAndDuplicates() {
if (this.cases === undefined) {
return;
}
// This is n^2.
const seen = [];
for (const testcase of this.cases) {
// stringifyPublicParams also checks for invalid params values
const testcaseString = stringifyPublicParams(testcase);
assert(
!seen.some(x => publicParamsEquals(x, testcase)),
`Duplicate public test case params: ${testcaseString}`
);
seen.push(testcase);
}
}
params(casesIterable) {
assert(this.cases === undefined, 'test case is already parameterized');
this.cases = Array.from(casesIterable);
return this;
}
*iterate() {
assert(this.testFn !== undefined, 'No test function (.fn()) for test');
for (const params of this.cases || [{}]) {
yield new RunCaseSpecific(this.testPath, params, this.fixture, this.testFn);
}
}
}
class RunCaseSpecific {
constructor(testPath, params, fixture, fn) {
_defineProperty(this, 'id', void 0);
_defineProperty(this, 'params', void 0);
_defineProperty(this, 'fixture', void 0);
_defineProperty(this, 'fn', void 0);
this.id = { test: testPath, params: extractPublicParams(params) };
this.params = params;
this.fixture = fixture;
this.fn = fn;
}
async run(rec) {
rec.start();
try {
const inst = new this.fixture(rec, this.params || {});
try {
await inst.init();
await this.fn(inst);
} finally {
// Runs as long as constructor succeeded, even if initialization or the test failed.
await inst.finalize();
}
} catch (ex) {
// There was an exception from constructor, init, test, or finalize.
// An error from init or test may have been a SkipTestCase.
// An error from finalize may have been an eventualAsyncExpectation failure
// or unexpected validation/OOM error from the GPUDevice.
rec.threw(ex);
}
rec.finish();
}
}
**/ export { makeTestGroup } from '../internal/test_group.js';

View file

@ -1,38 +0,0 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ function _defineProperty(obj, key, value) {
  // Babel helper: define an own property (non-clobbering when the key already exists).
  if (key in obj) {
    Object.defineProperty(obj, key, { value, enumerable: true, configurable: true, writable: true });
  } else {
    obj[key] = value;
  }
  return obj;
}
/** A mutex for serializing async work: tasks run one at a time, in submission order. */
export class AsyncMutex {
  constructor() {
    _defineProperty(this, 'newestQueueItem', void 0);
  }
  /**
   * Run an async function with a lock on this mutex.
   * Waits until the mutex is available, locks it, runs the function, then releases it.
   */
  async with(fn) {
    // Capture the current tail of the queue; everything enqueued runs in order,
    // so waiting on the tail waits for everything currently enqueued.
    const predecessor = this.newestQueueItem;
    const task = (async () => {
      if (predecessor) {
        await predecessor;
      }
      return fn();
    })();
    // This task becomes the new tail of the queue.
    this.newestQueueItem = task;
    // Return so the caller can await the result.
    return task;
  }
}

View file

@ -1,3 +0,0 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ // Prefer WPT's `step_timeout` when it is in scope; otherwise fall back to plain setTimeout.
export const timeout = typeof step_timeout === 'undefined' ? setTimeout : step_timeout;

View file

@ -1,72 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { timeout } from './timeout.js';
/**
 * Assert that `condition` is truthy; otherwise throw an Error.
 * `msg` may be a string or a lazy thunk (only invoked on failure).
 */
export function assert(condition, msg) {
  if (!condition) {
    throw new Error(msg && (typeof msg === 'string' ? msg : msg()));
  }
}
/**
 * Assert that the promise `p` rejects. If it resolves instead, fails via unreachable(msg).
 *
 * BUG FIX: previously `unreachable(msg)` was called inside the `try`, so the Error it threw
 * was immediately swallowed by the `catch` — this assertion could never fail. The
 * resolved/rejected decision is now tracked and unreachable() is called outside the try.
 */
export async function assertReject(p, msg) {
  let rejected = false;
  try {
    await p;
  } catch (ex) {
    // Rejected as expected.
    rejected = true;
  }
  if (!rejected) {
    unreachable(msg);
  }
}
/** Unconditionally throw an Error — marks code paths that must never be reached. */
export function unreachable(msg) {
  const err = new Error(msg);
  throw err;
}
// performance.now() is available in all browsers, but not in scope by default in Node.
const perf = typeof performance === 'undefined' ? require('perf_hooks').performance : performance;

/** Current high-resolution timestamp in milliseconds (performance.now-based). */
export function now() {
  return perf.now();
}
/** Returns a promise that resolves (with no value) after `ms` milliseconds. */
export function resolveOnTimeout(ms) {
  return new Promise(resolve => {
    timeout(() => resolve(), ms);
  });
}
/** Error type used when a timed promise race exceeds its deadline. */
export class PromiseTimeoutError extends Error {}

/** Returns a promise that rejects with a PromiseTimeoutError(msg) after `ms` milliseconds. */
export function rejectOnTimeout(ms, msg) {
  return new Promise((_resolve, reject) => {
    timeout(() => reject(new PromiseTimeoutError(msg)), ms);
  });
}

/** Race `p` against a deadline: rejects with PromiseTimeoutError(msg) if `p` takes over `ms`. */
export function raceWithRejectOnTimeout(p, ms, msg) {
  return Promise.race([p, rejectOnTimeout(ms, msg)]);
}
/**
 * Structural deep-equality check. Primitives/functions/RegExps compare by identity;
 * objects compare by constructor, valueOf, and recursive key-by-key equality.
 */
export function objectEquals(x, y) {
  // Non-objects (numbers, strings, functions, undefined, ...) compare by value/identity.
  if (typeof x !== 'object' || typeof y !== 'object') return x === y;
  if (x === null || y === null) return x === y;
  if (x.constructor !== y.constructor) return false;
  if (x instanceof Function) return x === y;
  if (x instanceof RegExp) return x === y;
  // Identical references, or matching valueOf (covers e.g. equal Dates), are equal.
  if (x === y || x.valueOf() === y.valueOf()) return true;
  // Arrays of differing length can never be equal.
  if (Array.isArray(x) && Array.isArray(y) && x.length !== y.length) return false;
  // Dates with equal valueOf returned true above; any remaining Date pair is unequal.
  if (x instanceof Date) return false;
  if (!(x instanceof Object)) return false;
  if (!(y instanceof Object)) return false;
  const xKeys = Object.keys(x);
  const yCoveredByX = Object.keys(y).every(key => xKeys.includes(key));
  return yCoveredByX && xKeys.every(key => objectEquals(x[key], y[key]));
}
/** Returns the array [fn(0), fn(1), ..., fn(n-1)]. */
export function range(n, fn) {
  const result = new Array(n);
  for (let i = 0; i < n; ++i) {
    result[i] = fn(i);
  }
  return result;
}

View file

@ -1,3 +0,0 @@
// AUTO-GENERATED - DO NOT EDIT. See tools/gen_version.
export const version = 'c1df7f4ff1adcde985384633e7cffa52d53e3535';

View file

@ -1,6 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { parseQuery } from './query/parseQuery.js';
**/ import { assert } from '../util/util.js';
import { parseQuery } from './query/parseQuery.js';
import { loadTreeForQuery } from './tree.js';
@ -9,17 +10,30 @@ import { loadTreeForQuery } from './tree.js';
// - `out/webgpu/listing.js` (which is pre-baked, has a TestSuiteListing)
// Base class for DefaultTestFileLoader and FakeTestFileLoader.
export class TestFileLoader {
importSpecFile(suite, path) {
return this.import(`${suite}/${path.join('/')}.spec.js`);
export class TestFileLoader extends EventTarget {
async importSpecFile(suite, path) {
const url = `${suite}/${path.join('/')}.spec.js`;
this.dispatchEvent(new MessageEvent('import', { data: { url } }));
const ret = await this.import(url);
this.dispatchEvent(new MessageEvent('imported', { data: { url } }));
return ret;
}
async loadTree(query, subqueriesToExpand = []) {
return loadTreeForQuery(
const tree = await loadTreeForQuery(
this,
query,
subqueriesToExpand.map(q => parseQuery(q))
subqueriesToExpand.map(s => {
const q = parseQuery(s);
assert(q.level >= 2, () => `subqueriesToExpand entries should not be multi-file:\n ${q}`);
return q;
})
);
this.dispatchEvent(new MessageEvent('finish'));
return tree;
}
async loadCases(query) {

View file

@ -0,0 +1,42 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { extractImportantStackTrace } from '../stack.js';
/** An Error wrapper for log entries, carrying a label, original stack, and optional extra data. */
export class LogMessageWithStack extends Error {
  stackHiddenMessage = undefined;
  /**
   * @param name label for the entry (e.g. 'EXCEPTION', 'WARN').
   * @param ex underlying Error whose message/stack (and optional `extra`) are captured.
   */
  constructor(name, ex) {
    super(ex.message);
    this.name = name;
    this.stack = ex.stack;
    if ('extra' in ex) {
      this.extra = ex.extra;
    }
  }
  /** Set a flag so the stack is not printed in toJSON(). Only the first call takes effect. */
  setStackHidden(stackHiddenMessage) {
    this.stackHiddenMessage ??= stackHiddenMessage;
  }
  toJSON() {
    let out = this.name;
    if (this.message) {
      out += ': ' + this.message;
    }
    if (this.stack) {
      if (this.stackHiddenMessage === undefined) {
        out += '\n' + extractImportantStackTrace(this);
      } else if (this.stackHiddenMessage) {
        out += `\n at (elided: ${this.stackHiddenMessage})`;
      }
      // stackHiddenMessage === '' means: hide the stack with no elision note at all.
    }
    return out;
  }
}
/**
 * Returns a string, nicely indented, for debug logs.
 * This is used in the cmdline and wpt runtimes. In WPT, it shows up in the `*-actual.txt` file.
 */
export function prettyPrintLog(log) {
  const body = log.toJSON().replace(/\n/g, '\n ');
  return ' - ' + body;
}

View file

@ -0,0 +1,27 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { version } from '../version.js';
import { TestCaseRecorder } from './test_case_recorder.js';
/** Creates TestCaseRecorders and collects their live results, keyed by test-case name. */
export class Logger {
  // When true, recorders produced by any Logger record debug() messages.
  static globalDebugMode = false;
  // Map of case name -> live result object (mutated by the corresponding TestCaseRecorder).
  results = new Map();
  // overrideDebugMode, when provided, takes precedence over Logger.globalDebugMode.
  constructor({ overrideDebugMode } = {}) {
    this.overriddenDebugMode = overrideDebugMode;
  }
  // Create a live result entry for `name`; returns [recorder, result].
  record(name) {
    const result = { status: 'running', timems: -1 };
    this.results.set(name, result);
    return [
      new TestCaseRecorder(result, this.overriddenDebugMode ?? Logger.globalDebugMode),
      result,
    ];
  }
  // Serialize the CTS version plus all collected results as JSON.
  asJSON(space) {
    return JSON.stringify({ version, results: Array.from(this.results) }, undefined, space);
  }
}

View file

@ -1,3 +1,3 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
**/ export {};

View file

@ -0,0 +1,158 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { SkipTestCase, UnexpectedPassError } from '../../framework/fixture.js';
import { globalTestConfig } from '../../framework/test_config.js';
import { now, assert } from '../../util/util.js';
import { LogMessageWithStack } from './log_message.js';
// Transpiled TS numeric enum: severity levels ordered from least (Pass=0) to most severe
// (ThrewException=5). TestCaseRecorder keeps the maximum level seen as the case status.
var LogSeverity;
(function (LogSeverity) {
  LogSeverity[(LogSeverity['Pass'] = 0)] = 'Pass';
  LogSeverity[(LogSeverity['Skip'] = 1)] = 'Skip';
  LogSeverity[(LogSeverity['Warn'] = 2)] = 'Warn';
  LogSeverity[(LogSeverity['ExpectFailed'] = 3)] = 'ExpectFailed';
  LogSeverity[(LogSeverity['ValidationFailed'] = 4)] = 'ValidationFailed';
  LogSeverity[(LogSeverity['ThrewException'] = 5)] = 'ThrewException';
})(LogSeverity || (LogSeverity = {}));
// At most this many full stacks are kept, for entries at the highest severity seen.
const kMaxLogStacks = 2;
// Stacks are elided entirely for messages below this severity.
const kMinSeverityForStack = LogSeverity.Warn;
/** Holds onto a LiveTestCaseResult owned by the Logger, and writes the results into it. */
export class TestCaseRecorder {
  // True while between beginSubCase() and endSubCase().
  inSubCase = false;
  // Worst severity logged during the current subcase.
  subCaseStatus = LogSeverity.Pass;
  // Worst severity over the whole case (folded in from subcases and direct logs).
  finalCaseStatus = LogSeverity.Pass;
  // Current severity threshold: stacks below this level get elided.
  hideStacksBelowSeverity = kMinSeverityForStack;
  // Set by start(); -1 means start() has not been called yet.
  startTime = -1;
  // All LogMessageWithStack entries recorded for this case.
  logs = [];
  // Count of entries logged at the current highest severity (limits kept stacks).
  logLinesAtCurrentSeverity = 0;
  // When true, debug() messages are recorded; otherwise they are dropped.
  debugging = false;
  /** Used to dedup log messages which have identical stacks. */
  messagesForPreviouslySeenStacks = new Map();
  // result: the live result object (owned by the Logger) this recorder writes into.
  constructor(result, debugging) {
    this.result = result;
    this.debugging = debugging;
  }
  // Mark the start of the case. A recorder is single-use.
  start() {
    assert(this.startTime < 0, 'TestCaseRecorder cannot be reused');
    this.startTime = now();
  }
  // Finalize timing and convert the accumulated severity into a status string.
  finish() {
    assert(this.startTime >= 0, 'finish() before start()');
    const timeMilliseconds = now() - this.startTime;
    // Round to next microsecond to avoid storing useless .xxxx00000000000002 in results.
    this.result.timems = Math.ceil(timeMilliseconds * 1000) / 1000;
    // Convert numeric enum back to string (but expose 'exception' as 'fail')
    this.result.status =
      this.finalCaseStatus === LogSeverity.Pass
        ? 'pass'
        : this.finalCaseStatus === LogSeverity.Skip
        ? 'skip'
        : this.finalCaseStatus === LogSeverity.Warn
        ? 'warn'
        : 'fail'; // Everything else is an error
    this.result.logs = this.logs;
  }
  // Reset per-subcase state before running a subcase.
  beginSubCase() {
    this.subCaseStatus = LogSeverity.Pass;
    this.inSubCase = true;
  }
  // Fold the subcase's status into the case. An expected-'fail' subcase that actually
  // passed (<= Warn) raises UnexpectedPassError; one that failed is downgraded to Pass.
  endSubCase(expectedStatus) {
    try {
      if (expectedStatus === 'fail') {
        if (this.subCaseStatus <= LogSeverity.Warn) {
          throw new UnexpectedPassError();
        } else {
          this.subCaseStatus = LogSeverity.Pass;
        }
      }
    } finally {
      this.inSubCase = false;
      if (this.subCaseStatus > this.finalCaseStatus) {
        this.finalCaseStatus = this.subCaseStatus;
      }
    }
  }
  // Overwrite this recorder's result fields with an externally-provided result.
  injectResult(injectedResult) {
    Object.assign(this.result, injectedResult);
  }
  // Logging entry points, one per severity:
  debug(ex) {
    if (!this.debugging) return;
    this.logImpl(LogSeverity.Pass, 'DEBUG', ex);
  }
  info(ex) {
    this.logImpl(LogSeverity.Pass, 'INFO', ex);
  }
  skipped(ex) {
    this.logImpl(LogSeverity.Skip, 'SKIP', ex);
  }
  warn(ex) {
    this.logImpl(LogSeverity.Warn, 'WARN', ex);
  }
  expectationFailed(ex) {
    this.logImpl(LogSeverity.ExpectFailed, 'EXPECTATION FAILED', ex);
  }
  validationFailed(ex) {
    this.logImpl(LogSeverity.ValidationFailed, 'VALIDATION FAILED', ex);
  }
  // Record a thrown exception; SkipTestCase exceptions are recorded as skips instead.
  threw(ex) {
    if (ex instanceof SkipTestCase) {
      this.skipped(ex);
      return;
    }
    this.logImpl(LogSeverity.ThrewException, 'EXCEPTION', ex);
  }
  // Shared implementation: record one entry and update severity/stack-eliding state.
  logImpl(level, name, baseException) {
    assert(baseException instanceof Error, 'test threw a non-Error object');
    globalTestConfig.testHeartbeatCallback();
    const logMessage = new LogMessageWithStack(name, baseException);
    // Final case status should be the "worst" of all log entries.
    if (this.inSubCase) {
      if (level > this.subCaseStatus) this.subCaseStatus = level;
    } else {
      if (level > this.finalCaseStatus) this.finalCaseStatus = level;
    }
    // setFirstLineOnly for all logs except `kMaxLogStacks` stacks at the highest severity
    if (level > this.hideStacksBelowSeverity) {
      this.logLinesAtCurrentSeverity = 0;
      this.hideStacksBelowSeverity = level;
      // Go back and setFirstLineOnly for everything of a lower log level
      for (const log of this.logs) {
        log.setStackHidden('below max severity');
      }
    }
    if (level === this.hideStacksBelowSeverity) {
      this.logLinesAtCurrentSeverity++;
    } else if (level < kMinSeverityForStack) {
      logMessage.setStackHidden('');
    } else if (level < this.hideStacksBelowSeverity) {
      logMessage.setStackHidden('below max severity');
    }
    if (this.logLinesAtCurrentSeverity > kMaxLogStacks) {
      logMessage.setStackHidden(`only ${kMaxLogStacks} shown`);
    }
    this.logs.push(logMessage);
  }
}

View file

@ -1,11 +1,9 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { comparePublicParamsPaths, Ordering } from './query/compare.js';
**/ import { assert } from '../util/util.js';
import { comparePublicParamsPaths, Ordering } from './query/compare.js';
import { kWildcard, kParamSeparator, kParamKVSeparator } from './query/separators.js';
// Consider adding more types here if needed
//
// TODO: This type isn't actually used to constrain what you're allowed to do in `.params()`, so
// it's not really serving its purpose. Figure out how to fix that?
export function paramKeyIsPublic(key) {
return !key.startsWith('_');
@ -28,3 +26,33 @@ export const badParamValueChars = new RegExp(
export function publicParamsEquals(x, y) {
return comparePublicParamsPaths(x, y) === Ordering.Equal;
}
function typeAssert() {}
{
{
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
typeAssert();
// Unexpected test results - hopefully okay to ignore these
typeAssert();
typeAssert();
}
}
/** Merge two param objects into a new object; asserts that they share no keys. */
export function mergeParams(a, b) {
  Object.keys(a).forEach(key => {
    assert(!(key in b), 'Duplicate key: ' + key);
  });
  return Object.assign({}, a, b);
}

View file

@ -1,7 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { paramKeyIsPublic } from '../params_utils.js';
import { assert, objectEquals } from '../util/util.js';
**/ import { assert, objectEquals } from '../../util/util.js';
import { paramKeyIsPublic } from '../params_utils.js';
export let Ordering;

View file

@ -0,0 +1,103 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert, sortObjectByKey } from '../../util/util.js';
// JSON can't represent various values and by default stores them as `null`.
// Instead, storing them as a magic string values in JSON.
const jsUndefinedMagicValue = '_undef_';
const jsNaNMagicValue = '_nan_';
const jsPositiveInfinityMagicValue = '_posinfinity_';
const jsNegativeInfinityMagicValue = '_neginfinity_';
// -0 needs to be handled separately, because -0 === +0 returns true. Not
// special casing +0/0, since it behaves intuitively. Assuming that if -0 is
// being used, the differentiation from +0 is desired.
const jsNegativeZeroMagicValue = '_negzero_';
// bigint values are not defined in JSON, so need to wrap them up as strings
const jsBigIntMagicPattern = /^(\d+)n$/;
// Value -> magic string for serialization (Map lookup uses SameValueZero, so NaN works as a key).
const toStringMagicValue = new Map([
  [undefined, jsUndefinedMagicValue],
  [NaN, jsNaNMagicValue],
  [Number.POSITIVE_INFINITY, jsPositiveInfinityMagicValue],
  [Number.NEGATIVE_INFINITY, jsNegativeInfinityMagicValue],
  // No -0 handling because it is special cased.
]);
// Magic string -> value for deserialization.
const fromStringMagicValue = new Map([
  [jsUndefinedMagicValue, undefined],
  [jsNaNMagicValue, NaN],
  [jsPositiveInfinityMagicValue, Number.POSITIVE_INFINITY],
  [jsNegativeInfinityMagicValue, Number.NEGATIVE_INFINITY],
  // -0 is handled in this direction because there is no comparison issue.
  [jsNegativeZeroMagicValue, -0],
]);
/** JSON.stringify replacer that encodes undefined/NaN/±Infinity/-0/bigint as magic strings. */
function stringifyFilter(k, v) {
  if (typeof v === 'string') {
    // Make sure no one actually uses a magic value as a parameter.
    assert(
      !fromStringMagicValue.has(v),
      `${v} is a magic value for stringification, so cannot be used`
    );
    assert(
      v !== jsNegativeZeroMagicValue,
      `${v} is a magic value for stringification, so cannot be used`
    );
    assert(
      v.match(jsBigIntMagicPattern) === null,
      `${v} matches bigint magic pattern for stringification, so cannot be used`
    );
  }
  if (Object.is(v, -0)) {
    return jsNegativeZeroMagicValue;
  }
  if (typeof v === 'bigint') {
    return `${v}n`;
  }
  if (toStringMagicValue.has(v)) {
    return toStringMagicValue.get(v);
  }
  return v;
}
/** Serialize a param value to JSON, encoding non-JSON-representable values as magic strings. */
export function stringifyParamValue(value) {
  return JSON.stringify(value, stringifyFilter);
}
/**
 * Like stringifyParamValue but sorts dictionaries by key, for hashing.
 */
export function stringifyParamValueUniquely(value) {
  const replacer = (k, v) =>
    typeof v === 'object' && v !== null ? sortObjectByKey(v) : stringifyFilter(k, v);
  return JSON.stringify(value, replacer);
}
/** JSON.parse reviver: decodes the magic strings produced by stringifyFilter. */
function parseParamValueReviver(k, v) {
  if (fromStringMagicValue.has(v)) {
    return fromStringMagicValue.get(v);
  }
  if (typeof v === 'string') {
    const bigintMatch = jsBigIntMagicPattern.exec(v);
    if (bigintMatch !== null) {
      // [0] is the entire match; [1] is the digits capture group.
      return BigInt(bigintMatch[1]);
    }
  }
  return v;
}
/** Inverse of stringifyParamValue. */
export function parseParamValue(s) {
  return JSON.parse(s, parseParamValueReviver);
}

View file

@ -1,7 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
import { assert } from '../util/util.js';
**/ import { assert } from '../../util/util.js';
import { badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
import { parseParamValue } from './json_param_value.js';
import {
@ -17,7 +17,9 @@ export function parseQuery(s) {
try {
return parseQueryImpl(s);
} catch (ex) {
ex.message += '\n on: ' + s;
if (ex instanceof Error) {
ex.message += '\n on: ' + s;
}
throw ex;
}
}
@ -91,7 +93,7 @@ function parseQueryImpl(s) {
const params = {};
for (const paramPart of paramsParts) {
const [k, v] = parseSingleParam(paramPart);
assert(validQueryPart.test(k), 'param key names must match ' + validQueryPart);
assert(validQueryPart.test(k), `param key names must match ${validQueryPart}`);
params[k] = v;
}
if (paramsHasWildcard) {

View file

@ -0,0 +1,237 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { optionEnabled } from '../../runtime/helper/options.js';
import { assert, unreachable } from '../../util/util.js';
import { compareQueries, Ordering } from './compare.js';
import { encodeURIComponentSelectively } from './encode_selectively.js';
import { parseQuery } from './parseQuery.js';
import { kBigSeparator, kPathSeparator, kWildcard } from './separators.js';
import { stringifyPublicParams } from './stringify_params.js';
/**
* Represents a test query of some level.
*
* TestQuery types are immutable.
*/
/**
* A multi-file test query, like `s:*` or `s:a,b,*`.
*
* Immutable (makes copies of constructor args).
*/
/**
 * A multi-file test query, like `s:*` or `s:a,b,*`.
 * Immutable: copies the `file` path passed to the constructor.
 */
export class TestQueryMultiFile {
  level = 1;
  isMultiFile = true;
  constructor(suite, file) {
    this.suite = suite;
    this.filePathParts = [...file];
  }
  /** Depth within this query level: number of file-path parts specified. */
  get depthInLevel() {
    return this.filePathParts.length;
  }
  toString() {
    const joined = this.toStringHelper().join(kBigSeparator);
    return encodeURIComponentSelectively(joined);
  }
  toStringHelper() {
    const filePart = [...this.filePathParts, kWildcard].join(kPathSeparator);
    return [this.suite, filePart];
  }
}
/**
* A multi-test test query, like `s:f:*` or `s:f:a,b,*`.
*
* Immutable (makes copies of constructor args).
*/
export class TestQueryMultiTest extends TestQueryMultiFile {
level = 2;
isMultiFile = false;
isMultiTest = true;
constructor(suite, file, test) {
super(suite, file);
assert(file.length > 0, 'multi-test (or finer) query must have file-path');
this.testPathParts = [...test];
}
get depthInLevel() {
return this.testPathParts.length;
}
toStringHelper() {
return [
this.suite,
this.filePathParts.join(kPathSeparator),
[...this.testPathParts, kWildcard].join(kPathSeparator),
];
}
}
/**
* A multi-case test query, like `s:f:t:*` or `s:f:t:a,b,*`.
*
* Immutable (makes copies of constructor args), except for param values
* (which aren't normally supposed to change; they're marked readonly in TestParams).
*/
export class TestQueryMultiCase extends TestQueryMultiTest {
level = 3;
isMultiTest = false;
isMultiCase = true;
constructor(suite, file, test, params) {
super(suite, file, test);
assert(test.length > 0, 'multi-case (or finer) query must have test-path');
this.params = { ...params };
}
get depthInLevel() {
return Object.keys(this.params).length;
}
toStringHelper() {
return [
this.suite,
this.filePathParts.join(kPathSeparator),
this.testPathParts.join(kPathSeparator),
stringifyPublicParams(this.params, true),
];
}
}
/**
* A multi-case test query, like `s:f:t:` or `s:f:t:a=1,b=1`.
*
* Immutable (makes copies of constructor args).
*/
export class TestQuerySingleCase extends TestQueryMultiCase {
level = 4;
isMultiCase = false;
get depthInLevel() {
return 0;
}
toStringHelper() {
return [
this.suite,
this.filePathParts.join(kPathSeparator),
this.testPathParts.join(kPathSeparator),
stringifyPublicParams(this.params),
];
}
}
/**
 * Parse raw expectations input into TestQueryWithExpectation[], filtering so that only
 * expectations that are relevant for the provided query and wptURL remain.
 *
 * `rawExpectations` should be @type {{ query: string, expectation: Expectation }[]}
 *
 * The `rawExpectations` are parsed and validated to be in the correct format.
 * If `wptURL` is passed, the query string should be of the full path format such
 * as `path/to/cts.https.html?worker=0&q=suite:test_path:test_name:foo=1;bar=2;*`.
 * If `wptURL` is `undefined`, the query string should be only the query
 * `suite:test_path:test_name:foo=1;bar=2;*`.
 */
export function parseExpectationsForTestQuery(
  rawExpectations,
  query,
  wptURL
) {
  if (!Array.isArray(rawExpectations)) {
    unreachable('Expectations should be an array');
  }
  const expectations = [];
  for (const entry of rawExpectations) {
    assert(typeof entry === 'object');
    const rawExpectation = entry;
    assert(rawExpectation.query !== undefined, 'Expectation missing query string');
    assert(rawExpectation.expectation !== undefined, 'Expectation missing expectation string');
    let expectationQuery;
    if (wptURL !== undefined) {
      const expectationURL = new URL(`${wptURL.origin}/${entry.query}`);
      // Expectations for a different WPT path don't apply to this page; skip them.
      // (A follow-up assert that the pathnames matched was unreachable here and was removed.)
      if (expectationURL.pathname !== wptURL.pathname) {
        continue;
      }
      const params = expectationURL.searchParams;
      // Worker-mode expectations only apply when the ?worker= option matches this page's.
      if (optionEnabled('worker', params) !== optionEnabled('worker', wptURL.searchParams)) {
        continue;
      }
      const qs = params.getAll('q');
      assert(qs.length === 1, 'currently, there must be exactly one ?q= in the expectation string');
      expectationQuery = parseQuery(qs[0]);
    } else {
      expectationQuery = parseQuery(entry.query);
    }
    // Strip params from multicase expectations so that an expectation of foo=2;*
    // is kept even if the test query is bar=3;*
    const queryForFilter =
      expectationQuery instanceof TestQueryMultiCase
        ? new TestQueryMultiCase(
            expectationQuery.suite,
            expectationQuery.filePathParts,
            expectationQuery.testPathParts,
            {}
          )
        : expectationQuery;
    if (compareQueries(query, queryForFilter) === Ordering.Unordered) {
      continue;
    }
    // Validate the expectation keyword.
    switch (entry.expectation) {
      case 'pass':
      case 'skip':
      case 'fail':
        break;
      default:
        unreachable(`Invalid expectation ${entry.expectation}`);
    }
    expectations.push({
      query: expectationQuery,
      expectation: entry.expectation,
    });
  }
  return expectations;
}
/**
 * For display purposes only, produces a "relative" query string from parent to child.
 * Used in the wpt runtime to reduce the verbosity of logs.
 */
export function relativeQueryString(parent, child) {
  const ordering = compareQueries(parent, child);
  if (ordering === Ordering.Equal) {
    // Identical queries have an empty relative string.
    return '';
  }
  if (ordering !== Ordering.StrictSuperset) {
    unreachable(
      `relativeQueryString arguments have invalid ordering ${ordering}:\n${parent}\n${child}`
    );
  }
  const parentString = parent.toString();
  assert(parentString.endsWith(kWildcard));
  // Drop the trailing separator+wildcard from the parent; the child must extend that prefix.
  const prefix = parentString.substring(0, parentString.length - 2);
  const childString = child.toString();
  assert(
    childString.startsWith(prefix),
    'impossible?: childString does not start with parentString[:-2]'
  );
  return childString.substring(prefix.length);
}

View file

@ -0,0 +1,46 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert } from '../../util/util.js';
import { badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
import { stringifyParamValue, stringifyParamValueUniquely } from './json_param_value.js';
import { kParamKVSeparator, kParamSeparator, kWildcard } from './separators.js';
/**
 * Stringifies the public (non-underscore-prefixed) entries of a params object,
 * joined by the param separator; optionally appends a trailing wildcard.
 */
export function stringifyPublicParams(p, addWildcard = false) {
  const parts = [];
  for (const key of Object.keys(p)) {
    if (paramKeyIsPublic(key)) {
      parts.push(stringifySingleParam(key, p[key]));
    }
  }
  if (addWildcard) {
    parts.push(kWildcard);
  }
  return parts.join(kParamSeparator);
}
/**
 * An _approximately_ unique string representing a CaseParams value.
 * Keys are sorted so equal params objects map to the same string.
 */
export function stringifyPublicParamsUniquely(p) {
  const parts = [];
  for (const key of Object.keys(p).sort()) {
    if (paramKeyIsPublic(key)) {
      parts.push(stringifySingleParamUniquely(key, p[key]));
    }
  }
  return parts.join(kParamSeparator);
}
/** Stringifies one param as `key=value` (value is validated by stringifySingleParamValue). */
export function stringifySingleParam(k, v) {
  const value = stringifySingleParamValue(v);
  return k + kParamKVSeparator + value;
}
/** Stringifies one param as `key=value` using the "unique" value representation. */
function stringifySingleParamUniquely(k, v) {
  const value = stringifyParamValueUniquely(v);
  return k + kParamKVSeparator + value;
}
/** Stringifies a param value, asserting the result contains no forbidden characters. */
function stringifySingleParamValue(v) {
  const stringified = stringifyParamValue(v);
  assert(
    !badParamValueChars.test(stringified),
    `JSON.stringified param value must not match ${badParamValueChars} - was ${stringified}`
  );
  return stringified;
}

View file

@ -3,17 +3,23 @@
**/ // Returns the stack trace of an Error, but without the extra boilerplate at the bottom
// (e.g. RunCaseSpecific, processTicksAndRejections, etc.), for logging.
export function extractImportantStackTrace(e) {
if (!e.stack) {
let stack = e.stack;
if (!stack) {
return '';
}
const lines = e.stack.split('\n');
const redundantMessage = 'Error: ' + e.message + '\n';
if (stack.startsWith(redundantMessage)) {
stack = stack.substring(redundantMessage.length);
}
const lines = stack.split('\n');
for (let i = lines.length - 1; i >= 0; --i) {
const line = lines[i];
if (line.indexOf('.spec.') !== -1) {
return lines.slice(0, i + 1).join('\n');
}
}
return e.stack;
return stack;
}
// *** Examples ***

View file

@ -0,0 +1,471 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { SkipTestCase, UnexpectedPassError } from '../framework/fixture.js';
import {
builderIterateCasesWithSubcases,
kUnitCaseParamsBuilder,
} from '../framework/params_builder.js';
import { globalTestConfig } from '../framework/test_config.js';
import { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
import { extractPublicParams, mergeParams } from '../internal/params_utils.js';
import { compareQueries, Ordering } from '../internal/query/compare.js';
import { TestQuerySingleCase } from '../internal/query/query.js';
import { kPathSeparator } from '../internal/query/separators.js';
import {
stringifyPublicParams,
stringifyPublicParamsUniquely,
} from '../internal/query/stringify_params.js';
import { validQueryPart } from '../internal/query/validQueryPart.js';
import { assert, unreachable } from '../util/util.js';
/** Creates a new TestGroup whose tests will use the given fixture class. */
export function makeTestGroup(fixture) {
  return new TestGroup(fixture);
}
// Interfaces for running tests
/**
 * Creates a new TestGroup, identically to makeTestGroup.
 * Separate entry point — presumably for unit-testing the framework itself (name suggests;
 * confirm against callers).
 */
export function makeTestGroupForUnitTesting(fixture) {
  return new TestGroup(fixture);
}
/**
 * A named collection of tests that all construct fixtures of the same class.
 */
export class TestGroup {
  seen = new Set();
  tests = [];
  constructor(fixture) {
    this.fixture = fixture;
  }
  /** Returns the tests registered so far, in registration order. */
  iterate() {
    return this.tests;
  }
  /** Asserts `name` is decodeURIComponent-idempotent and not a duplicate, then records it. */
  checkName(name) {
    // Shouldn't happen due to the rule above. Just makes sure that treating
    // unencoded strings as encoded strings is OK.
    const decoded = decodeURIComponent(name);
    assert(name === decoded, `Not decodeURIComponent-idempotent: ${name} !== ${decoded}`);
    assert(!this.seen.has(name), `Duplicate test name: ${name}`);
    this.seen.add(name);
  }
  /** Registers a new test under `name` and returns its TestBuilder. */
  test(name) {
    // Capture a stack trace now, so later errors can point at where the test was created.
    const creationStack = new Error(`Test created: ${name}`);
    this.checkName(name);
    const pathParts = name.split(kPathSeparator);
    for (const part of pathParts) {
      assert(
        validQueryPart.test(part),
        `Invalid test name part ${part}; must match ${validQueryPart}`
      );
    }
    const builder = new TestBuilder(pathParts, this.fixture, creationStack);
    this.tests.push(builder);
    return builder;
  }
  /** Validates every registered test (e.g. that each one received a .fn()). */
  validate() {
    this.tests.forEach(t => t.validate());
  }
}
/**
 * Builder for a single test: accumulates its description, parameterization, and
 * functions, and expands into RunCaseSpecific instances via iterate().
 * Returned by TestGroup.test(); most methods are chainable.
 */
class TestBuilder {
  // Parameterization for this test; stays undefined if no .params*() method is called.
  testCases = undefined;
  // Max subcases per emitted case; 0 disables batching (see iterate()).
  batchSize = 0;
  constructor(testPath, fixture, testCreationStack) {
    this.testPath = testPath;
    this.isUnimplemented = false;
    this.fixture = fixture;
    this.testCreationStack = testCreationStack;
  }
  /** Sets the test's description (trimmed). Chainable. */
  desc(description) {
    this.description = description.trim();
    return this;
  }
  /** Accepts a specification URL; currently a no-op. Chainable. */
  specURL(url) {
    return this;
  }
  /** Registers a function run once per case before any of its subcases. Chainable. */
  beforeAllSubcases(fn) {
    assert(this.beforeFn === undefined);
    this.beforeFn = fn;
    return this;
  }
  /** Sets the test body. May only be set once. */
  fn(fn) {
    // MAINTENANCE_TODO: add "TODO" if there's no description? (and make sure it only ends up on
    // actual tests, not on test parents in the tree, which is what happens if you do it here, not
    // sure why)
    assert(this.testFn === undefined);
    this.testFn = fn;
  }
  /** Sets the max number of subcases per batched case. Chainable. */
  batch(b) {
    this.batchSize = b;
    return this;
  }
  /** Marks the test unimplemented: appends a TODO to the description; body skips at runtime. */
  unimplemented() {
    assert(this.testFn === undefined);
    this.description =
      (this.description ? this.description + '\n\n' : '') + 'TODO: .unimplemented()';
    this.isUnimplemented = true;
    this.testFn = () => {
      throw new SkipTestCase('test unimplemented');
    };
  }
  /** Checks that .fn() was called and that no two cases have identical public params. */
  validate() {
    const testPathString = this.testPath.join(kPathSeparator);
    assert(this.testFn !== undefined, () => {
      let s = `Test is missing .fn(): ${testPathString}`;
      if (this.testCreationStack.stack) {
        s += `\n-> test created at:\n${this.testCreationStack.stack}`;
      }
      return s;
    });
    if (this.testCases === undefined) {
      return;
    }
    const seen = new Set();
    for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
      for (const subcaseParams of subcases ?? [{}]) {
        const params = mergeParams(caseParams, subcaseParams);
        // `batch__` is reserved for the batching mechanism in iterate(); user params must not use it.
        assert(this.batchSize === 0 || !('batch__' in params));
        // stringifyPublicParams also checks for invalid params values
        const testcaseString = stringifyPublicParams(params);
        // A (hopefully) unique representation of a params value.
        const testcaseStringUnique = stringifyPublicParamsUniquely(params);
        assert(
          !seen.has(testcaseStringUnique),
          `Duplicate public test case params for test ${testPathString}: ${testcaseString}`
        );
        seen.add(testcaseStringUnique);
      }
    }
  }
  /** Parameterizes the test with a cases builder, or a function producing one. Chainable. */
  params(cases) {
    assert(this.testCases === undefined, 'test case is already parameterized');
    if (cases instanceof Function) {
      this.testCases = cases(kUnitCaseParamsBuilder);
    } else {
      this.testCases = cases;
    }
    return this;
  }
  /** Parameterizes with a plain list of case params. Chainable. */
  paramsSimple(cases) {
    assert(this.testCases === undefined, 'test case is already parameterized');
    this.testCases = kUnitCaseParamsBuilder.combineWithParams(cases);
    return this;
  }
  /** Parameterizes with subcase-only params (one case containing all the subcases). Chainable. */
  paramsSubcasesOnly(subcases) {
    if (subcases instanceof Function) {
      return this.params(subcases(kUnitCaseParamsBuilder.beginSubcases()));
    } else {
      return this.params(kUnitCaseParamsBuilder.beginSubcases().combineWithParams(subcases));
    }
  }
  /**
   * Expands this test into RunCaseSpecific instances, one per case.
   * With batching enabled, a case whose subcase list exceeds batchSize is split into
   * multiple cases distinguished by an extra `batch__` param.
   */
  *iterate() {
    assert(this.testFn !== undefined, 'No test function (.fn()) for test');
    this.testCases ??= kUnitCaseParamsBuilder;
    for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
      if (this.batchSize === 0 || subcases === undefined) {
        // No batching requested, or nothing to batch.
        yield new RunCaseSpecific(
          this.testPath,
          caseParams,
          this.isUnimplemented,
          subcases,
          this.fixture,
          this.testFn,
          this.beforeFn,
          this.testCreationStack
        );
      } else {
        const subcaseArray = Array.from(subcases);
        if (subcaseArray.length <= this.batchSize) {
          // The whole subcase list fits in a single batch.
          yield new RunCaseSpecific(
            this.testPath,
            caseParams,
            this.isUnimplemented,
            subcaseArray,
            this.fixture,
            this.testFn,
            this.beforeFn,
            this.testCreationStack
          );
        } else {
          // Split the subcases into chunks of batchSize, one RunCaseSpecific per chunk.
          for (let i = 0; i < subcaseArray.length; i = i + this.batchSize) {
            yield new RunCaseSpecific(
              this.testPath,
              { ...caseParams, batch__: i / this.batchSize },
              this.isUnimplemented,
              subcaseArray.slice(i, Math.min(subcaseArray.length, i + this.batchSize)),
              this.fixture,
              this.testFn,
              this.beforeFn,
              this.testCreationStack
            );
          }
        }
      }
    }
  }
}
/**
 * One runnable case of a test: a specific case-level params combination, optionally
 * carrying a list of subcase params. Produced by TestBuilder.iterate().
 */
class RunCaseSpecific {
  constructor(
    testPath,
    params,
    isUnimplemented,
    subcases,
    fixture,
    fn,
    beforeFn,
    testCreationStack
  ) {
    this.id = { test: testPath, params: extractPublicParams(params) };
    this.isUnimplemented = isUnimplemented;
    this.params = params;
    this.subcases = subcases;
    this.fixture = fixture;
    this.fn = fn;
    this.beforeFn = beforeFn;
    this.testCreationStack = testCreationStack;
  }
  /**
   * Runs one (sub)case body: constructs the fixture, then init -> test fn -> finalize,
   * recording the outcome into `rec`. When `throwSkip` is true, SkipTestCase exceptions
   * propagate to the caller (used for subcases so the caller can count skips) instead
   * of being recorded here.
   */
  async runTest(rec, sharedState, params, throwSkip, expectedStatus) {
    try {
      rec.beginSubCase();
      if (expectedStatus === 'skip') {
        throw new SkipTestCase('Skipped by expectations');
      }
      const inst = new this.fixture(sharedState, rec, params);
      try {
        await inst.init();
        await this.fn(inst);
      } finally {
        // Runs as long as constructor succeeded, even if initialization or the test failed.
        await inst.finalize();
      }
    } catch (ex) {
      // There was an exception from constructor, init, test, or finalize.
      // An error from init or test may have been a SkipTestCase.
      // An error from finalize may have been an eventualAsyncExpectation failure
      // or unexpected validation/OOM error from the GPUDevice.
      if (throwSkip && ex instanceof SkipTestCase) {
        throw ex;
      }
      rec.threw(ex);
    } finally {
      try {
        rec.endSubCase(expectedStatus);
      } catch (ex) {
        // endSubCase is expected to throw only UnexpectedPassError (a 'fail'-expected
        // case that actually passed); downgrade it to a warning.
        assert(ex instanceof UnexpectedPassError);
        ex.message = `Testcase passed unexpectedly.`;
        ex.stack = this.testCreationStack.stack;
        rec.warn(ex);
      }
    }
  }
  /**
   * Runs the whole case: sets up the fixture's shared state, runs the single body or each
   * subcase (at most maxSubcasesInFlight concurrently), then finalizes the shared state.
   * `expectations` supplies per-(sub)case expected statuses ('skip'/'fail'/'pass').
   */
  async run(rec, selfQuery, expectations) {
    // Computes the expected status for one (sub)case query:
    // 'skip' wins over 'fail', which wins over the default 'pass'.
    const getExpectedStatus = selfQueryWithSubParams => {
      let didSeeFail = false;
      for (const exp of expectations) {
        const ordering = compareQueries(exp.query, selfQueryWithSubParams);
        if (ordering === Ordering.Unordered || ordering === Ordering.StrictSubset) {
          continue;
        }
        switch (exp.expectation) {
          // Skip takes precedence. If there is any expectation indicating a skip,
          // signal it immediately.
          case 'skip':
            return 'skip';
          case 'fail':
            // Otherwise, indicate that we might expect a failure.
            didSeeFail = true;
            break;
          default:
            unreachable();
        }
      }
      return didSeeFail ? 'fail' : 'pass';
    };
    const { testHeartbeatCallback, maxSubcasesInFlight } = globalTestConfig;
    try {
      rec.start();
      const sharedState = this.fixture.MakeSharedState(rec, this.params);
      try {
        await sharedState.init();
        if (this.beforeFn) {
          await this.beforeFn(sharedState);
        }
        await sharedState.postInit();
        testHeartbeatCallback();
        let allPreviousSubcasesFinalizedPromise = Promise.resolve();
        if (this.subcases) {
          let totalCount = 0;
          let skipCount = 0;
          // If there are too many subcases in flight, starting the next subcase will register
          // `resolvePromiseBlockingSubcase` and wait until `subcaseFinishedCallback` is called.
          let subcasesInFlight = 0;
          let resolvePromiseBlockingSubcase = undefined;
          const subcaseFinishedCallback = () => {
            subcasesInFlight -= 1;
            // If there is any subcase waiting on a previous subcase to finish,
            // unblock it now, and clear the resolve callback.
            if (resolvePromiseBlockingSubcase) {
              resolvePromiseBlockingSubcase();
              resolvePromiseBlockingSubcase = undefined;
            }
          };
          for (const subParams of this.subcases) {
            // Make a recorder that will defer all calls until `allPreviousSubcasesFinalizedPromise`
            // resolves. Waiting on `allPreviousSubcasesFinalizedPromise` ensures that
            // logs from all the previous subcases have been flushed before flushing new logs.
            const subcasePrefix = 'subcase: ' + stringifyPublicParams(subParams);
            const subRec = new Proxy(rec, {
              get: (target, k) => {
                const prop = TestCaseRecorder.prototype[k];
                if (typeof prop === 'function') {
                  testHeartbeatCallback();
                  return function (...args) {
                    void allPreviousSubcasesFinalizedPromise.then(() => {
                      // Prepend the subcase name to all error messages.
                      for (const arg of args) {
                        if (arg instanceof Error) {
                          try {
                            arg.message = subcasePrefix + '\n' + arg.message;
                          } catch {
                            // If that fails (e.g. on DOMException), try to put it in the stack:
                            let stack = subcasePrefix;
                            if (arg.stack) stack += '\n' + arg.stack;
                            try {
                              arg.stack = stack;
                            } catch {
                              // If that fails too, just silence it.
                            }
                          }
                        }
                      }
                      const rv = prop.apply(target, args);
                      // Because this proxy executes functions in a deferred manner,
                      // it should never be used for functions that need to return a value.
                      assert(rv === undefined);
                    });
                  };
                }
                return prop;
              },
            });
            const params = mergeParams(this.params, subParams);
            const subcaseQuery = new TestQuerySingleCase(
              selfQuery.suite,
              selfQuery.filePathParts,
              selfQuery.testPathParts,
              params
            );
            // Limit the maximum number of subcases in flight.
            if (subcasesInFlight >= maxSubcasesInFlight) {
              await new Promise(resolve => {
                // There should only be one subcase waiting at a time.
                assert(resolvePromiseBlockingSubcase === undefined);
                resolvePromiseBlockingSubcase = resolve;
              });
            }
            subcasesInFlight += 1;
            // Runs async without waiting so that subsequent subcases can start.
            // All finalization steps will be waited on at the end of the testcase.
            const finalizePromise = this.runTest(
              subRec,
              sharedState,
              params,
              /* throwSkip */ true,
              getExpectedStatus(subcaseQuery)
            )
              .then(() => {
                subRec.info(new Error('OK'));
              })
              .catch(ex => {
                if (ex instanceof SkipTestCase) {
                  // Convert SkipTestCase to info messages
                  ex.message = 'subcase skipped: ' + ex.message;
                  subRec.info(ex);
                  ++skipCount;
                } else {
                  // Since we are catching all error inside runTest(), this should never happen
                  subRec.threw(ex);
                }
              })
              .finally(subcaseFinishedCallback);
            allPreviousSubcasesFinalizedPromise = allPreviousSubcasesFinalizedPromise.then(
              () => finalizePromise
            );
            ++totalCount;
          }
          // Wait for all subcases to finalize and report their results.
          await allPreviousSubcasesFinalizedPromise;
          if (skipCount === totalCount) {
            rec.skipped(new SkipTestCase('all subcases were skipped'));
          }
        } else {
          await this.runTest(
            rec,
            sharedState,
            this.params,
            /* throwSkip */ false,
            getExpectedStatus(selfQuery)
          );
        }
      } finally {
        testHeartbeatCallback();
        // Runs as long as the shared state constructor succeeded, even if initialization or a test failed.
        await sharedState.finalize();
        testHeartbeatCallback();
      }
    } catch (ex) {
      // There was an exception from sharedState/fixture constructor, init, beforeFn, or test.
      // An error from beforeFn may have been SkipTestCase.
      // An error from finalize may have been an eventualAsyncExpectation failure
      // or unexpected validation/OOM error from the GPUDevice.
      rec.threw(ex);
    } finally {
      rec.finish();
    }
  }
}

View file

@ -1,3 +1,3 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
**/ export {};

View file

@ -1,18 +1,8 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true,
});
} else {
obj[key] = value;
}
return obj;
}
**/ import { globalTestConfig } from '../framework/test_config.js';
import { assert, now } from '../util/util.js';
import { compareQueries, Ordering } from './query/compare.js';
import {
TestQueryMultiCase,
@ -22,8 +12,7 @@ import {
} from './query/query.js';
import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './query/separators.js';
import { stringifySingleParam } from './query/stringify_params.js';
import { assert } from './util/util.js';
import { StacklessError } from './util.js';
// `loadTreeForQuery()` loads a TestTree for a given queryToLoad.
// The resulting tree is a linked-list all the way from `suite:*` to queryToLoad,
@ -33,13 +22,13 @@ import { assert } from './util/util.js';
// A node is considered "collapsible" if none of the subqueriesToExpand is a StrictSubset
// of that node.
//
// In WebKit/Blink-style web_tests, an expectation file marks individual cts.html "variants" as
// "Failure", "Crash", etc.
// By passing in the list of expectations as the subqueriesToExpand, we can programmatically
// subdivide the cts.html "variants" list to be able to implement arbitrarily-fine suppressions
// (instead of having to suppress entire test files, which would lose a lot of coverage).
// In WebKit/Blink-style web_tests, an expectation file marks individual cts.https.html "variants
// as "Failure", "Crash", etc. By passing in the list of expectations as the subqueriesToExpand,
// we can programmatically subdivide the cts.https.html "variants" list to be able to implement
// arbitrarily-fine suppressions (instead of having to suppress entire test files, which would
// lose a lot of coverage).
//
// `iterateCollapsedQueries()` produces the list of queries for the variants list.
// `iterateCollapsedNodes()` produces the list of queries for the variants list.
//
// Though somewhat complicated, this system has important benefits:
// - Avoids having to suppress entire test files, which would cause large test coverage loss.
@ -50,13 +39,42 @@ import { assert } from './util/util.js';
// about expectation granularity.
export class TestTree {
constructor(root) {
_defineProperty(this, 'root', void 0);
/**
* The `queryToLoad` that this test tree was created for.
* Test trees are always rooted at `suite:*`, but they only contain nodes that fit
* within `forQuery`.
*
* This is used for `iterateCollapsedNodes` which only starts collapsing at the next
* `TestQueryLevel` after `forQuery`.
*/
constructor(forQuery, root) {
this.forQuery = forQuery;
TestTree.propagateCounts(root);
this.root = root;
assert(
root.query.level === 1 && root.query.depthInLevel === 0,
'TestTree root must be the root (suite:*)'
);
}
iterateCollapsedQueries() {
return TestTree.iterateSubtreeCollapsedQueries(this.root);
/**
* Iterate through the leaves of a version of the tree which has been pruned to exclude
* subtrees which:
* - are at a deeper `TestQueryLevel` than `this.forQuery`, and
* - were not a `Ordering.StrictSubset` of any of the `subqueriesToExpand` during tree creation.
*/
iterateCollapsedNodes({
includeIntermediateNodes = false,
includeEmptySubtrees = false,
alwaysExpandThroughLevel,
}) {
const expandThroughLevel = Math.max(this.forQuery.level, alwaysExpandThroughLevel);
return TestTree.iterateSubtreeNodes(this.root, {
includeIntermediateNodes,
includeEmptySubtrees,
expandThroughLevel,
});
}
iterateLeaves() {
@ -64,15 +82,14 @@ export class TestTree {
}
/**
* If a parent and its child are at different levels, then
* generally the parent has only one child, i.e.:
* Dissolve nodes which have only one child, e.g.:
* a,* { a,b,* { a,b:* { ... } } }
* Collapse that down into:
* collapses down into:
* a,* { a,b:* { ... } }
* which is less needlessly verbose when displaying the tree in the standalone runner.
*/
dissolveLevelBoundaries() {
const newRoot = dissolveLevelBoundaries(this.root);
dissolveSingleChildTrees() {
const newRoot = dissolveSingleChildTrees(this.root);
assert(newRoot === this.root);
}
@ -80,12 +97,24 @@ export class TestTree {
return TestTree.subtreeToString('(root)', this.root, '');
}
static *iterateSubtreeCollapsedQueries(subtree) {
static *iterateSubtreeNodes(subtree, opts) {
if (opts.includeIntermediateNodes) {
yield subtree;
}
for (const [, child] of subtree.children) {
if ('children' in child && !child.collapsible) {
yield* TestTree.iterateSubtreeCollapsedQueries(child);
if ('children' in child) {
// Is a subtree
const collapsible = child.collapsible && child.query.level > opts.expandThroughLevel;
if (child.children.size > 0 && !collapsible) {
yield* TestTree.iterateSubtreeNodes(child, opts);
} else if (child.children.size > 0 || opts.includeEmptySubtrees) {
// Don't yield empty subtrees (e.g. files with no tests) unless includeEmptySubtrees
yield child;
}
} else {
yield child.query;
// Is a leaf
yield child;
}
}
}
@ -100,9 +129,33 @@ export class TestTree {
}
}
/** Propagate the subtreeTODOs/subtreeTests state upward from leaves to parent nodes. */
static propagateCounts(subtree) {
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0 };
for (const [, child] of subtree.children) {
if ('children' in child) {
const counts = TestTree.propagateCounts(child);
subtree.subtreeCounts.tests += counts.tests;
subtree.subtreeCounts.nodesWithTODO += counts.nodesWithTODO;
}
}
return subtree.subtreeCounts;
}
/** Displays counts in the format `(Nodes with TODOs) / (Total test count)`. */
static countsToString(tree) {
if (tree.subtreeCounts) {
return `${tree.subtreeCounts.nodesWithTODO} / ${tree.subtreeCounts.tests}`;
} else {
return '';
}
}
static subtreeToString(name, tree, indent) {
const collapsible = 'run' in tree ? '>' : tree.collapsible ? '+' : '-';
let s = indent + `${collapsible} ${JSON.stringify(name)} => ${tree.query}`;
let s =
indent +
`${collapsible} ${TestTree.countsToString(tree)} ${JSON.stringify(name)} => ${tree.query}`;
if ('children' in tree) {
if (tree.description !== undefined) {
s += `\n${indent} | ${JSON.stringify(tree.description)}`;
@ -116,7 +169,8 @@ export class TestTree {
}
}
// TODO: Consider having subqueriesToExpand actually impact the depth-order of params in the tree.
// MAINTENANCE_TODO: Consider having subqueriesToExpand actually impact the depth-order of params
// in the tree.
export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand) {
const suite = queryToLoad.suite;
const specs = await loader.listing(suite);
@ -140,13 +194,14 @@ export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand)
// L3 = case-level, e.g. suite:a,b:c,d:
let foundCase = false;
// L0 is suite:*
const subtreeL0 = makeTreeForSuite(suite);
isCollapsible(subtreeL0.query); // mark seenSubqueriesToExpand
const subtreeL0 = makeTreeForSuite(suite, isCollapsible);
const imports_start = now();
const pEntriesWithImports = []; // Promise<entry with importedSpec>[]
for (const entry of specs) {
if (entry.file.length === 0 && 'readme' in entry) {
// Suite-level readme.
assert(subtreeL0.description === undefined);
subtreeL0.description = entry.readme.trim();
setSubtreeDescriptionAndCountTODOs(subtreeL0, entry.readme);
continue;
}
@ -159,6 +214,34 @@ export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand)
}
}
// We're going to be fetching+importing a bunch of things, so do it in async.
const pEntryWithImport = (async () => {
if ('readme' in entry) {
return entry;
} else {
return {
...entry,
importedSpec: await loader.importSpecFile(queryToLoad.suite, entry.file),
};
}
})();
const kForceSerialImporting = false;
if (kForceSerialImporting) {
await pEntryWithImport;
}
pEntriesWithImports.push(pEntryWithImport);
}
const entriesWithImports = await Promise.all(pEntriesWithImports);
if (globalTestConfig.frameworkDebugLog) {
const imported_time = performance.now() - imports_start;
globalTestConfig.frameworkDebugLog(
`Imported importedSpecFiles[${entriesWithImports.length}] in ${imported_time}ms.`
);
}
for (const entry of entriesWithImports) {
if ('readme' in entry) {
// Entry is a README that is an ancestor or descendant of the query.
// (It's included for display in the standalone runner.)
@ -166,64 +249,102 @@ export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand)
// readmeSubtree is suite:a,b,*
// (This is always going to dedup with a file path, if there are any test spec files under
// the directory that has the README).
const readmeSubtree = addSubtreeForDirPath(subtreeL0, entry.file);
const readmeSubtree = addSubtreeForDirPath(subtreeL0, entry.file, isCollapsible);
assert(readmeSubtree.description === undefined);
readmeSubtree.description = entry.readme.trim();
setSubtreeDescriptionAndCountTODOs(readmeSubtree, entry.readme);
continue;
}
// Entry is a spec file.
const spec = await loader.importSpecFile(queryToLoad.suite, entry.file);
const description = spec.description.trim();
const spec = entry.importedSpec;
// subtreeL1 is suite:a,b:*
const subtreeL1 = addSubtreeForFilePath(subtreeL0, entry.file, description, isCollapsible);
const subtreeL1 = addSubtreeForFilePath(subtreeL0, entry.file, isCollapsible);
// TODO: If tree generation gets too slow, avoid actually iterating the cases in a file
// if there's no need to (based on the subqueriesToExpand).
setSubtreeDescriptionAndCountTODOs(subtreeL1, spec.description);
let groupHasTests = false;
for (const t of spec.g.iterate()) {
groupHasTests = true;
{
const queryL3 = new TestQuerySingleCase(suite, entry.file, t.id.test, t.id.params);
const orderingL3 = compareQueries(queryL3, queryToLoad);
if (orderingL3 === Ordering.Unordered || orderingL3 === Ordering.StrictSuperset) {
// Case is not matched by this query.
const queryL2 = new TestQueryMultiCase(suite, entry.file, t.testPath, {});
const orderingL2 = compareQueries(queryL2, queryToLoad);
if (orderingL2 === Ordering.Unordered) {
// Test path is not matched by this query.
continue;
}
}
// subtreeL2 is suite:a,b:c,d:*
const subtreeL2 = addSubtreeForTestPath(subtreeL1, t.id.test, isCollapsible);
const subtreeL2 = addSubtreeForTestPath(
subtreeL1,
t.testPath,
t.testCreationStack,
isCollapsible
);
// Leaf for case is suite:a,b:c,d:x=1;y=2
addLeafForCase(subtreeL2, t, isCollapsible);
// This is 1 test. Set tests=1 then count TODOs.
subtreeL2.subtreeCounts ??= { tests: 1, nodesWithTODO: 0 };
if (t.description) setSubtreeDescriptionAndCountTODOs(subtreeL2, t.description);
foundCase = true;
// MAINTENANCE_TODO: If tree generation gets too slow, avoid actually iterating the cases in a
// file if there's no need to (based on the subqueriesToExpand).
for (const c of t.iterate()) {
{
const queryL3 = new TestQuerySingleCase(suite, entry.file, c.id.test, c.id.params);
const orderingL3 = compareQueries(queryL3, queryToLoad);
if (orderingL3 === Ordering.Unordered || orderingL3 === Ordering.StrictSuperset) {
// Case is not matched by this query.
continue;
}
}
// Leaf for case is suite:a,b:c,d:x=1;y=2
addLeafForCase(subtreeL2, c, isCollapsible);
foundCase = true;
}
}
if (!groupHasTests && !subtreeL1.subtreeCounts) {
throw new StacklessError(
`${subtreeL1.query} has no tests - it must have "TODO" in its description`
);
}
}
for (const [i, sq] of subqueriesToExpandEntries) {
const seen = seenSubqueriesToExpand[i];
assert(
seen,
`subqueriesToExpand entry did not match anything \
(can happen due to overlap with another subquery): ${sq.toString()}`
);
const subquerySeen = seenSubqueriesToExpand[i];
if (!subquerySeen) {
throw new StacklessError(
`subqueriesToExpand entry did not match anything \
(could be wrong, or could be redundant with a previous subquery):\n ${sq.toString()}`
);
}
}
assert(foundCase, 'Query does not match any cases');
assert(foundCase, `Query \`${queryToLoad.toString()}\` does not match any cases`);
return new TestTree(subtreeL0);
return new TestTree(queryToLoad, subtreeL0);
}
function makeTreeForSuite(suite) {
function setSubtreeDescriptionAndCountTODOs(subtree, description) {
assert(subtree.description === undefined);
subtree.description = description.trim();
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0 };
if (subtree.description.indexOf('TODO') !== -1) {
subtree.subtreeCounts.nodesWithTODO++;
}
}
function makeTreeForSuite(suite, isCollapsible) {
const query = new TestQueryMultiFile(suite, []);
return {
readableRelativeName: suite + kBigSeparator,
query: new TestQueryMultiFile(suite, []),
query,
children: new Map(),
collapsible: false,
collapsible: isCollapsible(query),
};
}
function addSubtreeForDirPath(tree, file) {
function addSubtreeForDirPath(tree, file, isCollapsible) {
const subqueryFile = [];
// To start, tree is suite:*
// This loop goes from that -> suite:a,* -> suite:a,b,*
@ -231,16 +352,20 @@ function addSubtreeForDirPath(tree, file) {
subqueryFile.push(part);
tree = getOrInsertSubtree(part, tree, () => {
const query = new TestQueryMultiFile(tree.query.suite, subqueryFile);
return { readableRelativeName: part + kPathSeparator + kWildcard, query, collapsible: false };
return {
readableRelativeName: part + kPathSeparator + kWildcard,
query,
collapsible: isCollapsible(query),
};
});
}
return tree;
}
function addSubtreeForFilePath(tree, file, description, checkCollapsible) {
function addSubtreeForFilePath(tree, file, isCollapsible) {
// To start, tree is suite:*
// This goes from that -> suite:a,* -> suite:a,b,*
tree = addSubtreeForDirPath(tree, file);
tree = addSubtreeForDirPath(tree, file, isCollapsible);
// This goes from that -> suite:a,b:*
const subtree = getOrInsertSubtree('', tree, () => {
const query = new TestQueryMultiTest(tree.query.suite, tree.query.filePathParts, []);
@ -248,14 +373,13 @@ function addSubtreeForFilePath(tree, file, description, checkCollapsible) {
return {
readableRelativeName: file[file.length - 1] + kBigSeparator + kWildcard,
query,
description,
collapsible: checkCollapsible(query),
collapsible: isCollapsible(query),
};
});
return subtree;
}
function addSubtreeForTestPath(tree, test, isCollapsible) {
function addSubtreeForTestPath(tree, test, testCreationStack, isCollapsible) {
const subqueryTest = [];
// To start, tree is suite:a,b:*
// This loop goes from that -> suite:a,b:c,* -> suite:a,b:c,d,*
@ -289,6 +413,7 @@ function addSubtreeForTestPath(tree, test, isCollapsible) {
readableRelativeName: subqueryTest[subqueryTest.length - 1] + kBigSeparator + kWildcard,
kWildcard,
query,
testCreationStack,
collapsible: isCollapsible(query),
};
});
@ -347,32 +472,34 @@ function getOrInsertSubtree(key, parent, createSubtree) {
}
function insertLeaf(parent, query, t) {
const key = '';
const leaf = {
readableRelativeName: readableNameForCase(query),
query,
run: rec => t.run(rec),
run: (rec, expectations) => t.run(rec, query, expectations || []),
isUnimplemented: t.isUnimplemented,
};
assert(!parent.children.has(key));
// This is a leaf (e.g. s:f:t:x=1;* -> s:f:t:x=1). The key is always ''.
const key = '';
assert(!parent.children.has(key), `Duplicate testcase: ${query}`);
parent.children.set(key, leaf);
}
function dissolveLevelBoundaries(tree) {
function dissolveSingleChildTrees(tree) {
if ('children' in tree) {
if (tree.children.size === 1 && tree.description === undefined) {
const shouldDissolveThisTree =
tree.children.size === 1 && tree.query.depthInLevel !== 0 && tree.description === undefined;
if (shouldDissolveThisTree) {
// Loops exactly once
for (const [, child] of tree.children) {
if (child.query.level > tree.query.level) {
const newtree = dissolveLevelBoundaries(child);
return newtree;
}
// Recurse on child
return dissolveSingleChildTrees(child);
}
}
for (const [k, child] of tree.children) {
const newChild = dissolveLevelBoundaries(child);
// Recurse on each child
const newChild = dissolveSingleChildTrees(child);
if (newChild !== child) {
tree.children.set(k, newChild);
}

View file

@ -0,0 +1,11 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ /**
 * Error without a stack, which can be used to fatally exit from `tool/` scripts with a
 * user-friendly message (and no confusing stack).
 */ export class StacklessError extends Error {
  /** @param {string} message - the user-friendly message to display. */
  constructor(message) {
    super(message);
    // Suppress the stack trace so only the message is shown.
    this.stack = undefined;
  }
}

View file

@ -0,0 +1,3 @@
// AUTO-GENERATED - DO NOT EDIT. See tools/gen_version.
// 40-hex identifier of the generated snapshot — presumably the git revision of the
// gpuweb/cts checkout these files were generated from (see tools/gen_version to confirm).
export const version = '480edec387e8cd5bf5934680050c59a3f7a01438';

View file

@ -1,7 +1,18 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ const url = new URL(window.location.toString());
export function optionEnabled(opt) {
const val = url.searchParams.get(opt);
**/ let windowURL = undefined;
/** Lazily creates and memoizes the page's URL on first use. */
function getWindowURL() {
  if (windowURL !== undefined) {
    return windowURL;
  }
  windowURL = new URL(window.location.toString());
  return windowURL;
}
/**
 * Returns whether a boolean-style option is enabled: present in the search
 * params with any value other than '0'.
 */
export function optionEnabled(opt, searchParams = getWindowURL().searchParams) {
  const value = searchParams.get(opt);
  return value !== null && value !== '0';
}
/** Returns a string option's value, or '' when the option is absent. */
export function optionString(opt, searchParams = getWindowURL().searchParams) {
  const value = searchParams.get(opt);
  return value === null ? '' : value;
}

View file

@ -1,26 +1,36 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { DefaultTestFileLoader } from '../../framework/file_loader.js';
import { Logger } from '../../framework/logging/logger.js';
import { parseQuery } from '../../framework/query/parseQuery.js';
import { assert } from '../../framework/util/util.js';
**/ import { setBaseResourcePath } from '../../framework/resources.js';
import { DefaultTestFileLoader } from '../../internal/file_loader.js';
import { Logger } from '../../internal/logging/logger.js';
import { parseQuery } from '../../internal/query/parseQuery.js';
// should be DedicatedWorkerGlobalScope
import { setDefaultRequestAdapterOptions } from '../../util/navigator_gpu.js';
import { assert } from '../../util/util.js';
// Should be DedicatedWorkerGlobalScope, but importing lib "webworker" conflicts with lib "dom".
const loader = new DefaultTestFileLoader();
setBaseResourcePath('../../../resources');
self.onmessage = async ev => {
const query = ev.data.query;
const expectations = ev.data.expectations;
const defaultRequestAdapterOptions = ev.data.defaultRequestAdapterOptions;
const debug = ev.data.debug;
const log = new Logger(debug);
setDefaultRequestAdapterOptions(defaultRequestAdapterOptions);
Logger.globalDebugMode = debug;
const log = new Logger();
const testcases = Array.from(await loader.loadCases(parseQuery(query)));
assert(testcases.length === 1, 'worker query resulted in != 1 cases');
const testcase = testcases[0];
const [rec, result] = log.record(testcase.query.toString());
await testcase.run(rec);
await testcase.run(rec, expectations);
self.postMessage({ query, result });
};

View file

@ -1,25 +1,13 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ function _defineProperty(obj, key, value) {
  // Re-defining an already-present key keeps it enumerable/configurable/writable;
  // otherwise a plain assignment is sufficient.
  if (key in obj) {
    Object.defineProperty(obj, key, {
      value,
      enumerable: true,
      configurable: true,
      writable: true,
    });
  } else {
    obj[key] = value;
  }
  return obj;
}
import { LogMessageWithStack } from '../../framework/logging/log_message.js';
**/ import { LogMessageWithStack } from '../../internal/logging/log_message.js';
import { getDefaultRequestAdapterOptions } from '../../util/navigator_gpu.js';
export class TestWorker {
resolvers = new Map();
constructor(debug) {
_defineProperty(this, 'debug', void 0);
_defineProperty(this, 'worker', void 0);
_defineProperty(this, 'resolvers', new Map());
this.debug = debug;
const selfPath = import.meta.url;
@ -36,13 +24,18 @@ export class TestWorker {
}
this.resolvers.get(query)(result);
// TODO(kainino0x): update the Logger with this result (or don't have a logger and update the
// entire results JSON somehow at some point).
// MAINTENANCE_TODO(kainino0x): update the Logger with this result (or don't have a logger and
// update the entire results JSON somehow at some point).
};
}
async run(rec, query) {
this.worker.postMessage({ query, debug: this.debug });
async run(rec, query, expectations = []) {
this.worker.postMessage({
query,
expectations,
debug: this.debug,
defaultRequestAdapterOptions: getDefaultRequestAdapterOptions(),
});
const workerResult = await new Promise(resolve => {
this.resolvers.set(query, resolve);
});

View file

@ -1,58 +1,73 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { DefaultTestFileLoader } from '../framework/file_loader.js';
import { Logger } from '../framework/logging/logger.js';
import { parseQuery } from '../framework/query/parseQuery.js';
import { AsyncMutex } from '../framework/util/async_mutex.js';
import { assert } from '../framework/util/util.js';
**/ // Implements the wpt-embedded test runner (see also: wpt/cts.https.html).
import { globalTestConfig } from '../framework/test_config.js';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { prettyPrintLog } from '../internal/logging/log_message.js';
import { Logger } from '../internal/logging/logger.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { parseExpectationsForTestQuery, relativeQueryString } from '../internal/query/query.js';
import { assert } from '../util/util.js';
import { optionEnabled } from './helper/options.js';
import { TestWorker } from './helper/test_worker.js';
(async () => {
// testharness.js API (https://web-platform-tests.org/writing-tests/testharness-api.html)
setup({
// It's convenient for us to asynchronously add tests to the page. Prevent done() from being
// called implicitly when the page is finished loading.
explicit_done: true,
});
void (async () => {
const workerEnabled = optionEnabled('worker');
const worker = workerEnabled ? new TestWorker(false) : undefined;
globalTestConfig.unrollConstEvalLoops = optionEnabled('unroll_const_eval_loops');
const failOnWarnings =
typeof shouldWebGPUCTSFailOnWarnings !== 'undefined' && (await shouldWebGPUCTSFailOnWarnings);
const loader = new DefaultTestFileLoader();
const qs = new URLSearchParams(window.location.search).getAll('q');
assert(qs.length === 1, 'currently, there must be exactly one ?q=');
const testcases = await loader.loadCases(parseQuery(qs[0]));
const filterQuery = parseQuery(qs[0]);
const testcases = await loader.loadCases(filterQuery);
await addWPTTests(testcases);
})();
const expectations =
typeof loadWebGPUExpectations !== 'undefined'
? parseExpectationsForTestQuery(
await loadWebGPUExpectations,
filterQuery,
new URL(window.location.href)
)
: [];
// Note: `async_test`s must ALL be added within the same task. This function *must not* be async.
function addWPTTests(testcases) {
const worker = optionEnabled('worker') ? new TestWorker(false) : undefined;
const log = new Logger(false);
const mutex = new AsyncMutex();
const running = [];
const log = new Logger();
for (const testcase of testcases) {
const name = testcase.query.toString();
const wpt_fn = function () {
const p = mutex.with(async () => {
const [rec, res] = log.record(name);
if (worker) {
await worker.run(rec, name);
} else {
await testcase.run(rec);
}
// For brevity, display the case name "relative" to the ?q= path.
const shortName = relativeQueryString(filterQuery, testcase.query) || '(case)';
this.step(() => {
// Unfortunately, it seems not possible to surface any logs for warn/skip.
if (res.status === 'fail') {
throw (res.logs || []).map(s => s.toJSON()).join('\n\n');
}
});
this.done();
});
const wpt_fn = async () => {
const [rec, res] = log.record(name);
if (worker) {
await worker.run(rec, name, expectations);
} else {
await testcase.run(rec, expectations);
}
running.push(p);
return p;
// Unfortunately, it seems not possible to surface any logs for warn/skip.
if (res.status === 'fail' || (res.status === 'warn' && failOnWarnings)) {
const logs = (res.logs ?? []).map(prettyPrintLog);
assert_unreached('\n' + logs.join('\n') + '\n');
}
};
async_test(wpt_fn, name);
promise_test(wpt_fn, shortName);
}
return Promise.all(running).then(() => log);
}
done();
})();

View file

@ -2,8 +2,13 @@
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { resolveOnTimeout } from './util.js';
/**
* Attempts to trigger JavaScript garbage collection, either using explicit methods if exposed
* (may be available in testing environments with special browser runtime flags set), or using
* some weird tricks to incur GC pressure. Adopted from the WebGL CTS.
*/
export async function attemptGarbageCollection() {
const w = self;
const w = globalThis;
if (w.GCController) {
w.GCController.collect();
return;
@ -19,8 +24,9 @@ export async function attemptGarbageCollection() {
.getInterface(Components.interfaces.nsIDOMWindowUtils)
.garbageCollect();
return;
} catch (e) {}
} catch (e) {
// ignore any failure
}
if (w.gc) {
w.gc();
return;
@ -34,7 +40,9 @@ export async function attemptGarbageCollection() {
let i;
function gcRec(n) {
if (n < 1) return;
let temp = { i: 'ab' + i + i / 100000 };
temp = temp + 'foo';
temp; // dummy use of unused variable
gcRec(n - 1);

View file

@ -0,0 +1,63 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
/**
* The interface used for formatting strings with color metadata.
*
* Currently Colors will use the 'ansi-colors' module if it can be loaded.
* If it cannot be loaded, then the Colors implementation is a straight pass-through.
*
* Colors may also be a no-op if the current environment does not support colors.
*/
export let Colors;
try {
  Colors = require('ansi-colors');
} catch {
  // ansi-colors unavailable (or no require() in this environment):
  // substitute an identity formatter so call sites keep working.
  const passthrough = s => s;
  passthrough.enabled = false;
  const styles = [
    'reset', 'bold', 'dim', 'italic', 'underline', 'inverse', 'hidden', 'strikethrough',
    'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'gray', 'grey',
    'blackBright', 'redBright', 'greenBright', 'yellowBright',
    'blueBright', 'magentaBright', 'cyanBright', 'whiteBright',
    'bgBlack', 'bgRed', 'bgGreen', 'bgYellow', 'bgBlue', 'bgMagenta', 'bgCyan', 'bgWhite',
    'bgBlackBright', 'bgRedBright', 'bgGreenBright', 'bgYellowBright',
    'bgBlueBright', 'bgMagentaBright', 'bgCyanBright', 'bgWhiteBright',
  ];
  // Every style is the identity function itself, so chained calls still work.
  for (const style of styles) {
    passthrough[style] = passthrough;
  }
  Colors = passthrough;
}

View file

@ -0,0 +1,29 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
/** Returns Object.keys(obj) — own enumerable string keys (typed wrapper in the TS source). */
export function keysOf(obj) {
  return Object.keys(obj);
}
/** Returns the own enumerable keys of `obj`, converted to numbers. */
export function numericKeysOf(obj) {
  // Passing Number directly is safe: it ignores map's extra (index, array) arguments.
  return Object.keys(obj).map(Number);
}
/**
* Creates an info lookup object from a more nicely-formatted table. See below for examples.
*
* Note: Using `as const` on the arguments to this function is necessary to infer the correct type.
*/
export function makeTable(members, defaults, table) {
  // Build each row as an object keyed by member name, falling back to the
  // per-member default when the row leaves an entry undefined (or null).
  return Object.fromEntries(
    Object.entries(table).map(([rowKey, row]) => {
      const entry = {};
      members.forEach((member, i) => {
        entry[member] = row[i] ?? defaults[i];
      });
      return [rowKey, entry];
    })
  );
}

View file

@ -0,0 +1,79 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
import { ErrorWithExtra, assert } from './util.js';
/**
* Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
* Throws an exception if not found.
*/
function defaultGPUProvider() {
  // Fail with a clear message rather than a TypeError on `navigator.gpu`.
  const available = typeof navigator !== 'undefined' && navigator.gpu !== undefined;
  assert(available, 'No WebGPU implementation found');
  return navigator.gpu;
}
/**
* GPUProvider is a function that creates and returns a new GPU instance.
* May throw an exception if a GPU cannot be created.
*/
// The active provider; may be swapped by setGPUProvider() before the GPU is created.
let gpuProvider = defaultGPUProvider;
/**
 * Sets the function to create and return a new GPU instance.
 * Must be called before getGPU() first instantiates the GPU.
 */
export function setGPUProvider(provider) {
  assert(impl === undefined, 'setGPUProvider() should not be after getGPU()');
  gpuProvider = provider;
}
// Cached GPU instance; created once by getGPU() and reused thereafter.
let impl = undefined;
// Options merged into every requestAdapter() call once getGPU() wraps it.
let defaultRequestAdapterOptions;
/** Sets defaults merged into future requestAdapter() calls. Must precede getGPU(). */
export function setDefaultRequestAdapterOptions(options) {
  if (impl) {
    throw new Error('must call setDefaultRequestAdapterOptions before getGPU');
  }
  // Shallow-copy so later mutation of the caller's object has no effect.
  defaultRequestAdapterOptions = { ...options };
}
/** Returns the options previously set via setDefaultRequestAdapterOptions(), if any. */
export function getDefaultRequestAdapterOptions() {
  return defaultRequestAdapterOptions;
}
/**
* Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
* Throws an exception if not found.
*/
export function getGPU(recorder) {
  // Reuse the cached instance once created.
  if (impl) {
    return impl;
  }
  impl = gpuProvider();
  if (defaultRequestAdapterOptions) {
    // Wrap requestAdapter so every call merges in the configured default options.
    const oldFn = impl.requestAdapter;
    impl.requestAdapter = function (options) {
      // Explicitly-passed options win over the defaults.
      const promise = oldFn.call(this, { ...defaultRequestAdapterOptions, ...options });
      if (recorder) {
        // Fire-and-forget: log adapter info for debugging without delaying the caller.
        void promise.then(async adapter => {
          if (adapter) {
            const info = await adapter.requestAdapterInfo();
            const infoString = `Adapter: ${info.vendor} / ${info.architecture} / ${info.device}`;
            recorder.debug(new ErrorWithExtra(infoString, () => ({ adapterInfo: info })));
          }
        });
      }
      return promise;
    };
  }
  return impl;
}

View file

@ -0,0 +1,142 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert } from './util.js'; // The state of the preprocessor is a stack of States.
// Per-level preprocessor state (TypeScript enum compiled to a reverse-mapped object).
var State;
// Have already seen a passing condition; now skipping the rest
// The transitions in the state space are the following preprocessor directives:
// - Sibling elif
// - Sibling else
// - Sibling endif
// - Child if
(function (State) {
  State[(State['Seeking'] = 0)] = 'Seeking';
  State[(State['Passing'] = 1)] = 'Passing';
  State[(State['Skipping'] = 2)] = 'Skipping';
})(State || (State = {}));
// Base class for preprocessor directives ($if/$elif/$else/$endif).
class Directive {
  // depth: the number of "$"s the directive was written with (its nesting level).
  constructor(depth) {
    this.depth = depth;
  }
  // Asserts the directive's "$" count matches the current nesting depth of `stack`.
  checkDepth(stack) {
    assert(
      stack.length === this.depth,
      `Number of "$"s must match nesting depth, currently ${stack.length} (e.g. $if $$if $$endif $endif)`
    );
  }
}
/** `$if(predicate)`: opens a new nesting level on the state stack. */
class If extends Directive {
  constructor(depth, predicate) {
    super(depth);
    this.predicate = predicate;
  }
  applyTo(stack) {
    this.checkDepth(stack);
    const parentState = stack[stack.length - 1].state;
    // If the enclosing level is not passing, skip regardless of the predicate;
    // otherwise the predicate decides Passing vs Seeking.
    let state;
    if (parentState !== State.Passing) {
      state = State.Skipping;
    } else if (this.predicate) {
      state = State.Passing;
    } else {
      state = State.Seeking;
    }
    stack.push({ allowsFollowingElse: true, state });
  }
}
/** `$elif(predicate)`: replaces the sibling level opened by a preceding $if/$elif. */
class ElseIf extends If {
  applyTo(stack) {
    assert(stack.length >= 1);
    const sibling = stack.pop();
    this.checkDepth(stack);
    assert(sibling.allowsFollowingElse, 'pp.elif after pp.else');
    if (sibling.state === State.Seeking) {
      // No branch has passed yet: evaluate this one exactly like a fresh $if.
      super.applyTo(stack);
    } else {
      stack.push({ allowsFollowingElse: true, state: State.Skipping });
    }
  }
}
/** `$else`: passes iff no sibling branch has passed yet. */
class Else extends Directive {
  applyTo(stack) {
    assert(stack.length >= 1);
    const sibling = stack.pop();
    this.checkDepth(stack);
    assert(sibling.allowsFollowingElse, 'pp.else after pp.else');
    const state = sibling.state === State.Seeking ? State.Passing : State.Skipping;
    // An else cannot itself be followed by another else/elif at this depth.
    stack.push({ allowsFollowingElse: false, state });
  }
}
// `$endif`: closes the current nesting level.
class EndIf extends Directive {
  applyTo(stack) {
    // Close the current level, then verify the remaining depth matches the "$" count.
    stack.pop();
    this.checkDepth(stack);
  }
}
/**
* A simple template-based, non-line-based preprocessor implementing if/elif/else/endif.
*
* @example
* ```
* const shader = pp`
* ${pp._if(expr)}
* const x: ${type} = ${value};
* ${pp._elif(expr)}
* ${pp.__if(expr)}
* ...
* ${pp.__else}
* ...
* ${pp.__endif}
* ${pp._endif}`;
* ```
*
* @param strings - The array of constant string chunks of the template string.
* @param ...values - The array of interpolated `${}` values within the template string.
*/
export function pp(strings, ...values) {
  // Stack of nesting levels; the bottom sentinel level is always Passing.
  const stateStack = [{ allowsFollowingElse: false, state: State.Passing }];
  let result = '';
  for (let i = 0; i < values.length; ++i) {
    const passing = stateStack[stateStack.length - 1].state === State.Passing;
    // Each chunk strings[i] precedes values[i]; emit it only while passing.
    if (passing) {
      result += strings[i];
    }
    const value = values[i];
    if (value instanceof Directive) {
      value.applyTo(stateStack);
    } else if (passing) {
      result += value;
    }
  }
  assert(stateStack.length === 1, 'Unterminated preprocessor condition at end of file');
  // Trailing chunk after the last interpolation.
  result += strings[values.length];
  return result;
}
// Directive constructors for depth 1 ("$"), 2 ("$$"), and 3 ("$$$").
pp._if = predicate => new If(1, predicate);
pp._elif = predicate => new ElseIf(1, predicate);
pp._else = new Else(1);
pp._endif = new EndIf(1);
pp.__if = predicate => new If(2, predicate);
pp.__elif = predicate => new ElseIf(2, predicate);
pp.__else = new Else(2);
pp.__endif = new EndIf(2);
pp.___if = predicate => new If(3, predicate);
pp.___elif = predicate => new ElseIf(3, predicate);
pp.___else = new Else(3);
pp.___endif = new EndIf(3);
// Add more if needed.

View file

@ -0,0 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
/**
* Equivalent of `setTimeout`, but redirects to WPT's `step_timeout` when it is defined.
*/
export const timeout = typeof step_timeout !== 'undefined' ? step_timeout : setTimeout;

View file

@ -0,0 +1,13 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
export function assertTypeTrue() {}
/**
* Computes the intersection of a set of types, given the union of those types.
*
* From: https://stackoverflow.com/a/56375136
*/
// K exhausted

View file

@ -0,0 +1,309 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { Float16Array } from '../../external/petamoriken/float16/float16.js';
import { SkipTestCase } from '../framework/fixture.js';
import { globalTestConfig } from '../framework/test_config.js';
import { Logger } from '../internal/logging/logger.js';
import { keysOf } from './data_tables.js';
import { timeout } from './timeout.js';
/**
* Error with arbitrary `extra` data attached, for debugging.
* The extra data is omitted if not running the test in debug mode (`?debug=1`).
*/
export class ErrorWithExtra extends Error {
  /**
   * `extra` function is only called if in debug mode.
   * If an `ErrorWithExtra` is passed, its message is used and its extras are passed through.
   */
  constructor(baseOrMessage, newExtra) {
    const message = typeof baseOrMessage === 'string' ? baseOrMessage : baseOrMessage.message;
    super(message);
    // Merge the base error's extras (new keys win); skip entirely outside debug
    // mode so potentially-large extra objects aren't computed or retained.
    const oldExtras = baseOrMessage instanceof ErrorWithExtra ? baseOrMessage.extra : {};
    this.extra = Logger.globalDebugMode
      ? { ...oldExtras, ...newExtra() }
      : { omitted: 'pass ?debug=1' };
  }
}
/**
* Asserts `condition` is true. Otherwise, throws an `Error` with the provided message.
*/
export function assert(condition, msg) {
  if (condition) {
    return;
  }
  // msg may be a plain string or a lazy message-producing function.
  const text = typeof msg === 'function' ? msg() : msg;
  throw new Error(text);
}
/** If the argument is an Error, throw it. Otherwise, pass it back. */
/** If the argument is an Error, throw it. Otherwise, pass it back unchanged. */
export function assertOK(value) {
  const isError = value instanceof Error;
  if (isError) throw value;
  return value;
}
/**
 * Resolves if the provided promise rejects; rejects (with `Error(msg)`) if it resolves.
 *
 * Note: the failure must be thrown *outside* the try block. The previous
 * implementation called unreachable(msg) inside it, so the Error it threw was
 * immediately caught by the catch and swallowed — assertReject could never fail.
 */
export async function assertReject(p, msg) {
  let rejected = false;
  try {
    await p;
  } catch (ex) {
    rejected = true; // Assertion OK: the promise rejected as expected.
  }
  if (!rejected) {
    throw new Error(msg);
  }
}
/**
* Assert this code is unreachable. Unconditionally throws an `Error`.
*/
export function unreachable(msg) {
  // Unconditional failure: reaching this line is itself the bug.
  throw new Error(msg);
}
/**
* Throw a `SkipTestCase` exception, which skips the test case.
*/
export function skipTestCase(msg) {
  // SkipTestCase is recognized by the test runner and reported as "skip", not "fail".
  throw new SkipTestCase(msg);
}
/**
* The `performance` interface.
* It is available in all browsers, but it is not in scope by default in Node.
*/
const perf = typeof performance !== 'undefined' ? performance : require('perf_hooks').performance;
/**
* Calls the appropriate `performance.now()` depending on whether running in a browser or Node.
*/
export function now() {
  // `perf` is the browser `performance` object, or Node's perf_hooks implementation.
  return perf.now();
}
/**
* Returns a promise which resolves after the specified time.
*/
/** Returns a promise that resolves (with no value) after `ms` milliseconds. */
export function resolveOnTimeout(ms) {
  return new Promise(resolve => {
    timeout(() => resolve(), ms);
  });
}
/** Rejection type produced by rejectOnTimeout/raceWithRejectOnTimeout on expiry. */
export class PromiseTimeoutError extends Error {}
/**
* Returns a promise which rejects after the specified time.
*/
export function rejectOnTimeout(ms, msg) {
  // Never resolves; rejects with PromiseTimeoutError once `ms` elapses.
  return new Promise((_resolve, reject) => {
    timeout(() => {
      reject(new PromiseTimeoutError(msg));
    }, ms);
  });
}
/**
* Takes a promise `p`, and returns a new one which rejects if `p` takes too long,
* and otherwise passes the result through.
*/
export function raceWithRejectOnTimeout(p, ms, msg) {
  // Config escape hatch: skip the timeout race entirely and return p unchanged.
  if (globalTestConfig.noRaceWithRejectOnTimeout) {
    return p;
  }
  // Setup a promise that will reject after `ms` milliseconds. We cancel this timeout when
  // `p` is finalized, so the JavaScript VM doesn't hang around waiting for the timer to
  // complete, once the test runner has finished executing the tests.
  const timeoutPromise = new Promise((_resolve, reject) => {
    const handle = timeout(() => {
      reject(new PromiseTimeoutError(msg));
    }, ms);
    p = p.finally(() => clearTimeout(handle));
  });
  return Promise.race([p, timeoutPromise]);
}
/**
* Takes a promise `p` and returns a new one which rejects if `p` resolves or rejects,
* and otherwise resolves after the specified time.
*/
export function assertNotSettledWithinTime(p, ms, msg) {
  // Rejects regardless of whether p resolves or rejects.
  // (On resolve, it rejects with Error(msg); on reject, p's own reason propagates.)
  const rejectWhenSettled = p.then(() => Promise.reject(new Error(msg)));
  // Resolves after `ms` milliseconds.
  const timeoutPromise = new Promise(resolve => {
    const handle = timeout(() => {
      resolve(undefined);
    }, ms);
    // Clear the timer early if p settles first, so the VM can exit promptly.
    p.finally(() => clearTimeout(handle));
  });
  return Promise.race([rejectWhenSettled, timeoutPromise]);
}
/**
* Returns a `Promise.reject()`, but also registers a dummy `.catch()` handler so it doesn't count
* as an uncaught promise rejection in the runtime.
*/
export function rejectWithoutUncaught(err) {
  const p = Promise.reject(err);
  // Suppress uncaught promise rejection by pre-attaching a no-op handler.
  p.catch(() => {});
  return p;
}
/**
* Makes a copy of a JS `object`, with the keys reordered into sorted order.
*/
export function sortObjectByKey(v) {
  // Insert entries in sorted-key order so string keys enumerate alphabetically.
  const orderedKeys = Object.keys(v).sort();
  return Object.fromEntries(orderedKeys.map(k => [k, v[k]]));
}
/**
* Determines whether two JS values are equal, recursing into objects and arrays.
* NaN is treated specially, such that `objectEquals(NaN, NaN)`.
*/
export function objectEquals(x, y) {
  if (typeof x !== 'object' || typeof y !== 'object') {
    // Primitives (and functions): NaN equals NaN, otherwise strict equality.
    if (typeof x === 'number' && typeof y === 'number' && Number.isNaN(x) && Number.isNaN(y)) {
      return true;
    }
    return x === y;
  }
  if (x === null || y === null) return x === y;
  if (x.constructor !== y.constructor) return false;
  if (x instanceof Function) return x === y;
  if (x instanceof RegExp) return x === y;
  // Same reference, or equal primitive valueOf() (covers e.g. Dates with equal times).
  if (x === y || x.valueOf() === y.valueOf()) return true;
  if (Array.isArray(x) && Array.isArray(y) && x.length !== y.length) return false;
  // Dates with differing times were not caught by the valueOf() check above.
  if (x instanceof Date) return false;
  if (!(x instanceof Object)) return false;
  if (!(y instanceof Object)) return false;
  // Recurse: key sets must match, and every corresponding value must be equal.
  const x1 = x;
  const y1 = y;
  const p = Object.keys(x);
  return Object.keys(y).every(i => p.indexOf(i) !== -1) && p.every(i => objectEquals(x1[i], y1[i]));
}
/**
* Generates a range of values `fn(0)..fn(n-1)`.
*/
export function range(n, fn) {
  // Preallocate, then fill each slot with fn(i).
  // (new Array(n) keeps the original's RangeError behavior for invalid lengths.)
  const result = new Array(n);
  for (let i = 0; i < n; ++i) {
    result[i] = fn(i);
  }
  return result;
}
/**
* Generates a range of values `fn(0)..fn(n-1)`.
*/
export function* iterRange(n, fn) {
  let i = 0;
  while (i < n) {
    yield fn(i);
    ++i;
  }
}
/** Creates a (reusable) iterable object that maps `f` over `xs`, lazily. */
/** Creates a (reusable) iterable object that maps `f` over `xs`, lazily. */
export function mapLazy(xs, f) {
  // Each call to [Symbol.iterator]() produces a fresh generator, so the
  // returned object can be iterated any number of times.
  function* generate() {
    for (const item of xs) {
      yield f(item);
    }
  }
  return { [Symbol.iterator]: generate };
}
const ReorderOrders = {
forward: true,
backward: true,
shiftByHalf: true,
};
export const kReorderOrderKeys = keysOf(ReorderOrders);
/**
 * Creates a new array from the given array with the first half
 * swapped with the last half.
 *
 * The input array is left unmodified, matching `reorder`'s other cases
 * (which copy via slice). The previous splice-based implementation
 * destructively removed the first half from the caller's array.
 *
 * @param arr - source array; not mutated.
 * @returns a new array: [...secondHalf, ...firstHalf].
 */
export function shiftByHalf(arr) {
  const half = (arr.length / 2) | 0;
  // slice() copies; splice() would mutate `arr` in place.
  return [...arr.slice(half), ...arr.slice(0, half)];
}
/**
* Creates a reordered array from the input array based on the Order
*/
export function reorder(order, arr) {
  // Dispatch on the requested ReorderOrder key; returns a reordered array.
  switch (order) {
    case 'forward':
      return arr.slice();
    case 'backward':
      return arr.slice().reverse();
    case 'shiftByHalf': {
      // should this be pseudo random?
      return shiftByHalf(arr);
    }
  }
}
const TypedArrayBufferViewInstances = [
new Uint8Array(),
new Uint8ClampedArray(),
new Uint16Array(),
new Uint32Array(),
new Int8Array(),
new Int16Array(),
new Int32Array(),
new Float16Array(),
new Float32Array(),
new Float64Array(),
];
export const kTypedArrayBufferViews = {
...(() => {
const result = {};
for (const v of TypedArrayBufferViewInstances) {
result[v.constructor.name] = v.constructor;
}
return result;
})(),
};
export const kTypedArrayBufferViewKeys = keysOf(kTypedArrayBufferViews);
export const kTypedArrayBufferViewConstructors = Object.values(kTypedArrayBufferViews);
// Views a range of `buf` as a Uint8Array.
// `start`/`length` are in elements for TypedArray inputs, or in bytes for ArrayBuffers.
function subarrayAsU8(buf, { start = 0, length }) {
  if (buf instanceof ArrayBuffer) {
    return new Uint8Array(buf, start, length);
  } else if (buf instanceof Uint8Array || buf instanceof Uint8ClampedArray) {
    // Don't wrap in new views if we don't need to.
    if (start === 0 && (length === undefined || length === buf.byteLength)) {
      return buf;
    }
  }
  // Convert element offsets/counts into byte offsets/counts on the backing buffer.
  const byteOffset = buf.byteOffset + start * buf.BYTES_PER_ELEMENT;
  const byteLength =
    length !== undefined
      ? length * buf.BYTES_PER_ELEMENT
      : buf.byteLength - (byteOffset - buf.byteOffset);
  return new Uint8Array(buf.buffer, byteOffset, byteLength);
}
/**
* Copy a range of bytes from one ArrayBuffer or TypedArray to another.
*
* `start`/`length` are in elements (or in bytes, if ArrayBuffer).
*/
export function memcpy(src, dst) {
  // View both ranges as bytes and copy via TypedArray.prototype.set.
  subarrayAsU8(dst.dst, dst).set(subarrayAsU8(src.src, src));
}

View file

@ -0,0 +1,24 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { timeout } from './timeout.js'; // Copied from https://github.com/web-platform-tests/wpt/blob/master/common/reftest-wait.js
/**
* Remove the `reftest-wait` class on the document element.
* The reftest runner will wait with taking a screenshot while
* this class is present.
*
* See https://web-platform-tests.org/writing-tests/reftests.html#controlling-when-comparison-occurs
*/
export function takeScreenshot() {
  // Removing the class signals the reftest harness that rendering is complete.
  document.documentElement.classList.remove('reftest-wait');
}
/**
* Call `takeScreenshot()` after a delay of at least `ms` milliseconds.
* @param {number} ms - milliseconds
*/
export function takeScreenshotDelayed(ms) {
  // timeout() defers to WPT's step_timeout when it is available.
  timeout(() => {
    takeScreenshot();
  }, ms);
}

View file

@ -1,63 +0,0 @@
<!-- AUTO-GENERATED - DO NOT EDIT. See WebGPU CTS: tools/gen_wpt_cts_html. -->
<!--
This test suite is built from the TypeScript sources at:
https://github.com/gpuweb/cts
If you are debugging WebGPU conformance tests, it's highly recommended that
you use the standalone interactive runner in that repository, which
provides tools for easier debugging and editing (source maps, debug
logging, warn/skip functionality, etc.)
NOTE:
The WPT version of this file is generated with *one variant per test spec
file*. If your harness needs more fine-grained suppressions, you'll need to
generate your own variants list from your suppression list.
See `tools/gen_wpt_cts_html` to do this.
When run under browser CI, the original cts.html should be skipped, and
this alternate version should be run instead, under a non-exported WPT test
directory (e.g. Chromium's wpt_internal).
-->
<!doctype html>
<title>WebGPU CTS</title>
<meta charset=utf-8>
<link rel=help href='https://gpuweb.github.io/gpuweb/'>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script type=module src=../webgpu/common/runtime/wpt.js></script>
<!--<meta name=variant content='?q=webgpu:api,operation,buffers,map:*'>-->
<meta name=variant content='?q=webgpu:api,operation,buffers,map_detach:*'>
<meta name=variant content='?q=webgpu:api,operation,buffers,map_oom:*'>
<meta name=variant content='?q=webgpu:api,operation,command_buffer,basic:*'>
<meta name=variant content='?q=webgpu:api,operation,command_buffer,copies:*'>
<meta name=variant content='?q=webgpu:api,operation,command_buffer,render,basic:*'>
<!--<meta name=variant content='?q=webgpu:api,operation,copyBetweenLinearDataAndTexture:*'>-->
<meta name=variant content='?q=webgpu:api,operation,fences:*'>
<!--<meta name=variant content='?q=webgpu:api,operation,render_pass,storeOp:*'>-->
<!--<meta name=variant content='?q=webgpu:api,operation,resource_init,copied_texture_clear:*'>-->
<meta name=variant content='?q=webgpu:api,validation,copyBufferToBuffer:*'>
<meta name=variant content='?q=webgpu:api,validation,copy_between_linear_data_and_texture,copyBetweenLinearDataAndTexture_dataRelated:*'>
<meta name=variant content='?q=webgpu:api,validation,copy_between_linear_data_and_texture,copyBetweenLinearDataAndTexture_textureRelated:*'>
<meta name=variant content='?q=webgpu:api,validation,createBindGroup:*'>
<!--<meta name=variant content='?q=webgpu:api,validation,createBindGroupLayout:*'>-->
<meta name=variant content='?q=webgpu:api,validation,createPipelineLayout:*'>
<!--<meta name=variant content='?q=webgpu:api,validation,createTexture:*'>-->
<!--<meta name=variant content='?q=webgpu:api,validation,createView:*'>-->
<meta name=variant content='?q=webgpu:api,validation,error_scope:*'>
<meta name=variant content='?q=webgpu:api,validation,fences:*'>
<meta name=variant content='?q=webgpu:api,validation,queue_submit:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,resolve:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,storeOp:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass_descriptor:*'>
<meta name=variant content='?q=webgpu:api,validation,resource_usages,textureUsageInRender:*'>
<meta name=variant content='?q=webgpu:api,validation,setBindGroup:*'>
<meta name=variant content='?q=webgpu:api,validation,setBlendColor:*'>
<meta name=variant content='?q=webgpu:api,validation,setScissorRect:*'>
<meta name=variant content='?q=webgpu:api,validation,setStencilReference:*'>
<meta name=variant content='?q=webgpu:api,validation,setViewport:*'>
<meta name=variant content='?q=webgpu:examples:*'>
<meta name=variant content='?q=webgpu:idl,constants,flags:*'>
<meta name=variant content='?q=webgpu:web-platform,canvas,context_creation:*'>
<meta name=variant content='?q=webgpu:web-platform,copyImageBitmapToTexture:*'>

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,3 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export {};

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,15 @@
Always use `getResourcePath()` to get the appropriate path to these resources depending
on the context (WPT, standalone, worker, etc.)
The test video files were generated with the ffmpeg commands below:
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp8-bt601.webm
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libtheora -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-theora-bt601.ogv
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-h264-bt601.mp4
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp9-bt601.webm
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace bt709 -color_primaries bt709 -color_trc bt709 -color_range tv -vf scale=out_color_matrix=bt709:out_range=tv four-colors-vp9-bt709.webm
These rotation test files are copies of four-colors-h264-bt601.mp4 with metadata changes.
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=90 four-colors-h264-bt601-rotate-90.mp4
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=180 four-colors-h264-bt601-rotate-180.mp4
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=270 four-colors-h264-bt601-rotate-270.mp4

Binary file not shown.

After

Width:  |  Height:  |  Size: 840 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

View file

@ -0,0 +1,122 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests for GPU.requestAdapter.
Test all possible options to requestAdapter.
default, low-power, and high performance should all always return adapters.
forceFallbackAdapter may or may not return an adapter.
GPU.requestAdapter can technically return null for any reason
but we need test functionality so the test requires an adapter except
when forceFallbackAdapter is true.
The test runs simple compute shader is run that fills a buffer with consecutive
values and then checks the result to test the adapter for basic functionality.
`;
import { Fixture } from '../../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import { assert, objectEquals, iterRange } from '../../../../common/util/util.js';
export const g = makeTestGroup(Fixture);

// Option values swept by the requestAdapter tests.
// `undefined` means "omit the option from the descriptor entirely".
const powerPreferenceModes = [undefined, 'low-power', 'high-performance'];
const forceFallbackOptions = [undefined, false, true];
/**
 * Smoke-tests that `adapter` is functional: requests a device from it, runs a
 * trivial compute shader that writes `id.x + kOffset` into each element of a
 * storage buffer, reads the buffer back, and asserts the expected contents.
 * Destroys all created GPU resources (and the device) before returning.
 */
async function testAdapter(adapter) {
  assert(adapter !== null, 'Failed to get adapter.');
  const device = await adapter.requestDevice();
  assert(device !== null, 'Failed to get device.');

  // Non-zero offset so results are distinguishable from zero-initialized memory.
  const kOffset = 1230000;
  const computePipeline = device.createComputePipeline({
    layout: 'auto',
    compute: {
      module: device.createShaderModule({
        code: `
struct Buffer { data: array<u32>, };
@group(0) @binding(0) var<storage, read_write> buffer: Buffer;
@compute @workgroup_size(1u) fn main(
    @builtin(global_invocation_id) id: vec3<u32>) {
  buffer.data[id.x] = id.x + ${kOffset}u;
}
`,
      }),
      entryPoint: 'main',
    },
  });

  const kNumElements = 64;
  const kBufferSize = 4 * kNumElements;
  // GPU-side buffer the shader writes into.
  const storageBuffer = device.createBuffer({
    size: kBufferSize,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
  });
  // Mappable buffer used to read the results back on the CPU.
  const readbackBuffer = device.createBuffer({
    size: kBufferSize,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  const bindGroup = device.createBindGroup({
    layout: computePipeline.getBindGroupLayout(0),
    entries: [{ binding: 0, resource: { buffer: storageBuffer } }],
  });

  // One workgroup per element; copy results to the readback buffer afterwards.
  const commandEncoder = device.createCommandEncoder();
  const computePass = commandEncoder.beginComputePass();
  computePass.setPipeline(computePipeline);
  computePass.setBindGroup(0, bindGroup);
  computePass.dispatchWorkgroups(kNumElements);
  computePass.end();
  commandEncoder.copyBufferToBuffer(storageBuffer, 0, readbackBuffer, 0, kBufferSize);
  device.queue.submit([commandEncoder.finish()]);

  const expectedData = new Uint32Array([...iterRange(kNumElements, x => x + kOffset)]);
  await readbackBuffer.mapAsync(GPUMapMode.READ);
  const actualData = new Uint32Array(readbackBuffer.getMappedRange());
  assert(objectEquals(actualData, expectedData), 'compute pipeline ran');

  readbackBuffer.destroy();
  storageBuffer.destroy();
  device.destroy();
}
// Sweeps every combination of powerPreference x forceFallbackAdapter and
// checks the returned adapter works via testAdapter().
g.test('requestAdapter')
  .desc(`request adapter with all possible options and check for basic functionality`)
  .params(u =>
    u
      .combine('powerPreference', powerPreferenceModes)
      .combine('forceFallbackAdapter', forceFallbackOptions)
  )
  .fn(async t => {
    const { powerPreference, forceFallbackAdapter } = t.params;
    // Only spread in options that are defined, so the `undefined` variants
    // exercise a descriptor with the key absent rather than explicitly undefined.
    const adapter = await getGPU(t.rec).requestAdapter({
      ...(powerPreference !== undefined && { powerPreference }),
      ...(forceFallbackAdapter !== undefined && { forceFallbackAdapter }),
    });
    // failing to create an adapter when forceFallbackAdapter is true is ok.
    if (forceFallbackAdapter && !adapter) {
      t.skip('No adapter available');
      return;
    }
    await testAdapter(adapter);
  });
// requestAdapter() with no descriptor at all must still yield a working adapter.
g.test('requestAdapter_no_parameters')
  .desc(`request adapter with no parameters`)
  .fn(async t => {
    const adapter = await getGPU(t.rec).requestAdapter();
    await testAdapter(adapter);
  });

View file

@ -0,0 +1,55 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests various ways of calling GPUAdapter.requestAdapterInfo.
TODO:
- Find a way to perform tests with and without user activation
`;
import { Fixture } from '../../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import { assert } from '../../../../common/util/util.js';
export const g = makeTestGroup(Fixture);

// Matches either the empty string or a lowercase kebab-case identifier
// (e.g. 'intel', 'gen-12lp') — the normalized form required of
// GPUAdapterInfo members other than `description`.
const normalizedIdentifierRegex = /^$|^[a-z0-9]+(-[a-z0-9]+)*$/;
// Checks vendor/architecture/device members of GPUAdapterInfo are normalized
// identifiers; `description` is intentionally not validated.
g.test('adapter_info')
  .desc(
    `
Test that calling requestAdapterInfo with no arguments:
- Returns a GPUAdapterInfo structure
- Every member in the structure except description is properly formatted`
  )
  .fn(async t => {
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const adapterInfo = await adapter.requestAdapterInfo();
    t.expect(
      normalizedIdentifierRegex.test(adapterInfo.vendor),
      `adapterInfo.vendor should be a normalized identifier. But it's '${adapterInfo.vendor}'`
    );
    t.expect(
      normalizedIdentifierRegex.test(adapterInfo.architecture),
      `adapterInfo.architecture should be a normalized identifier. But it's '${adapterInfo.architecture}'`
    );
    t.expect(
      normalizedIdentifierRegex.test(adapterInfo.device),
      `adapterInfo.device should be a normalized identifier. But it's '${adapterInfo.device}'`
    );
  });
// Placeholder: hint-based requestAdapterInfo needs user-activation plumbing
// (see the TODO in this file's description) before it can be implemented.
g.test('adapter_info_with_hints')
  .desc(
    `
Test that calling requestAdapterInfo with hints:
- Rejects without user activation
- Succeed with user activation`
  )
  .unimplemented();

View file

@ -0,0 +1,361 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Test GPUAdapter.requestDevice.
Note tests explicitly destroy created devices so that tests don't have to wait for GC to clean up
potentially limited native resources.
`;
import { Fixture } from '../../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import { assert, assertReject, raceWithRejectOnTimeout } from '../../../../common/util/util.js';
import { kFeatureNames, kLimitInfo, kLimits } from '../../../capability_info.js';
import { clamp, isPowerOfTwo } from '../../../util/math.js';
export const g = makeTestGroup(Fixture);
// A device requested with no/empty arguments must have zero optional features
// and exactly the spec-default value for every limit.
g.test('default')
  .desc(
    `
Test requesting the device with a variation of default parameters.
- No features listed in default device
- Default limits`
  )
  .paramsSubcasesOnly(u =>
    u.combine('args', [[], [undefined], [{}], [{ requiredFeatures: [], requiredLimits: {} }]])
  )
  .fn(async t => {
    const { args } = t.params;
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    // Spread so each `args` variant calls requestDevice with a different arity.
    const device = await adapter.requestDevice(...args);
    assert(device !== null);
    // Default device should have no features.
    t.expect(device.features.size === 0, 'Default device should not have any features');
    // All limits should be defaults.
    for (const limit of kLimits) {
      t.expect(
        device.limits[limit] === kLimitInfo[limit].default,
        `Expected ${limit} == default: ${device.limits[limit]} != ${kLimitInfo[limit].default}`
      );
    }
    device.destroy();
  });
// Destroying a device is expected to invalidate the adapter it came from;
// a subsequent requestDevice must resolve to an already-lost device.
g.test('invalid')
  .desc(
    `
Test that requesting device on an invalid adapter resolves with lost device.
- Induce invalid adapter via a device lost from a device.destroy()
- Check the device is lost with reason 'destroyed'
- Try creating another device on the now-stale adapter
- Check that returns a device lost with 'unknown'
`
  )
  .fn(async t => {
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    {
      // Request a device and destroy it immediately afterwards.
      const device = await adapter.requestDevice();
      assert(device !== null);
      device.destroy();
      const lostInfo = await device.lost;
      t.expect(lostInfo.reason === 'destroyed');
    }
    // The adapter should now be invalid since a device was lost. Requesting another device should
    // return an already lost device.
    // Bound the wait so a non-conforming implementation fails instead of hanging.
    const kTimeoutMS = 1000;
    const device = await adapter.requestDevice();
    const lost = await raceWithRejectOnTimeout(device.lost, kTimeoutMS, 'device was not lost');
    t.expect(lost.reason === 'unknown');
  });
// Exercises the "one successful device per adapter" rule: after optional
// initial rejections (TypeError / OperationError), one request succeeds and a
// second request on the same adapter yields an already-lost device.
g.test('stale')
  .desc(
    `
Test that adapter.requestDevice() can successfully return a device once, and once only.
- Tests that we can successfully resolve after serial and concurrent rejections.
- Tests that consecutive valid attempts only succeeds the first time, returning lost device otherwise.`
  )
  .paramsSubcasesOnly(u =>
    u
      .combine('initialError', [undefined, 'TypeError', 'OperationError'])
      .combine('awaitInitialError', [true, false])
      .combine('awaitSuccess', [true, false])
      // awaitInitialError is meaningless when there is no initial error.
      .unless(
        ({ initialError, awaitInitialError }) => initialError === undefined && awaitInitialError
      )
  )
  .fn(async t => {
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const { initialError, awaitInitialError, awaitSuccess } = t.params;
    // Optionally provoke a first, failing requestDevice — either awaited
    // (serial) or left pending (concurrent with the later success).
    switch (initialError) {
      case undefined:
        break;
      case 'TypeError':
        // Cause a type error by requesting with an unknown feature.
        if (awaitInitialError) {
          await assertReject(adapter.requestDevice({ requiredFeatures: ['unknown-feature'] }));
        } else {
          t.shouldReject(
            'TypeError',
            adapter.requestDevice({ requiredFeatures: ['unknown-feature'] })
          );
        }
        break;
      case 'OperationError':
        // Cause an operation error by requesting with an alignment limit that is not a power of 2.
        if (awaitInitialError) {
          await assertReject(
            adapter.requestDevice({ requiredLimits: { minUniformBufferOffsetAlignment: 255 } })
          );
        } else {
          t.shouldReject(
            'OperationError',
            adapter.requestDevice({ requiredLimits: { minUniformBufferOffsetAlignment: 255 } })
          );
        }
        break;
    }
    let device = undefined;
    const promise = adapter.requestDevice();
    if (awaitSuccess) {
      device = await promise;
      assert(device !== null);
    } else {
      // Don't await: verify it eventually resolves, destroying the device then.
      t.shouldResolve(
        (async () => {
          const device = await promise;
          device.destroy();
        })()
      );
    }
    const kTimeoutMS = 1000;
    const lostDevice = await adapter.requestDevice();
    const lost = await raceWithRejectOnTimeout(
      lostDevice.lost,
      kTimeoutMS,
      'adapter was not stale'
    );
    t.expect(lost.reason === 'unknown');
    // Make sure to destroy the valid device after trying to get a second one. Otherwise, the second
    // device may fail because the adapter is put into an invalid state from the destroy.
    if (device) {
      device.destroy();
    }
  });
// Unknown strings in requiredFeatures are not valid GPUFeatureName enum
// values, so requestDevice must reject with a TypeError.
g.test('features,unknown')
  .desc(
    `
Test requesting device with an unknown feature.`
  )
  .fn(async t => {
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    t.shouldReject('TypeError', adapter.requestDevice({ requiredFeatures: ['unknown-feature'] }));
  });
// For every known feature: if the adapter advertises it, the device must be
// created with it; otherwise requestDevice must reject with a TypeError.
g.test('features,known')
  .desc(
    `
Test requesting device with all features.
- Succeeds with device supporting feature if adapter supports the feature.
- Rejects if the adapter does not support the feature.`
  )
  .params(u => u.combine('feature', kFeatureNames))
  .fn(async t => {
    const { feature } = t.params;
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const promise = adapter.requestDevice({ requiredFeatures: [feature] });
    if (adapter.features.has(feature)) {
      const device = await promise;
      t.expect(device.features.has(feature), 'Device should include the required feature');
      // Explicitly destroy the device, matching the other tests in this file,
      // so limited native resources are released without waiting for GC.
      device.destroy();
    } else {
      t.shouldReject('TypeError', promise);
    }
  });
// A requiredLimits key that is not a supported limit name must cause
// requestDevice to reject with an OperationError.
g.test('limits,unknown')
  .desc(
    `
Test that specifying limits that aren't part of the supported limit set causes
requestDevice to reject.`
  )
  .fn(async t => {
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const requiredLimits = { unknownLimitName: 9000 };
    t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
  });
// Requesting a limit at either the spec default or the adapter's own supported
// value must succeed, and the device must report exactly the requested value.
g.test('limits,supported')
  .desc(
    `
Test that each supported limit can be specified with valid values.
- Tests each limit with the default values given by the spec
- Tests each limit with the supported values given by the adapter`
  )
  .params(u =>
    u.combine('limit', kLimits).beginSubcases().combine('limitValue', ['default', 'adapter'])
  )
  .fn(async t => {
    const { limit, limitValue } = t.params;
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    // -1 is a sentinel; both switch cases below overwrite it.
    let value = -1;
    switch (limitValue) {
      case 'default':
        value = kLimitInfo[limit].default;
        break;
      case 'adapter':
        value = adapter.limits[limit];
        break;
    }
    const device = await adapter.requestDevice({ requiredLimits: { [limit]: value } });
    assert(device !== null);
    t.expect(
      device.limits[limit] === value,
      'Devices reported limit should match the required limit'
    );
    device.destroy();
  });
// Asking for a limit better than the adapter supports must reject.
// "Better" depends on the limit's class: larger for 'maximum' limits,
// smaller (or non-power-of-two) for 'alignment' limits.
g.test('limit,better_than_supported')
  .desc(
    `
Test that specifying a better limit than what the adapter supports causes requestDevice to
reject.
- Tests each limit
- Tests requesting better limits by various amounts`
  )
  .params(u =>
    u
      .combine('limit', kLimits)
      .beginSubcases()
      .expandWithParams(p => {
        // (mul, add) perturbations applied to the adapter's supported value.
        switch (kLimitInfo[p.limit].class) {
          case 'maximum':
            return [
              { mul: 1, add: 1 },
              { mul: 1, add: 100 },
            ];
          case 'alignment':
            return [
              { mul: 1, add: -1 },
              { mul: 1 / 2, add: 0 },
              { mul: 1 / 1024, add: 0 },
            ];
        }
      })
  )
  .fn(async t => {
    const { limit, mul, add } = t.params;
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const value = adapter.limits[limit] * mul + add;
    // Clamp so the request stays within the limit's representable range and
    // the rejection is due to exceeding the adapter, not an invalid value.
    const requiredLimits = {
      [limit]: clamp(value, { min: 0, max: kLimitInfo[limit].maximumValue }),
    };
    t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
  });
// Asking for a limit worse than the spec default must still yield a device
// whose limit is (clamped to) the default — except that an alignment limit
// which is not a power of two is invalid and must reject.
g.test('limit,worse_than_default')
  .desc(
    `
Test that specifying a worse limit than the default values required by the spec cause the value
to clamp.
- Tests each limit
- Tests requesting worse limits by various amounts`
  )
  .params(u =>
    u
      .combine('limit', kLimits)
      .beginSubcases()
      .expandWithParams(p => {
        // (mul, add) perturbations applied to the spec-default value.
        switch (kLimitInfo[p.limit].class) {
          case 'maximum':
            return [
              { mul: 1, add: -1 },
              { mul: 1, add: -100 },
            ];
          case 'alignment':
            return [
              { mul: 1, add: 1 },
              { mul: 2, add: 0 },
              { mul: 1024, add: 0 },
            ];
        }
      })
  )
  .fn(async t => {
    const { limit, mul, add } = t.params;
    const gpu = getGPU(t.rec);
    const adapter = await gpu.requestAdapter();
    assert(adapter !== null);
    const value = kLimitInfo[limit].default * mul + add;
    const requiredLimits = {
      [limit]: clamp(value, { min: 0, max: kLimitInfo[limit].maximumValue }),
    };
    // Alignment limits must be powers of two to be accepted at all;
    // maximum-class limits can always be requested lower (they clamp up).
    let success;
    switch (kLimitInfo[limit].class) {
      case 'alignment':
        success = isPowerOfTwo(value);
        break;
      case 'maximum':
        success = true;
        break;
    }
    if (success) {
      const device = await adapter.requestDevice({ requiredLimits });
      assert(device !== null);
      t.expect(
        device.limits[limit] === kLimitInfo[limit].default,
        'Devices reported limit should match the default limit'
      );
      device.destroy();
    } else {
      t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
    }
  });

View file

@ -1,15 +1,27 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = '';
import { pbool, params } from '../../../../common/framework/params_builder.js';
**/ export const description = `
Test the operation of buffer mapping, specifically the data contents written via
map-write/mappedAtCreation, and the contents of buffers returned by getMappedRange on
buffers which are mapped-read/mapped-write/mappedAtCreation.
range: used for getMappedRange
mapRegion: used for mapAsync
mapRegionBoundModes is used to get mapRegion from range:
- default-expand: expand mapRegion to buffer bound by setting offset/size to undefined
- explicit-expand: expand mapRegion to buffer bound by explicitly calculating offset/size
- minimal: make mapRegion to be the same as range which is the minimal range to make getMappedRange input valid
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert } from '../../../../common/framework/util/util.js';
import { assert, memcpy } from '../../../../common/util/util.js';
import { checkElementsEqual } from '../../../util/check_contents.js';
import { MappingTest } from './mapping_test.js';
export const g = makeTestGroup(MappingTest);
const kCases = [
const kSubcases = [
{ size: 0, range: [] },
{ size: 0, range: [undefined] },
{ size: 0, range: [undefined, undefined] },
@ -32,16 +44,33 @@ const kCases = [
];
function reifyMapRange(bufferSize, range) {
var _range$, _range$2;
const offset = (_range$ = range[0]) !== null && _range$ !== void 0 ? _range$ : 0;
const offset = range[0] ?? 0;
return [offset, range[1] ?? bufferSize - offset];
}
const mapRegionBoundModes = ['default-expand', 'explicit-expand', 'minimal'];
function getRegionForMap(bufferSize, range, { mapAsyncRegionLeft, mapAsyncRegionRight }) {
const regionLeft = mapAsyncRegionLeft === 'minimal' ? range[0] : 0;
const regionRight = mapAsyncRegionRight === 'minimal' ? range[0] + range[1] : bufferSize;
return [
offset,
(_range$2 = range[1]) !== null && _range$2 !== void 0 ? _range$2 : bufferSize - offset,
mapAsyncRegionLeft === 'default-expand' ? undefined : regionLeft,
mapAsyncRegionRight === 'default-expand' ? undefined : regionRight - regionLeft,
];
}
g.test('mapAsync,write')
.params(kCases)
.desc(
`Use map-write to write to various ranges of variously-sized buffers, then expectContents
(which does copyBufferToBuffer + map-read) to ensure the contents were written.`
)
.params(u =>
u
.combine('mapAsyncRegionLeft', mapRegionBoundModes)
.combine('mapAsyncRegionRight', mapRegionBoundModes)
.beginSubcases()
.combineWithParams(kSubcases)
)
.fn(async t => {
const { size, range } = t.params;
const [rangeOffset, rangeSize] = reifyMapRange(size, range);
@ -51,23 +80,109 @@ g.test('mapAsync,write')
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
});
await buffer.mapAsync(GPUMapMode.WRITE);
const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
const arrayBuffer = buffer.getMappedRange(...range);
t.checkMapWrite(buffer, rangeOffset, arrayBuffer, rangeSize);
});
// Writes a pattern into one mapped range, zeroes a second (possibly
// overlapping) mapped range, then verifies bytes outside the second range
// kept their original values.
g.test('mapAsync,write,unchanged_ranges_preserved')
  .desc(
    `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers, then
use mapAsync to map a different range and zero it out. Finally use expectGPUBufferValuesEqual
(which does copyBufferToBuffer + map-read) to verify that contents originally written outside the
second mapped range were not altered.`
  )
  .params(u =>
    u
      .beginSubcases()
      .combine('mappedAtCreation', [false, true])
      .combineWithParams([
        { size: 12, range1: [], range2: [8] },
        { size: 12, range1: [], range2: [0, 8] },
        { size: 12, range1: [0, 8], range2: [8] },
        { size: 12, range1: [8], range2: [0, 8] },
        { size: 28, range1: [], range2: [8, 8] },
        { size: 28, range1: [8, 16], range2: [16, 8] },
        { size: 32, range1: [16, 12], range2: [8, 16] },
        { size: 32, range1: [8, 8], range2: [24, 4] },
      ])
  )
  .fn(async t => {
    const { size, range1, range2, mappedAtCreation } = t.params;
    // Resolve [offset?, size?] shorthands into concrete (offset, size) pairs.
    const [rangeOffset1, rangeSize1] = reifyMapRange(size, range1);
    const [rangeOffset2, rangeSize2] = reifyMapRange(size, range2);
    const buffer = t.device.createBuffer({
      mappedAtCreation,
      size,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
    });
    // If the buffer is not mappedAtCreation map it now.
    if (!mappedAtCreation) {
      await buffer.mapAsync(GPUMapMode.WRITE);
    }
    // Set the initial contents of the buffer.
    const init = buffer.getMappedRange(...range1);
    assert(init.byteLength === rangeSize1);
    // expectedBuffer mirrors the CPU-side view of what the GPU buffer holds.
    const expectedBuffer = new ArrayBuffer(size);
    const expected = new Uint32Array(
      expectedBuffer,
      rangeOffset1,
      rangeSize1 / Uint32Array.BYTES_PER_ELEMENT
    );
    const data = new Uint32Array(init);
    for (let i = 0; i < data.length; ++i) {
      data[i] = expected[i] = i + 1;
    }
    buffer.unmap();
    // Write to a second range of the buffer
    await buffer.mapAsync(GPUMapMode.WRITE, ...range2);
    const init2 = buffer.getMappedRange(...range2);
    assert(init2.byteLength === rangeSize2);
    const expected2 = new Uint32Array(
      expectedBuffer,
      rangeOffset2,
      rangeSize2 / Uint32Array.BYTES_PER_ELEMENT
    );
    const data2 = new Uint32Array(init2);
    for (let i = 0; i < data2.length; ++i) {
      data2[i] = expected2[i] = 0;
    }
    buffer.unmap();
    // Verify that the range of the buffer which was not overwritten was preserved.
    t.expectGPUBufferValuesEqual(buffer, expected, rangeOffset1);
  });
g.test('mapAsync,read')
.params(kCases)
.desc(
`Use mappedAtCreation to initialize various ranges of variously-sized buffers, then
map-read and check the read-back result.`
)
.params(u =>
u
.combine('mapAsyncRegionLeft', mapRegionBoundModes)
.combine('mapAsyncRegionRight', mapRegionBoundModes)
.beginSubcases()
.combineWithParams(kSubcases)
)
.fn(async t => {
const { size, range } = t.params;
const [, rangeSize] = reifyMapRange(size, range);
const [rangeOffset, rangeSize] = reifyMapRange(size, range);
const buffer = t.device.createBuffer({
mappedAtCreation: true,
size,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
});
const init = buffer.getMappedRange(...range);
assert(init.byteLength === rangeSize);
@ -78,19 +193,108 @@ g.test('mapAsync,read')
}
buffer.unmap();
await buffer.mapAsync(GPUMapMode.READ);
const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
const actual = new Uint8Array(buffer.getMappedRange(...range));
t.expectBuffer(actual, new Uint8Array(expected.buffer));
t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
});
// Reads back a mapped buffer through every TypedArray type and checks each
// view sees the values originally written at its byte offset.
g.test('mapAsync,read,typedArrayAccess')
  .desc(`Use various TypedArray types to read back from a mapped buffer`)
  .params(u =>
    u
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combineWithParams([
        { size: 80, range: [] },
        { size: 160, range: [] },
        { size: 160, range: [0, 80] },
        { size: 160, range: [80] },
        { size: 160, range: [40, 120] },
        { size: 160, range: [40] },
      ])
  )
  .fn(async t => {
    const { size, range } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);
    // Fill an array buffer with a variety of values of different types.
    // Layout (byte offsets): u8[0,2) i8[2,4) u16[4,8) i16[8,12) u32[12,20)
    // i32[20,28) f32[28,40) f64[40,80).
    const expectedArrayBuffer = new ArrayBuffer(80);
    const uint8Expected = new Uint8Array(expectedArrayBuffer, 0, 2);
    uint8Expected[0] = 1;
    uint8Expected[1] = 255;
    const int8Expected = new Int8Array(expectedArrayBuffer, 2, 2);
    int8Expected[0] = -1;
    int8Expected[1] = 127;
    const uint16Expected = new Uint16Array(expectedArrayBuffer, 4, 2);
    uint16Expected[0] = 1;
    uint16Expected[1] = 65535;
    const int16Expected = new Int16Array(expectedArrayBuffer, 8, 2);
    int16Expected[0] = -1;
    int16Expected[1] = 32767;
    const uint32Expected = new Uint32Array(expectedArrayBuffer, 12, 2);
    uint32Expected[0] = 1;
    uint32Expected[1] = 4294967295;
    const int32Expected = new Int32Array(expectedArrayBuffer, 20, 2);
    // Fixed: these assignments previously used indices 2 and 3, which are out
    // of bounds for this length-2 view. TypedArrays silently ignore
    // out-of-bounds writes, so int32Expected stayed all-zero and the int32
    // comparison below was vacuous.
    int32Expected[0] = -1;
    int32Expected[1] = 2147483647;
    const float32Expected = new Float32Array(expectedArrayBuffer, 28, 3);
    float32Expected[0] = 1;
    float32Expected[1] = -1;
    float32Expected[2] = 12345.6789;
    const float64Expected = new Float64Array(expectedArrayBuffer, 40, 5);
    float64Expected[0] = 1;
    float64Expected[1] = -1;
    float64Expected[2] = 12345.6789;
    float64Expected[3] = Number.MAX_VALUE;
    float64Expected[4] = Number.MIN_VALUE;
    const buffer = t.device.createBuffer({
      mappedAtCreation: true,
      size,
      usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
    });
    const init = buffer.getMappedRange(...range);
    // Copy the expected values into the mapped range.
    assert(init.byteLength === rangeSize);
    memcpy({ src: expectedArrayBuffer }, { dst: init });
    buffer.unmap();
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
    const mappedArrayBuffer = buffer.getMappedRange(...range);
    t.expectOK(checkElementsEqual(new Uint8Array(mappedArrayBuffer, 0, 2), uint8Expected));
    t.expectOK(checkElementsEqual(new Int8Array(mappedArrayBuffer, 2, 2), int8Expected));
    t.expectOK(checkElementsEqual(new Uint16Array(mappedArrayBuffer, 4, 2), uint16Expected));
    t.expectOK(checkElementsEqual(new Int16Array(mappedArrayBuffer, 8, 2), int16Expected));
    t.expectOK(checkElementsEqual(new Uint32Array(mappedArrayBuffer, 12, 2), uint32Expected));
    t.expectOK(checkElementsEqual(new Int32Array(mappedArrayBuffer, 20, 2), int32Expected));
    t.expectOK(checkElementsEqual(new Float32Array(mappedArrayBuffer, 28, 3), float32Expected));
    t.expectOK(checkElementsEqual(new Float64Array(mappedArrayBuffer, 40, 5), float64Expected));
  });
g.test('mappedAtCreation')
.params(
params()
.combine(kCases) //
.combine(pbool('mappable'))
.desc(
`Use mappedAtCreation to write to various ranges of variously-sized buffers created either
with or without the MAP_WRITE usage (since this could affect the mappedAtCreation upload path),
then expectContents (which does copyBufferToBuffer + map-read) to ensure the contents were written.`
)
.fn(async t => {
var _range$3;
.params(u =>
u //
.combine('mappable', [false, true])
.beginSubcases()
.combineWithParams(kSubcases)
)
.fn(t => {
const { size, range, mappable } = t.params;
const [, rangeSize] = reifyMapRange(size, range);
@ -99,12 +303,206 @@ g.test('mappedAtCreation')
size,
usage: GPUBufferUsage.COPY_SRC | (mappable ? GPUBufferUsage.MAP_WRITE : 0),
});
const arrayBuffer = buffer.getMappedRange(...range);
t.checkMapWrite(
buffer,
(_range$3 = range[0]) !== null && _range$3 !== void 0 ? _range$3 : 0,
arrayBuffer,
rangeSize
);
t.checkMapWrite(buffer, range[0] ?? 0, arrayBuffer, rangeSize);
});
// Writes via an initial mapping (mappedAtCreation or mapAsync), unmaps, then
// maps again for WRITE and checks previously written bytes are still visible.
g.test('remapped_for_write')
  .desc(
    `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers created
with the MAP_WRITE usage, then mapAsync again and ensure that the previously written values are
still present in the mapped buffer.`
  )
  .params(u =>
    u //
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combine('mappedAtCreation', [false, true])
      .combineWithParams(kSubcases)
  )
  .fn(async t => {
    const { size, range, mappedAtCreation } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);
    const buffer = t.device.createBuffer({
      mappedAtCreation,
      size,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
    });
    // If the buffer is not mappedAtCreation map it now.
    if (!mappedAtCreation) {
      await buffer.mapAsync(GPUMapMode.WRITE);
    }
    // Set the initial contents of the buffer.
    const init = buffer.getMappedRange(...range);
    assert(init.byteLength === rangeSize);
    const expected = new Uint32Array(new ArrayBuffer(rangeSize));
    const data = new Uint32Array(init);
    for (let i = 0; i < data.length; ++i) {
      data[i] = expected[i] = i + 1;
    }
    buffer.unmap();
    // Check that upon remapping for WRITE the values in the buffer are
    // still the same.
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
    const actual = new Uint8Array(buffer.getMappedRange(...range));
    t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
  });
// Checks GPUBuffer.mapState transitions for a mappedAtCreation buffer:
// 'mapped' at creation (even for an invalid buffer), 'unmapped' after
// unmap() or destroy().
g.test('mappedAtCreation,mapState')
  .desc('Test that exposed map state of buffer created with mappedAtCreation has expected values.')
  .params(u =>
    u
      .combine('usageType', ['invalid', 'read', 'write'])
      .combine('afterUnmap', [false, true])
      .combine('afterDestroy', [false, true])
  )
  .fn(t => {
    const { usageType, afterUnmap, afterDestroy } = t.params;
    const usage =
      usageType === 'read'
        ? GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
        : usageType === 'write'
        ? GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE
        : 0;
    // usage 0 is invalid and generates a validation error on creation.
    const validationError = usage === 0;
    const size = 8;
    const range = [0, 8];
    let buffer;
    // NOTE(review): createBuffer still returns a buffer object when it
    // generates a validation error, so `buffer` is usable below either way.
    t.expectValidationError(() => {
      buffer = t.device.createBuffer({
        mappedAtCreation: true,
        size,
        usage,
      });
    }, validationError);
    // mapState must be "mapped" regardless of validation error
    t.expect(buffer.mapState === 'mapped');
    // getMappedRange must not change the map state
    buffer.getMappedRange(...range);
    t.expect(buffer.mapState === 'mapped');
    if (afterUnmap) {
      buffer.unmap();
      t.expect(buffer.mapState === 'unmapped');
    }
    if (afterDestroy) {
      buffer.destroy();
      t.expect(buffer.mapState === 'unmapped');
    }
  });
// Checks GPUBuffer.mapState transitions around mapAsync: 'unmapped' →
// 'pending' while the promise is outstanding → 'mapped' on success or back to
// 'unmapped' on rejection/unmap/destroy; also that mapping an already-mapped
// buffer rejects without changing the state.
g.test('mapAsync,mapState')
  .desc('Test that exposed map state of buffer mapped with mapAsync has expected values.')
  .params(u =>
    u
      .combine('usageType', ['invalid', 'read', 'write'])
      .combine('mapModeType', ['READ', 'WRITE'])
      .combine('beforeUnmap', [false, true])
      .combine('beforeDestroy', [false, true])
      .combine('afterUnmap', [false, true])
      .combine('afterDestroy', [false, true])
  )
  .fn(async t => {
    const {
      usageType,
      mapModeType,
      beforeUnmap,
      beforeDestroy,
      afterUnmap,
      afterDestroy,
    } = t.params;
    const size = 8;
    const range = [0, 8];
    const usage =
      usageType === 'read'
        ? GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
        : usageType === 'write'
        ? GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE
        : 0;
    // usage 0 is invalid and generates a validation error on creation.
    const bufferCreationValidationError = usage === 0;
    const mapMode = GPUMapMode[mapModeType];
    let buffer;
    t.expectValidationError(() => {
      buffer = t.device.createBuffer({
        mappedAtCreation: false,
        size,
        usage,
      });
    }, bufferCreationValidationError);
    t.expect(buffer.mapState === 'unmapped');
    {
      // mapAsync fails if the buffer is invalid or the mode doesn't match usage.
      const mapAsyncValidationError =
        bufferCreationValidationError ||
        (mapMode === GPUMapMode.READ && !(usage & GPUBufferUsage.MAP_READ)) ||
        (mapMode === GPUMapMode.WRITE && !(usage & GPUBufferUsage.MAP_WRITE));
      let promise;
      t.expectValidationError(() => {
        promise = buffer.mapAsync(mapMode);
      }, mapAsyncValidationError);
      // State is 'pending' while the mapAsync promise is outstanding,
      // even when the call will eventually reject.
      t.expect(buffer.mapState === 'pending');
      try {
        if (beforeUnmap) {
          buffer.unmap();
          t.expect(buffer.mapState === 'unmapped');
        }
        if (beforeDestroy) {
          buffer.destroy();
          t.expect(buffer.mapState === 'unmapped');
        }
        await promise;
        t.expect(buffer.mapState === 'mapped');
        // getMappedRange must not change the map state
        buffer.getMappedRange(...range);
        t.expect(buffer.mapState === 'mapped');
      } catch {
        // unmapped before resolve, destroyed before resolve, or mapAsync validation error
        // will end up with rejection and 'unmapped'
        t.expect(buffer.mapState === 'unmapped');
      }
    }
    // If buffer is already mapped test mapAsync on already mapped buffer
    if (buffer.mapState === 'mapped') {
      // mapAsync on already mapped buffer must be rejected with a validation error
      // and the map state must keep 'mapped'
      let promise;
      t.expectValidationError(() => {
        promise = buffer.mapAsync(GPUMapMode.WRITE);
      }, true);
      t.expect(buffer.mapState === 'mapped');
      try {
        await promise;
        t.fail('mapAsync on already mapped buffer must not succeed.');
      } catch {
        t.expect(buffer.mapState === 'mapped');
      }
    }
    if (afterUnmap) {
      buffer.unmap();
      t.expect(buffer.mapState === 'unmapped');
    }
    if (afterDestroy) {
      buffer.destroy();
      t.expect(buffer.mapState === 'unmapped');
    }
  });

View file

@ -0,0 +1,90 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Tests for the behavior of ArrayBuffers returned by getMappedRange.
TODO: Add tests that transfer to another thread instead of just using MessageChannel.
TODO: Add tests for any other Web APIs that can detach ArrayBuffers.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { timeout } from '../../../../common/util/timeout.js';
import { GPUTest } from '../../../gpu_test.js';
import { checkElementsEqual } from '../../../util/check_contents.js';
export const g = makeTestGroup(GPUTest);
g.test('postMessage')
  .desc(
    `Using postMessage to send a getMappedRange-returned ArrayBuffer throws a TypeError
if it was included in the transfer list. Otherwise, it makes a copy.
Test combinations of transfer={false, true}, mapMode={read,write}.`
  )
  .params(u =>
    u //
      .combine('transfer', [false, true])
      .combine('mapMode', ['READ', 'WRITE'])
  )
  .fn(async t => {
    const { transfer, mapMode } = t.params;
    const kSize = 1024;
    // Populate initial data: word i holds the value i.
    const initialData = new Uint32Array(new ArrayBuffer(kSize));
    for (let i = 0; i < initialData.length; ++i) {
      initialData[i] = i;
    }
    // Buffer usage is chosen to match the map mode under test.
    const buf = t.makeBufferWithContents(
      initialData,
      mapMode === 'WRITE' ? GPUBufferUsage.MAP_WRITE : GPUBufferUsage.MAP_READ
    );
    await buf.mapAsync(GPUMapMode[mapMode]);
    // ab1 is the mapped ArrayBuffer whose postMessage behavior is under test.
    const ab1 = buf.getMappedRange();
    t.expect(ab1.byteLength === kSize, 'ab1 should have the size of the buffer');
    const mc = new MessageChannel();
    // Resolves with the received (copied) buffer in the non-transfer case;
    // in the transfer case no message must ever arrive.
    const ab2Promise = new Promise(resolve => {
      mc.port2.onmessage = ev => {
        if (transfer) {
          t.fail(
            `postMessage with ab1 in transfer list should not be received. Unexpected message: ${ev.data}`
          );
        } else {
          resolve(ev.data);
        }
      };
    });
    if (transfer) {
      // Including a mapped ArrayBuffer in the transfer list must throw.
      t.shouldThrow('TypeError', () => mc.port1.postMessage(ab1, [ab1]));
      // Wait to make sure the postMessage isn't received.
      await new Promise(resolve => timeout(resolve, 100));
    } else {
      // Without a transfer list, postMessage makes a copy instead.
      mc.port1.postMessage(ab1);
    }
    t.expect(ab1.byteLength === kSize, 'after postMessage, ab1 should not be detached');
    if (!transfer) {
      const ab2 = await ab2Promise;
      t.expect(ab2.byteLength === kSize, 'ab2 should be the same size');
      const ab2Data = new Uint32Array(ab2, 0, initialData.length);
      // ab2 should have the same initial contents.
      t.expectOK(checkElementsEqual(ab2Data, initialData));
      // Mutations to ab2 should not be visible in ab1 (it is a copy, not a view).
      const ab1Data = new Uint32Array(ab1, 0, initialData.length);
      const abs2NewData = initialData.slice().reverse();
      for (let i = 0; i < ab2Data.length; ++i) {
        ab2Data[i] = abs2NewData[i];
      }
      t.expectOK(checkElementsEqual(ab1Data, initialData));
      t.expectOK(checkElementsEqual(ab2Data, abs2NewData));
    }
    buf.unmap();
    // Unmapping detaches the ArrayBuffer returned by getMappedRange.
    t.expect(ab1.byteLength === 0, 'after unmap, ab1 should be detached');
    // Transferring an already-detached ArrayBuffer must throw a DataCloneError.
    t.shouldThrow('DataCloneError', () => mc.port1.postMessage(ab1, [ab1]));
  });

View file

@ -1,73 +1,80 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = '';
**/ export const description = `
Tests that TypedArrays created when mapping a GPUBuffer are detached when the
buffer is unmapped or destroyed.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import { assert } from '../../../../common/util/util.js';
import { GPUConst } from '../../../constants.js';
import { GPUTest } from '../../../gpu_test.js';
class F extends GPUTest {
checkDetach(buffer, arrayBuffer, unmap, destroy) {
const view = new Uint8Array(arrayBuffer);
this.expect(arrayBuffer.byteLength === 4);
this.expect(view.length === 4);
export const g = makeTestGroup(GPUTest);
if (unmap) buffer.unmap();
if (destroy) buffer.destroy();
this.expect(arrayBuffer.byteLength === 0, 'ArrayBuffer should be detached');
this.expect(view.byteLength === 0, 'ArrayBufferView should be detached');
}
}
export const g = makeTestGroup(F);
g.test('mapAsync,write')
.params([
{ unmap: true, destroy: false }, //
{ unmap: false, destroy: true },
{ unmap: true, destroy: true },
])
g.test('while_mapped')
.desc(
`
Test that a mapped buffers are able to properly detach.
- Tests {mappable, unmappable mapAtCreation, mappable mapAtCreation}
- Tests while {mapped, mapped at creation, mapped at creation then unmapped and mapped again}
- When {unmap, destroy, unmap && destroy, device.destroy} is called`
)
.paramsSubcasesOnly(u =>
u
.combine('mappedAtCreation', [false, true])
.combineWithParams([
{ usage: GPUConst.BufferUsage.COPY_SRC },
{ usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC },
{ usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ },
{
usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC,
mapMode: GPUConst.MapMode.WRITE,
},
{
usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ,
mapMode: GPUConst.MapMode.READ,
},
])
.combineWithParams([
{ unmap: true, destroy: false },
{ unmap: false, destroy: true },
{ unmap: true, destroy: true },
{ unmap: false, destroy: false, deviceDestroy: true },
])
.unless(p => p.mappedAtCreation === false && p.mapMode === undefined)
)
.fn(async t => {
const buffer = t.device.createBuffer({ size: 4, usage: GPUBufferUsage.MAP_WRITE });
await buffer.mapAsync(GPUMapMode.WRITE);
const arrayBuffer = buffer.getMappedRange();
t.checkDetach(buffer, arrayBuffer, t.params.unmap, t.params.destroy);
});
const { usage, mapMode, mappedAtCreation, unmap, destroy, deviceDestroy } = t.params;
g.test('mapAsync,read')
.params([
{ unmap: true, destroy: false }, //
{ unmap: false, destroy: true },
{ unmap: true, destroy: true },
])
.fn(async t => {
const buffer = t.device.createBuffer({ size: 4, usage: GPUBufferUsage.MAP_READ });
await buffer.mapAsync(GPUMapMode.READ);
const arrayBuffer = buffer.getMappedRange();
t.checkDetach(buffer, arrayBuffer, t.params.unmap, t.params.destroy);
});
g.test('create_mapped')
.params([
{ unmap: true, destroy: false },
{ unmap: false, destroy: true },
{ unmap: true, destroy: true },
])
.fn(async t => {
const desc = {
mappedAtCreation: true,
let device = t.device;
if (deviceDestroy) {
const adapter = await getGPU(t.rec).requestAdapter();
assert(adapter !== null);
device = await adapter.requestDevice();
}
const buffer = device.createBuffer({
size: 4,
usage: GPUBufferUsage.MAP_WRITE,
};
usage,
mappedAtCreation,
});
if (mapMode !== undefined) {
if (mappedAtCreation) {
buffer.unmap();
}
await buffer.mapAsync(mapMode);
}
const buffer = t.device.createBuffer(desc);
const arrayBuffer = buffer.getMappedRange();
const view = new Uint8Array(arrayBuffer);
t.expect(arrayBuffer.byteLength === 4);
t.expect(view.length === 4);
if (t.params.unmap) buffer.unmap();
if (t.params.destroy) buffer.destroy();
if (unmap) buffer.unmap();
if (destroy) buffer.destroy();
if (deviceDestroy) device.destroy();
t.expect(arrayBuffer.byteLength === 0, 'ArrayBuffer should be detached');
t.expect(view.byteLength === 0, 'ArrayBufferView should be detached');
});

View file

@ -1,95 +1,51 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = '';
import { poptions, params, pbool } from '../../../../common/framework/params_builder.js';
**/ export const description =
'Test out-of-memory conditions creating large mappable/mappedAtCreation buffers.';
import { kUnitCaseParamsBuilder } from '../../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { kBufferUsages } from '../../../capability_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { kMaxSafeMultipleOf8 } from '../../../util/math.js';
// A multiple of 8 guaranteed to be way too large to allocate (just under 8 pebibytes).
// (Note this is likely to exceed limitations other than just the system's
// physical memory - so may test codepaths other than "true" OOM.)
const MAX_ALIGNED_SAFE_INTEGER = Number.MAX_SAFE_INTEGER - 7;
const oomAndSizeParams = kUnitCaseParamsBuilder
.combine('oom', [false, true])
.expand('size', ({ oom }) => {
return oom
? [
kMaxSafeMultipleOf8,
0x20_0000_0000, // 128 GB
]
: [16];
});
export const g = makeTestGroup(GPUTest);
g.test('mapAsync')
.params(
params()
.combine(pbool('oom')) //
.combine(pbool('write'))
)
.fn(async t => {
const { oom, write } = t.params;
const size = oom ? MAX_ALIGNED_SAFE_INTEGER : 16;
const buffer = t.expectGPUError(
'out-of-memory',
() =>
t.device.createBuffer({
size,
usage: write ? GPUBufferUsage.MAP_WRITE : GPUBufferUsage.MAP_READ,
}),
oom
);
const promise = t.expectGPUError(
'validation', // Should be a validation error since the buffer is invalid.
() => buffer.mapAsync(write ? GPUMapMode.WRITE : GPUMapMode.READ),
oom
);
if (oom) {
// Should also reject in addition to the validation error.
t.shouldReject('OperationError', promise);
} else {
await promise;
const arraybuffer = buffer.getMappedRange();
t.expect(arraybuffer.byteLength === size);
buffer.unmap();
t.expect(arraybuffer.byteLength === 0);
}
});
g.test('mappedAtCreation')
.params(
params()
.combine(pbool('oom')) //
.combine(poptions('usage', kBufferUsages))
.desc(
`Test creating a very large buffer mappedAtCreation buffer should throw a RangeError only
because such a large allocation cannot be created when we initialize an active buffer mapping.
`
)
.fn(async t => {
const { oom, usage } = t.params;
const size = oom ? MAX_ALIGNED_SAFE_INTEGER : 16;
.params(
oomAndSizeParams //
.beginSubcases()
.combine('usage', kBufferUsages)
)
.fn(t => {
const { oom, usage, size } = t.params;
const buffer = t.expectGPUError(
'out-of-memory',
() => t.device.createBuffer({ mappedAtCreation: true, size, usage }),
oom
);
const f = () => buffer.getMappedRange(0, size);
const f = () => t.device.createBuffer({ mappedAtCreation: true, size, usage });
if (oom) {
// getMappedRange is normally valid on OOM buffers, but this one fails because the
// (default) range is too large to create the returned ArrayBuffer.
t.shouldThrow('RangeError', f);
} else {
f();
const buffer = f();
const mapping = buffer.getMappedRange();
t.expect(mapping.byteLength === size, 'Mapping should be successful');
buffer.unmap();
t.expect(mapping.byteLength === 0, 'Mapping should be detached');
}
});
g.test('mappedAtCreation,smaller_getMappedRange')
.params(poptions('usage', kBufferUsages))
.fn(async t => {
const { usage } = t.params;
const size = MAX_ALIGNED_SAFE_INTEGER;
const buffer = t.expectGPUError('out-of-memory', () =>
t.device.createBuffer({ mappedAtCreation: true, size, usage })
);
// Smaller range inside a too-big mapping
const mapping = buffer.getMappedRange(0, 16);
t.expect(mapping.byteLength === 16);
buffer.unmap();
t.expect(mapping.byteLength === 0);
});

View file

@ -1,6 +1,6 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert } from '../../../../common/framework/util/util.js';
**/ import { assert } from '../../../../common/util/util.js';
import { GPUTest } from '../../../gpu_test.js';
export class MappingTest extends GPUTest {
checkMapWrite(buffer, offset, mappedContents, size) {
@ -14,7 +14,7 @@ export class MappingTest extends GPUTest {
}
buffer.unmap();
this.expectContents(buffer, expected, offset);
this.expectGPUBufferValuesEqual(buffer, expected, offset);
}
checkMapWriteZeroed(arrayBuffer, expectedSize) {

View file

@ -0,0 +1,30 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Tests for valid operations with various client-side thread-shared state of GPUBuffers.
States to test:
- mapping pending
- mapped
- mapped at creation
- mapped at creation, then unmapped
- mapped at creation, then unmapped, then re-mapped
- destroyed
TODO: Look for more things to test.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
// Placeholder: cross-thread serialization of a GPUBuffer in each of the states
// listed in the file description. Not yet implemented upstream.
g.test('serialize')
  .desc(
    `Copy a GPUBuffer to another thread while it is in various states on
{the sending thread, yet another thread}.`
  )
  .unimplemented();
// Placeholder: destroying a buffer on one thread while another thread holds it.
g.test('destroyed')
  .desc(`Destroy on one thread while in various states in another thread.`)
  .unimplemented();

View file

@ -4,12 +4,101 @@
Basic tests.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { memcpy } from '../../../../common/util/util.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('empty').fn(async t => {
g.test('empty').fn(t => {
const encoder = t.device.createCommandEncoder();
const cmd = encoder.finish();
t.device.defaultQueue.submit([cmd]);
t.device.queue.submit([cmd]);
});
// Round-trip 4 bytes buffer -> texture -> buffer and verify the data survives.
g.test('b2t2b').fn(t => {
  const data = new Uint32Array([0x01020304]);
  // Source buffer, filled with the test word via mappedAtCreation.
  const src = t.device.createBuffer({
    mappedAtCreation: true,
    size: 4,
    usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
  });
  memcpy({ src: data }, { dst: src.getMappedRange() });
  src.unmap();
  // Destination buffer read back at the end.
  const dst = t.device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
  });
  // Intermediate 1x1 'rgba8uint' texture used as the hop between the buffers.
  const mid = t.device.createTexture({
    size: { width: 1, height: 1, depthOrArrayLayers: 1 },
    format: 'rgba8uint',
    usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
  });
  const encoder = t.device.createCommandEncoder();
  encoder.copyBufferToTexture(
    { buffer: src, bytesPerRow: 256 },
    { texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  encoder.copyTextureToBuffer(
    { texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { buffer: dst, bytesPerRow: 256 },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  t.device.queue.submit([encoder.finish()]);
  // The round-tripped contents must equal the original word.
  t.expectGPUBufferValuesEqual(dst, data);
});
// Same as b2t2b, but with an extra texture-to-texture hop in the middle:
// buffer -> texture -> texture -> buffer.
g.test('b2t2t2b').fn(t => {
  const data = new Uint32Array([0x01020304]);
  const src = t.device.createBuffer({
    mappedAtCreation: true,
    size: 4,
    usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
  });
  memcpy({ src: data }, { dst: src.getMappedRange() });
  src.unmap();
  const dst = t.device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
  });
  // Two identical intermediate textures share one descriptor.
  const midDesc = {
    size: { width: 1, height: 1, depthOrArrayLayers: 1 },
    format: 'rgba8uint',
    usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
  };
  const mid1 = t.device.createTexture(midDesc);
  const mid2 = t.device.createTexture(midDesc);
  const encoder = t.device.createCommandEncoder();
  encoder.copyBufferToTexture(
    { buffer: src, bytesPerRow: 256 },
    { texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  encoder.copyTextureToTexture(
    { texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  encoder.copyTextureToBuffer(
    { texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { buffer: dst, bytesPerRow: 256 },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  t.device.queue.submit([encoder.finish()]);
  t.expectGPUBufferValuesEqual(dst, data);
});

View file

@ -0,0 +1,55 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
API operations tests for clearBuffer.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('clear')
  .desc(
    `Validate the correctness of the clear by filling the srcBuffer with testable data, doing
clearBuffer(), and verifying the content of the whole srcBuffer with MapRead:
Clear {4 bytes, part of, the whole} buffer {with, without} a non-zero valid offset that
- covers the whole buffer
- covers the beginning of the buffer
- covers the end of the buffer
- covers neither the beginning nor the end of the buffer`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('offset', [0, 4, 8, 16, undefined])
      .combine('size', [0, 4, 8, 16, undefined])
      // Buffer either exactly fits the cleared range or has 8 extra bytes,
      // so clears at the end and in the middle are both exercised.
      .expand('bufferSize', p => [
        (p.offset ?? 0) + (p.size ?? 16),
        (p.offset ?? 0) + (p.size ?? 16) + 8,
      ])
  )
  .fn(t => {
    const { offset, size, bufferSize } = t.params;
    // Fill with a nonzero, position-dependent pattern (byte i holds i + 1).
    const bufferData = new Uint8Array(bufferSize);
    for (let i = 0; i < bufferSize; ++i) {
      bufferData[i] = i + 1;
    }
    const buffer = t.makeBufferWithContents(
      bufferData,
      GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC
    );
    const encoder = t.device.createCommandEncoder();
    encoder.clearBuffer(buffer, offset, size);
    t.device.queue.submit([encoder.finish()]);
    // Mirror the defaulting behavior: offset defaults to 0, size defaults to
    // the rest of the buffer. Zero out the same range in the expectation.
    const expectOffset = offset ?? 0;
    const expectSize = size ?? bufferSize - expectOffset;
    for (let i = 0; i < expectSize; ++i) {
      bufferData[expectOffset + i] = 0;
    }
    t.expectGPUBufferValuesEqual(buffer, bufferData);
  });

View file

@ -1,124 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
copy{Buffer,Texture}To{Buffer,Texture} tests.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('b2b').fn(async t => {
const data = new Uint32Array([0x01020304]);
const src = t.device.createBuffer({
mappedAtCreation: true,
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
new Uint32Array(src.getMappedRange()).set(data);
src.unmap();
const dst = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
const encoder = t.device.createCommandEncoder();
encoder.copyBufferToBuffer(src, 0, dst, 0, 4);
t.device.defaultQueue.submit([encoder.finish()]);
t.expectContents(dst, data);
});
g.test('b2t2b').fn(async t => {
const data = new Uint32Array([0x01020304]);
const src = t.device.createBuffer({
mappedAtCreation: true,
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
new Uint32Array(src.getMappedRange()).set(data);
src.unmap();
const dst = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
const mid = t.device.createTexture({
size: { width: 1, height: 1, depth: 1 },
format: 'rgba8uint',
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
});
const encoder = t.device.createCommandEncoder();
encoder.copyBufferToTexture(
{ buffer: src, bytesPerRow: 256 },
{ texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ width: 1, height: 1, depth: 1 }
);
encoder.copyTextureToBuffer(
{ texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ buffer: dst, bytesPerRow: 256 },
{ width: 1, height: 1, depth: 1 }
);
t.device.defaultQueue.submit([encoder.finish()]);
t.expectContents(dst, data);
});
g.test('b2t2t2b').fn(async t => {
const data = new Uint32Array([0x01020304]);
const src = t.device.createBuffer({
mappedAtCreation: true,
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
new Uint32Array(src.getMappedRange()).set(data);
src.unmap();
const dst = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
const midDesc = {
size: { width: 1, height: 1, depth: 1 },
format: 'rgba8uint',
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
};
const mid1 = t.device.createTexture(midDesc);
const mid2 = t.device.createTexture(midDesc);
const encoder = t.device.createCommandEncoder();
encoder.copyBufferToTexture(
{ buffer: src, bytesPerRow: 256 },
{ texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ width: 1, height: 1, depth: 1 }
);
encoder.copyTextureToTexture(
{ texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ width: 1, height: 1, depth: 1 }
);
encoder.copyTextureToBuffer(
{ texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ buffer: dst, bytesPerRow: 256 },
{ width: 1, height: 1, depth: 1 }
);
t.device.defaultQueue.submit([encoder.finish()]);
t.expectContents(dst, data);
});

View file

@ -0,0 +1,110 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = 'copyBufferToBuffer operation tests';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('single')
  .desc(
    `Validate the correctness of the copy by filling the srcBuffer with testable data, doing
CopyBufferToBuffer() copy, and verifying the content of the whole dstBuffer with MapRead:
Copy {4 bytes, part of, the whole} srcBuffer to the dstBuffer {with, without} a non-zero valid
srcOffset that
- covers the whole dstBuffer
- covers the beginning of the dstBuffer
- covers the end of the dstBuffer
- covers neither the beginning nor the end of the dstBuffer`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('srcOffset', [0, 4, 8, 16])
      .combine('dstOffset', [0, 4, 8, 16])
      .combine('copySize', [0, 4, 8, 16])
      // Each buffer either exactly fits its offset+copySize or has 8 spare bytes.
      .expand('srcBufferSize', p => [p.srcOffset + p.copySize, p.srcOffset + p.copySize + 8])
      .expand('dstBufferSize', p => [p.dstOffset + p.copySize, p.dstOffset + p.copySize + 8])
  )
  .fn(t => {
    const { srcOffset, dstOffset, copySize, srcBufferSize, dstBufferSize } = t.params;
    // Position-dependent pattern so any misplaced byte is detectable.
    const srcData = new Uint8Array(srcBufferSize);
    for (let i = 0; i < srcBufferSize; ++i) {
      srcData[i] = i + 1;
    }
    const src = t.makeBufferWithContents(srcData, GPUBufferUsage.COPY_SRC);
    const dst = t.device.createBuffer({
      size: dstBufferSize,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
    });
    t.trackForCleanup(dst);
    const encoder = t.device.createCommandEncoder();
    encoder.copyBufferToBuffer(src, srcOffset, dst, dstOffset, copySize);
    t.device.queue.submit([encoder.finish()]);
    // Expected: zeros everywhere except the copied window.
    const expectedDstData = new Uint8Array(dstBufferSize);
    for (let i = 0; i < copySize; ++i) {
      expectedDstData[dstOffset + i] = srcData[srcOffset + i];
    }
    t.expectGPUBufferValuesEqual(dst, expectedDstData);
  });
g.test('state_transitions')
  .desc(
    `Test proper state transitions/barriers happen between copy commands.
Copy part of src to dst, then a different part of dst to src, and check contents of both.`
  )
  .fn(t => {
    const srcData = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]);
    const dstData = new Uint8Array([10, 20, 30, 40, 50, 60, 70, 80]);
    const src = t.makeBufferWithContents(
      srcData,
      GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
    );
    const dst = t.makeBufferWithContents(
      dstData,
      GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
    );
    const encoder = t.device.createCommandEncoder();
    // First copy writes dst[4..8); second copy reads dst[0..4), which the
    // first copy did not touch, and writes src[4..8).
    encoder.copyBufferToBuffer(src, 0, dst, 4, 4);
    encoder.copyBufferToBuffer(dst, 0, src, 4, 4);
    t.device.queue.submit([encoder.finish()]);
    const expectedSrcData = new Uint8Array([1, 2, 3, 4, 10, 20, 30, 40]);
    const expectedDstData = new Uint8Array([10, 20, 30, 40, 1, 2, 3, 4]);
    t.expectGPUBufferValuesEqual(src, expectedSrcData);
    t.expectGPUBufferValuesEqual(dst, expectedDstData);
  });
g.test('copy_order')
  .desc(
    `Test copy commands in one command buffer occur in the correct order.
First copies one region from src to dst, then another region from src to an overlapping region
of dst, then checks the dst buffer's contents.`
  )
  .fn(t => {
    const srcData = new Uint32Array([1, 2, 3, 4, 5, 6, 7, 8]);
    const src = t.makeBufferWithContents(srcData, GPUBufferUsage.COPY_SRC);
    const dst = t.device.createBuffer({
      size: srcData.length * 4,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
    });
    t.trackForCleanup(dst);
    const encoder = t.device.createCommandEncoder();
    // Second copy overlaps dst bytes [8..16) and must overwrite the first
    // copy's result there, proving in-order execution.
    encoder.copyBufferToBuffer(src, 0, dst, 0, 16);
    encoder.copyBufferToBuffer(src, 16, dst, 8, 16);
    t.device.queue.submit([encoder.finish()]);
    const expectedDstData = new Uint32Array([1, 2, 5, 6, 7, 8, 0, 0]);
    t.expectGPUBufferValuesEqual(dst, expectedDstData);
  });

View file

@ -0,0 +1,144 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ import { unreachable } from '../../../../../common/util/util.js';
import { GPUTest } from '../../../../gpu_test.js';
// Shared fixture for programmable-state tests. Provides helpers to build
// bind group layouts/groups and pipelines that compute `out = <algorithm>`
// from two input buffers, for compute passes, render passes, and render bundles.
export class ProgrammableStateTest extends GPUTest {
  // Cache of single-binding bind group layouts, keyed by buffer binding type.
  commonBindGroupLayouts = new Map();
  // Returns (creating and caching on first use) a layout with one buffer
  // binding of the given type, visible to compute and fragment stages.
  getBindGroupLayout(type) {
    if (!this.commonBindGroupLayouts.has(type)) {
      this.commonBindGroupLayouts.set(
        type,
        this.device.createBindGroupLayout({
          entries: [
            {
              binding: 0,
              visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT,
              buffer: { type },
            },
          ],
        })
      );
    }
    return this.commonBindGroupLayouts.get(type);
  }
  // Builds the bindGroupLayouts array for a pipeline layout, placing the two
  // read-only inputs and the storage output at the caller-chosen indices.
  getBindGroupLayouts(indices) {
    const bindGroupLayouts = [];
    bindGroupLayouts[indices.a] = this.getBindGroupLayout('read-only-storage');
    bindGroupLayouts[indices.b] = this.getBindGroupLayout('read-only-storage');
    bindGroupLayouts[indices.out] = this.getBindGroupLayout('storage');
    return bindGroupLayouts;
  }
  // Wraps a whole buffer in a bind group using the cached layout for `type`.
  createBindGroup(buffer, type) {
    return this.device.createBindGroup({
      layout: this.getBindGroupLayout(type),
      entries: [{ binding: 0, resource: { buffer } }],
    });
  }
  // Sets the bind group produced by `factory(index)` at slot `index`.
  setBindGroup(encoder, index, factory) {
    encoder.setBindGroup(index, factory(index));
  }
  // Create a compute pipeline that performs an operation on data from two bind groups,
  // then writes the result to a third bind group.
  createBindingStatePipeline(encoderType, groups, algorithm = 'a.value - b.value') {
    switch (encoderType) {
      case 'compute pass': {
        // WGSL compute shader: single invocation computes `out = <algorithm>`.
        const wgsl = `struct Data {
            value : i32
          };

          @group(${groups.a}) @binding(0) var<storage> a : Data;
          @group(${groups.b}) @binding(0) var<storage> b : Data;
          @group(${groups.out}) @binding(0) var<storage, read_write> out : Data;

          @compute @workgroup_size(1) fn main() {
            out.value = ${algorithm};
            return;
          }
        `;
        return this.device.createComputePipeline({
          layout: this.device.createPipelineLayout({
            bindGroupLayouts: this.getBindGroupLayouts(groups),
          }),
          compute: {
            module: this.device.createShaderModule({
              code: wgsl,
            }),
            entryPoint: 'main',
          },
        });
      }
      case 'render pass':
      case 'render bundle': {
        // Render variant: a point-list pipeline whose fragment shader performs
        // the same storage-buffer write as the compute variant.
        const wgslShaders = {
          vertex: `
            @vertex fn vert_main() -> @builtin(position) vec4<f32> {
              return vec4<f32>(0.5, 0.5, 0.0, 1.0);
            }
          `,
          fragment: `
            struct Data {
              value : i32
            };

            @group(${groups.a}) @binding(0) var<storage> a : Data;
            @group(${groups.b}) @binding(0) var<storage> b : Data;
            @group(${groups.out}) @binding(0) var<storage, read_write> out : Data;

            @fragment fn frag_main() -> @location(0) vec4<f32> {
              out.value = ${algorithm};
              return vec4<f32>(1.0, 0.0, 0.0, 1.0);
            }
          `,
        };
        return this.device.createRenderPipeline({
          layout: this.device.createPipelineLayout({
            bindGroupLayouts: this.getBindGroupLayouts(groups),
          }),
          vertex: {
            module: this.device.createShaderModule({
              code: wgslShaders.vertex,
            }),
            entryPoint: 'vert_main',
          },
          fragment: {
            module: this.device.createShaderModule({
              code: wgslShaders.fragment,
            }),
            entryPoint: 'frag_main',
            targets: [{ format: 'rgba8unorm' }],
          },
          primitive: { topology: 'point-list' },
        });
      }
      default:
        unreachable();
    }
  }
  // Sets the pipeline on any of the three encoder kinds; other encoder types
  // are silently ignored (both branches make the same call — the instanceof
  // checks only narrow the type).
  setPipeline(pass, pipeline) {
    if (pass instanceof GPUComputePassEncoder) {
      pass.setPipeline(pipeline);
    } else if (pass instanceof GPURenderPassEncoder || pass instanceof GPURenderBundleEncoder) {
      pass.setPipeline(pipeline);
    }
  }
  // Issues one unit of work appropriate to the encoder kind: a single-workgroup
  // dispatch for compute, or a single-vertex draw for render pass/bundle.
  dispatchOrDraw(pass) {
    if (pass instanceof GPUComputePassEncoder) {
      pass.dispatchWorkgroups(1);
    } else if (pass instanceof GPURenderPassEncoder) {
      pass.draw(1);
    } else if (pass instanceof GPURenderBundleEncoder) {
      pass.draw(1);
    }
  }
}

View file

@ -0,0 +1,319 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Ensure state is set correctly. Tries to stress state caching (setting different states multiple
times in different orders) for setBindGroup and setPipeline.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUConst } from '../../../../constants.js';
import { kProgrammableEncoderTypes } from '../../../../util/command_buffer_maker.js';
import { ProgrammableStateTest } from './programmable_state_test.js';
export const g = makeTestGroup(ProgrammableStateTest);
const kBufferUsage = GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.STORAGE;
g.test('bind_group_indices')
.desc(
`
Test that bind group indices can be declared in any order, regardless of their order in the shader.
- Test places the value of buffer a - buffer b into the out buffer, then reads the result.
`
)
.params(u =>
u //
.combine('encoderType', kProgrammableEncoderTypes)
.beginSubcases()
.combine('groupIndices', [
{ a: 0, b: 1, out: 2 },
{ a: 1, b: 2, out: 0 },
{ a: 2, b: 0, out: 1 },
{ a: 0, b: 2, out: 1 },
{ a: 2, b: 1, out: 0 },
{ a: 1, b: 0, out: 2 },
])
)
.fn(t => {
const { encoderType, groupIndices } = t.params;
const pipeline = t.createBindingStatePipeline(encoderType, groupIndices);
const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
const bindGroups = {
a: t.createBindGroup(
t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
'read-only-storage'
),
b: t.createBindGroup(
t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
'read-only-storage'
),
out: t.createBindGroup(out, 'storage'),
};
const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
t.setPipeline(encoder, pipeline);
encoder.setBindGroup(groupIndices.a, bindGroups.a);
encoder.setBindGroup(groupIndices.b, bindGroups.b);
encoder.setBindGroup(groupIndices.out, bindGroups.out);
t.dispatchOrDraw(encoder);
validateFinishAndSubmit(true, true);
t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
});
g.test('bind_group_order')
.desc(
`
Test that the order in which you set the bind groups doesn't matter.
`
)
.params(u =>
u //
.combine('encoderType', kProgrammableEncoderTypes)
.beginSubcases()
.combine('setOrder', [
['a', 'b', 'out'],
['b', 'out', 'a'],
['out', 'a', 'b'],
['b', 'a', 'out'],
['a', 'out', 'b'],
['out', 'b', 'a'],
])
)
.fn(t => {
const { encoderType, setOrder } = t.params;
const groupIndices = { a: 0, b: 1, out: 2 };
const pipeline = t.createBindingStatePipeline(encoderType, groupIndices);
const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
const bindGroups = {
a: t.createBindGroup(
t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
'read-only-storage'
),
b: t.createBindGroup(
t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
'read-only-storage'
),
out: t.createBindGroup(out, 'storage'),
};
const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
t.setPipeline(encoder, pipeline);
for (const bindingName of setOrder) {
encoder.setBindGroup(groupIndices[bindingName], bindGroups[bindingName]);
}
t.dispatchOrDraw(encoder);
validateFinishAndSubmit(true, true);
t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
});
g.test('bind_group_before_pipeline')
  .desc(
    `
Test that setting bind groups prior to setting the pipeline is still valid.
`
  )
  .params(u =>
    u //
      .combine('encoderType', kProgrammableEncoderTypes)
      .beginSubcases()
      .combineWithParams([
        { setBefore: ['a', 'b'], setAfter: ['out'] },
        { setBefore: ['a'], setAfter: ['b', 'out'] },
        { setBefore: ['out', 'b'], setAfter: ['a'] },
        { setBefore: ['a', 'b', 'out'], setAfter: [] },
      ])
  )
  .fn(t => {
    const { encoderType, setBefore, setAfter } = t.params;
    // Slot assignment shared by the pipeline layout and the bind calls below.
    const slotForName = { a: 0, b: 1, out: 2 };
    const pipeline = t.createBindingStatePipeline(encoderType, slotForName);
    const resultBuffer = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
    const makeInput = value =>
      t.createBindGroup(
        t.makeBufferWithContents(new Int32Array([value]), kBufferUsage),
        'read-only-storage'
      );
    const groups = {
      a: makeInput(3),
      b: makeInput(2),
      out: t.createBindGroup(resultBuffer, 'storage'),
    };
    const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
    const bindAll = names => {
      for (const n of names) {
        encoder.setBindGroup(slotForName[n], groups[n]);
      }
    };
    // Some groups are bound before the pipeline is set, the rest afterwards;
    // the dispatch/draw must see all of them regardless.
    bindAll(setBefore);
    t.setPipeline(encoder, pipeline);
    bindAll(setAfter);
    t.dispatchOrDraw(encoder);
    validateFinishAndSubmit(true, true);
    t.expectGPUBufferValuesEqual(resultBuffer, new Int32Array([1]));
  });
g.test('one_bind_group_multiple_slots')
  .desc(
    `
Test that a single bind group may be bound to more than one slot.
`
  )
  .params(u =>
    u //
      .combine('encoderType', kProgrammableEncoderTypes)
  )
  .fn(t => {
    const { encoderType } = t.params;
    const pipeline = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
    // Output seeded to 1; with the same input group (value 3) in both input
    // slots the shader writes 0. NOTE(review): assumes the default pipeline
    // expression is a.value - b.value — confirm in createBindingStatePipeline.
    const resultBuffer = t.makeBufferWithContents(new Int32Array([1]), kBufferUsage);
    const sharedInput = t.createBindGroup(
      t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
      'read-only-storage'
    );
    const outputGroup = t.createBindGroup(resultBuffer, 'storage');
    const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
    t.setPipeline(encoder, pipeline);
    // Binding the same bind group object to multiple slots is legal.
    for (const slot of [0, 1]) {
      encoder.setBindGroup(slot, sharedInput);
    }
    encoder.setBindGroup(2, outputGroup);
    t.dispatchOrDraw(encoder);
    validateFinishAndSubmit(true, true);
    t.expectGPUBufferValuesEqual(resultBuffer, new Int32Array([0]));
  });
g.test('bind_group_multiple_sets')
  .desc(
    `
Test that the last bind group set to a given slot is used when dispatching.
`
  )
  .params(u =>
    u //
      .combine('encoderType', kProgrammableEncoderTypes)
  )
  .fn(t => {
    const { encoderType } = t.params;
    const pipeline = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
    const wrongResult = t.makeBufferWithContents(new Int32Array([-1]), kBufferUsage);
    const result = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
    const makeInput = value =>
      t.createBindGroup(
        t.makeBufferWithContents(new Int32Array([value]), kBufferUsage),
        'read-only-storage'
      );
    const groups = {
      a: makeInput(3),
      b: makeInput(2),
      c: makeInput(5),
      badOut: t.createBindGroup(wrongResult, 'storage'),
      out: t.createBindGroup(result, 'storage'),
    };
    const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
    // Every slot is overwritten at least once (some before the pipeline is
    // set); only the final assignment per slot may affect the dispatch.
    encoder.setBindGroup(1, groups.c);
    t.setPipeline(encoder, pipeline);
    for (const [slot, group] of [
      [0, groups.c],
      [0, groups.a],
      [2, groups.badOut],
      [1, groups.b],
      [2, groups.out],
    ]) {
      encoder.setBindGroup(slot, group);
    }
    t.dispatchOrDraw(encoder);
    validateFinishAndSubmit(true, true);
    t.expectGPUBufferValuesEqual(result, new Int32Array([1]));
    t.expectGPUBufferValuesEqual(wrongResult, new Int32Array([-1]));
  });
g.test('compatible_pipelines')
  .desc('Test that bind groups can be shared between compatible pipelines.')
  .params(u =>
    u //
      .combine('encoderType', kProgrammableEncoderTypes)
  )
  .fn(t => {
    const { encoderType } = t.params;
    // Two pipelines with identical layouts but different compute expressions.
    const pipelineA = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
    const pipelineB = t.createBindingStatePipeline(
      encoderType,
      { a: 0, b: 1, out: 2 },
      'a.value + b.value'
    );
    const resultA = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
    const resultB = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
    const makeInput = value =>
      t.createBindGroup(
        t.makeBufferWithContents(new Int32Array([value]), kBufferUsage),
        'read-only-storage'
      );
    const inputA = makeInput(3);
    const inputB = makeInput(2);
    const outGroupA = t.createBindGroup(resultA, 'storage');
    const outGroupB = t.createBindGroup(resultB, 'storage');
    const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
    // The input groups are bound once and reused across both pipelines;
    // only the output group is swapped between dispatches.
    encoder.setBindGroup(0, inputA);
    encoder.setBindGroup(1, inputB);
    t.setPipeline(encoder, pipelineA);
    encoder.setBindGroup(2, outGroupA);
    t.dispatchOrDraw(encoder);
    t.setPipeline(encoder, pipelineB);
    encoder.setBindGroup(2, outGroupB);
    t.dispatchOrDraw(encoder);
    validateFinishAndSubmit(true, true);
    t.expectGPUBufferValuesEqual(resultA, new Int32Array([1]));
    t.expectGPUBufferValuesEqual(resultB, new Int32Array([5]));
  });

View file

@ -0,0 +1,985 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
API operations tests for occlusion queries.
- test query with
- scissor
- sample mask
- alpha to coverage
- stencil
- depth test
- test empty query (no draw) (should be cleared?)
- test via render bundle
- test resolveQuerySet with non-zero firstIndex
- test no queries is zero
- test 0x0 -> 0x3 sample mask
- test 0 -> 1 alpha to coverage
- test resolving twice in same pass keeps values
- test resolving twice across pass keeps values
- test resolveQuerySet destinationOffset
`;
import { kUnitCaseParamsBuilder } from '../../../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { assert, range, unreachable } from '../../../../../common/util/util.js';
import { kMaxQueryCount } from '../../../../capability_info.js';
import { GPUTest } from '../../../../gpu_test.js';
// Query resolve buffer offsets must be 256-byte aligned (used for the 'non-zero' offset cases).
const kRequiredQueryBufferOffsetAlignment = 256;
// Each resolved occlusion query result is one 64-bit (8-byte) unsigned integer.
const kBytesPerQuery = 8;
// Render target dimensions used by every test in this file.
const kTextureSize = [4, 4];
// Queries are issued either directly on a render pass or via a render bundle.
const kRenderModes = ['direct', 'render-bundle'];
// Whether the resolve-buffer / query-set offsets under test are zero or non-zero.
const kBufferOffsets = ['zero', 'non-zero'];
/**
 * Wraps a render pass so that per-pass state (scissor rect, stencil
 * reference) and occlusion queries are issued in an order that is valid for
 * both direct encoding and render-bundle encoding.
 */
class RenderPassHelper {
  constructor(pass, helper) {
    this._pass = pass;
    this._helper = helper;
  }
  // Pass state may only change while no occlusion query is open.
  _assertNoOpenQuery() {
    assert(!this._queryHelper);
  }
  setScissorRect(x, y, width, height) {
    this._assertNoOpenQuery();
    this._pass.setScissorRect(x, y, width, height);
  }
  setStencilReference(ref) {
    this._assertNoOpenQuery();
    this._pass.setStencilReference(ref);
  }
  beginOcclusionQuery(queryIndex) {
    this._assertNoOpenQuery();
    this._pass.beginOcclusionQuery(queryIndex);
    // The starter's helper calls back here when the query is ended so the
    // pass-level endOcclusionQuery happens after any bundle is executed.
    const onEnd = () => {
      assert(!!this._queryHelper);
      this._queryHelper = undefined;
      this._pass.endOcclusionQuery();
    };
    this._queryHelper = this._helper.begin(onEnd);
    return this._queryHelper;
  }
}
/**
 * Issues draw state and draws directly on a render pass encoder for the
 * duration of a single occlusion query. Calling end() permanently
 * invalidates the object.
 */
class QueryHelperDirect {
  constructor(pass, endFn) {
    this._pass = pass;
    this._endFn = endFn;
  }
  // Returns the pass, asserting the helper has not been ended.
  _target() {
    assert(!!this._pass);
    return this._pass;
  }
  setPipeline(pipeline) {
    this._target().setPipeline(pipeline);
  }
  setVertexBuffer(buffer) {
    this._target().setVertexBuffer(0, buffer);
  }
  draw(count) {
    this._target().draw(count);
  }
  end() {
    // Invalidate first so any later use trips an assertion/unreachable.
    const finish = this._endFn;
    this._endFn = unreachable;
    this._pass = undefined;
    finish();
  }
}
/**
 * Creates QueryHelperDirect instances for queries issued straight on the
 * render pass encoder. Only one helper may be live at a time.
 */
class QueryStarterDirect {
  constructor(pass) {
    this._pass = pass;
  }
  begin(endFn) {
    // The previous query must have ended before a new one starts.
    assert(!this._helper);
    const helper = new QueryHelperDirect(this._pass, () => {
      this._helper = undefined;
      endFn();
    });
    this._helper = helper;
    return helper;
  }
}
/**
 * Issues draw state and draws on a render bundle encoder for the duration of
 * a single occlusion query. Calling end() permanently invalidates the object.
 */
class QueryHelperRenderBundle {
  constructor(pass, endFn) {
    this._encoder = pass;
    this._endFn = endFn;
  }
  // Returns the bundle encoder, asserting the helper has not been ended.
  _target() {
    assert(!!this._encoder);
    return this._encoder;
  }
  setPipeline(pipeline) {
    this._target().setPipeline(pipeline);
  }
  setVertexBuffer(buffer) {
    this._target().setVertexBuffer(0, buffer);
  }
  draw(count) {
    this._target().draw(count);
  }
  end() {
    // Invalidate first so any later use trips an assertion/unreachable.
    const finish = this._endFn;
    this._endFn = unreachable;
    this._encoder = undefined;
    finish();
  }
}
/**
 * Helper class for starting a query on a render bundle encoder
 */
class QueryStarterRenderBundle {
  constructor(device, pass, renderPassDescriptor) {
    this._device = device;
    this._pass = pass;
    // Build a render bundle descriptor compatible with the render pass:
    // the depth/stencil format is inferred from which load ops the pass
    // declares, and the sample count from whether the color attachment has a
    // resolve target (MSAA).
    const colorAttachment = renderPassDescriptor.colorAttachments[0];
    this._renderBundleEncoderDescriptor = {
      colorFormats: ['rgba8unorm'],
      depthStencilFormat: renderPassDescriptor.depthStencilAttachment?.depthLoadOp
        ? 'depth24plus'
        : renderPassDescriptor.depthStencilAttachment?.stencilLoadOp
        ? 'stencil8'
        : undefined,
      sampleCount: colorAttachment.resolveTarget ? 4 : 1,
    };
  }
  begin(endFn) {
    // Only one bundle (and therefore one query) may be in flight at a time.
    assert(!this._encoder);
    this._encoder = this._device.createRenderBundleEncoder(this._renderBundleEncoderDescriptor);
    this._helper = new QueryHelperRenderBundle(this._encoder, () => {
      assert(!!this._encoder);
      assert(!!this._helper);
      // Ending the query finishes the bundle and executes it on the real pass.
      this._pass.executeBundles([this._encoder.finish()]);
      this._helper = undefined;
      this._encoder = undefined;
      endFn();
    });
    return this._helper;
  }
  // Pass-through draw-state setters, valid only while a bundle encoder is open.
  setPipeline(pipeline) {
    assert(!!this._encoder);
    this._encoder.setPipeline(pipeline);
  }
  setVertexBuffer(buffer) {
    assert(!!this._encoder);
    this._encoder.setVertexBuffer(0, buffer);
  }
  draw(count) {
    assert(!!this._encoder);
    this._encoder.draw(count);
  }
}
/**
 * Shared fixture for the occlusion query tests: builds the pipeline, query
 * set, render targets and buffers (setup), then encodes queries, resolves
 * them, and checks each per-query result (runQueryTest).
 */
class OcclusionQueryTest extends GPUTest {
  // trackForCleanup wrappers so every created GPU resource is released after the test.
  createBuffer(desc) {
    return this.trackForCleanup(this.device.createBuffer(desc));
  }
  createTexture(desc) {
    return this.trackForCleanup(this.device.createTexture(desc));
  }
  createQuerySet(desc) {
    return this.trackForCleanup(this.device.createQuerySet(desc));
  }
  createVertexBuffer(data) {
    return this.makeBufferWithContents(data, GPUBufferUsage.VERTEX);
  }
  // One triangle over the lower-left quadrant of clip space, at depth z.
  createSingleTriangleVertexBuffer(z) {
    return this.createVertexBuffer(new Float32Array([-0.5, -0.5, z, 0.5, -0.5, z, -0.5, 0.5, z]));
  }
  // Maps a MAP_READ buffer and returns a copy of its contents as 64-bit counts.
  async readBufferAsBigUint64(buffer) {
    await buffer.mapAsync(GPUMapMode.READ);
    const result = new BigUint64Array(buffer.getMappedRange().slice(0));
    buffer.unmap();
    return result;
  }
  /**
   * Creates every resource a query test needs and returns them as a bag.
   * Optional params select depth/stencil, multisampling, alpha-to-coverage,
   * color write mask, non-zero resolve/query-set offsets, and render mode.
   */
  setup(params) {
    const {
      numQueries,
      depthStencilFormat,
      sampleMask = 0xffffffff,
      alpha,
      sampleCount,
      writeMask = 0xf,
      bufferOffset,
      renderMode,
    } = params;
    const { device } = this;
    // 'non-zero' exercises the 256-byte-aligned resolve destination offset.
    const queryResolveBufferOffset =
      bufferOffset === 'non-zero' ? kRequiredQueryBufferOffsetAlignment : 0;
    const queryResolveBuffer = this.createBuffer({
      size: numQueries * 8 + queryResolveBufferOffset,
      usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC,
    });
    // Staging buffer the resolved results are copied into for mapping.
    const readBuffer = this.createBuffer({
      size: numQueries * kBytesPerQuery,
      usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
    });
    const vertexBuffer = this.createSingleTriangleVertexBuffer(0);
    const renderTargetTexture = this.createTexture({
      format: 'rgba8unorm',
      size: kTextureSize,
      usage: GPUTextureUsage.RENDER_ATTACHMENT,
    });
    // When multisampling, render into an MSAA target and resolve into renderTargetTexture.
    const multisampleRenderTarget = sampleCount
      ? this.createTexture({
          size: kTextureSize,
          format: 'rgba8unorm',
          usage: GPUTextureUsage.RENDER_ATTACHMENT,
          sampleCount,
        })
      : null;
    const depthStencilTexture = depthStencilFormat
      ? this.createTexture({
          format: depthStencilFormat,
          size: kTextureSize,
          usage: GPUTextureUsage.RENDER_ATTACHMENT,
        })
      : undefined;
    // Fragment alpha is baked into the shader source for the alpha-to-coverage cases.
    const module = device.createShaderModule({
      code: `
      @vertex fn vs(@location(0) pos: vec4f) -> @builtin(position) vec4f {
        return pos;
      }
      @fragment fn fs() -> @location(0) vec4f {
        return vec4f(0, 0, 0, ${alpha === undefined ? 1 : alpha});
      }
      `,
    });
    const haveDepth = !!depthStencilFormat && depthStencilFormat.includes('depth');
    const haveStencil = !!depthStencilFormat && depthStencilFormat.includes('stencil');
    assert(!(haveDepth && haveStencil), 'code does not handle mixed depth-stencil');
    const pipeline = device.createRenderPipeline({
      layout: 'auto',
      vertex: {
        module,
        entryPoint: 'vs',
        buffers: [
          {
            arrayStride: 3 * 4,
            attributes: [
              {
                shaderLocation: 0,
                offset: 0,
                format: 'float32x3',
              },
            ],
          },
        ],
      },
      fragment: {
        module,
        entryPoint: 'fs',
        targets: [{ format: 'rgba8unorm', writeMask }],
      },
      // When alpha-to-coverage is tested the explicit sample mask is left wide open.
      ...(sampleCount && {
        multisample: {
          count: sampleCount,
          mask: alpha === undefined ? sampleMask : 0xffffffff,
          alphaToCoverageEnabled: alpha !== undefined,
        },
      }),
      ...(depthStencilTexture && {
        depthStencil: {
          format: depthStencilFormat,
          depthWriteEnabled: haveDepth,
          depthCompare: haveDepth ? 'less-equal' : 'always',
          ...(haveStencil && {
            stencilFront: {
              compare: 'equal',
            },
          }),
        },
      }),
    });
    // 'non-zero' makes the query set larger than needed and starts queries at index 7.
    const querySetOffset = params?.querySetOffset === 'non-zero' ? 7 : 0;
    const occlusionQuerySet = this.createQuerySet({
      type: 'occlusion',
      count: numQueries + querySetOffset,
    });
    const renderPassDescriptor = {
      colorAttachments: sampleCount
        ? [
            {
              view: multisampleRenderTarget.createView(),
              resolveTarget: renderTargetTexture.createView(),
              loadOp: 'clear',
              storeOp: 'store',
            },
          ]
        : [
            {
              view: renderTargetTexture.createView(),
              loadOp: 'clear',
              storeOp: 'store',
            },
          ],
      // Depth is cleared to 0.5 so z=0 geometry passes 'less-equal' and z=1 fails.
      ...(haveDepth && {
        depthStencilAttachment: {
          view: depthStencilTexture.createView(),
          depthLoadOp: 'clear',
          depthStoreOp: 'store',
          depthClearValue: 0.5,
        },
      }),
      // Stencil is cleared to 0 so only reference 0 passes the 'equal' compare.
      ...(haveStencil && {
        depthStencilAttachment: {
          view: depthStencilTexture.createView(),
          stencilClearValue: 0,
          stencilLoadOp: 'clear',
          stencilStoreOp: 'store',
        },
      }),
      occlusionQuerySet,
    };
    return {
      readBuffer,
      vertexBuffer,
      queryResolveBuffer,
      queryResolveBufferOffset,
      occlusionQuerySet,
      renderTargetTexture,
      renderPassDescriptor,
      pipeline,
      depthStencilTexture,
      querySetOffset,
      renderMode,
    };
  }
  /**
   * Encodes one query per index via encodePassFn, resolves the query set,
   * copies the results to a mappable buffer, then calls
   * checkQueryIndexResultFn(passed, queryIndex) for each query.
   * Passing a null renderPassDescriptor skips encoding and only resolves.
   * Returns the raw BigUint64Array of results.
   */
  async runQueryTest(resources, renderPassDescriptor, encodePassFn, checkQueryIndexResultFn) {
    const { device } = this;
    const {
      readBuffer,
      queryResolveBuffer,
      queryResolveBufferOffset,
      occlusionQuerySet,
      querySetOffset,
      renderMode = 'direct',
    } = resources;
    const numQueries = occlusionQuerySet.count - querySetOffset;
    // Actual query indices may start at a non-zero offset within the set.
    const queryIndices = range(numQueries, i => i + querySetOffset);
    const encoder = device.createCommandEncoder();
    if (renderPassDescriptor) {
      const pass = encoder.beginRenderPass(renderPassDescriptor);
      const helper = new RenderPassHelper(
        pass,
        renderMode === 'direct'
          ? new QueryStarterDirect(pass)
          : new QueryStarterRenderBundle(device, pass, renderPassDescriptor)
      );
      for (const queryIndex of queryIndices) {
        encodePassFn(helper, queryIndex);
      }
      pass.end();
    }
    encoder.resolveQuerySet(
      occlusionQuerySet,
      querySetOffset,
      numQueries,
      queryResolveBuffer,
      queryResolveBufferOffset
    );
    encoder.copyBufferToBuffer(
      queryResolveBuffer,
      queryResolveBufferOffset,
      readBuffer,
      0,
      readBuffer.size
    );
    device.queue.submit([encoder.finish()]);
    const result = await this.readBufferAsBigUint64(readBuffer);
    for (const queryIndex of queryIndices) {
      const resultNdx = queryIndex - querySetOffset;
      // A non-zero sample count means at least one fragment passed.
      const passed = !!result[resultNdx];
      checkQueryIndexResultFn(passed, queryIndex);
    }
    return result;
  }
}
// Parameter axes shared by most tests: color write mask on/off, direct vs
// render-bundle encoding, and zero/non-zero resolve-buffer and query-set offsets.
const kQueryTestBaseParams = kUnitCaseParamsBuilder
  .combine('writeMask', [0xf, 0x0])
  .combine('renderMode', kRenderModes)
  .combine('bufferOffset', kBufferOffsets)
  .combine('querySetOffset', kBufferOffsets);
export const g = makeTestGroup(OcclusionQueryTest);
g.test('occlusion_query,initial')
  .desc(`Test getting contents of QuerySet without any queries.`)
  .fn(async t => {
    // Resolve a maximally sized query set without ever beginning a query.
    const resources = t.setup({ numQueries: kMaxQueryCount });
    const encodeNothing = () => {};
    // Every untouched query must resolve as not-passed (zero).
    const checkNotPassed = passed => {
      t.expect(!passed);
    };
    await t.runQueryTest(resources, null, encodeNothing, checkNotPassed);
  });
g.test('occlusion_query,basic')
  .desc('Test all queries pass')
  .params(kQueryTestBaseParams)
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: 30,
    });
    const { renderPassDescriptor, vertexBuffer, pipeline } = resources;
    // Every query draws an unoccluded triangle, so all must report samples passed.
    const encodeDraw = (helper, queryIndex) => {
      const queryHelper = helper.beginOcclusionQuery(queryIndex);
      queryHelper.setPipeline(pipeline);
      queryHelper.setVertexBuffer(vertexBuffer);
      queryHelper.draw(3);
      queryHelper.end();
    };
    const checkPassed = (passed, queryIndex) => {
      const expectPassed = true;
      t.expect(
        !!passed === expectPassed,
        `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
      );
    };
    await t.runQueryTest(resources, renderPassDescriptor, encodeDraw, checkPassed);
  });
g.test('occlusion_query,empty')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery with nothing in between clears the queries
Calls beginOcclusionQuery/draw/endOcclusionQuery that should show passing fragments
and validates they passed. Then executes the same queries (same QuerySet) without drawing.
Those queries should have not passed.
`
  )
  .fn(async t => {
    const resources = t.setup({ numQueries: 30 });
    const { vertexBuffer, renderPassDescriptor, pipeline } = resources;
    // First run draws inside every query; second run reuses the same query
    // set with empty queries, which must reset the results to not-passed.
    const runOnce = async draw => {
      await t.runQueryTest(
        resources,
        renderPassDescriptor,
        (helper, queryIndex) => {
          const queryHelper = helper.beginOcclusionQuery(queryIndex);
          queryHelper.setPipeline(pipeline);
          queryHelper.setVertexBuffer(vertexBuffer);
          if (draw) {
            queryHelper.draw(3);
          }
          queryHelper.end();
        },
        (passed, queryIndex) => {
          const expectPassed = draw;
          t.expect(
            !!passed === expectPassed,
            `draw: ${draw}, queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
          );
        }
      );
    };
    await runOnce(true);
    await runOnce(false);
  });
g.test('occlusion_query,scissor')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery using scissor to occlude
`
  )
  .params(kQueryTestBaseParams)
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
    const kNumQueries = 30;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: kNumQueries,
    });
    const { renderPassDescriptor, renderTargetTexture, vertexBuffer, pipeline } = resources;
    // Cycles through 4 scissor rects; `occluded` records whether the rect
    // excludes the test triangle entirely.
    const getScissorRect = i => {
      const { width, height } = renderTargetTexture;
      switch (i % 4) {
        case 0: // whole target
          return {
            x: 0,
            y: 0,
            width,
            height,
            occluded: false,
            name: 'whole target',
          };
        case 1: // center
          return {
            x: width / 4,
            y: height / 4,
            width: width / 2,
            height: height / 2,
            occluded: false,
            name: 'center',
          };
        case 2: // none
          return {
            x: width / 4,
            y: height / 4,
            width: 0,
            height: 0,
            occluded: true,
            name: 'none',
          };
        case 3: // top 1/4
          return {
            x: 0,
            y: 0,
            width,
            height: height / 2,
            occluded: true,
            name: 'top quarter',
          };
        default:
          unreachable();
      }
    };
    await t.runQueryTest(
      resources,
      renderPassDescriptor,
      (helper, queryIndex) => {
        const { x, y, width, height } = getScissorRect(queryIndex);
        // Scissor state must be set before the query opens (RenderPassHelper
        // asserts no query is in flight).
        helper.setScissorRect(x, y, width, height);
        const queryHelper = helper.beginOcclusionQuery(queryIndex);
        queryHelper.setPipeline(pipeline);
        queryHelper.setVertexBuffer(vertexBuffer);
        queryHelper.draw(3);
        queryHelper.end();
      },
      (passed, queryIndex) => {
        const { occluded, name: scissorCase } = getScissorRect(queryIndex);
        const expectPassed = !occluded;
        // Fix: the message previously interpolated a stray `${name}`, which
        // resolved to the deprecated global `name` property (empty in
        // browsers, a ReferenceError under Node ESM), not a local variable.
        t.expect(
          !!passed === expectPassed,
          `queryIndex: ${queryIndex}, scissorCase: ${scissorCase}, was: ${!!passed}, expected: ${expectPassed}`
        );
      }
    );
  });
g.test('occlusion_query,depth')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery using depth test to occlude
Compares depth against 0.5, with alternating vertex buffers which have a depth
of 0 and 1. When depth check passes, we expect non-zero successful fragments.
`
  )
  .params(kQueryTestBaseParams)
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
    const kNumQueries = 30;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: kNumQueries,
      depthStencilFormat: 'depth24plus',
    });
    const { vertexBuffer: vertexBufferAtZ0, renderPassDescriptor, pipeline } = resources;
    const vertexBufferAtZ1 = t.createSingleTriangleVertexBuffer(1);
    await t.runQueryTest(
      resources,
      renderPassDescriptor,
      (helper, queryIndex) => {
        const queryHelper = helper.beginOcclusionQuery(queryIndex);
        queryHelper.setPipeline(pipeline);
        // Even queries draw at z=0 (passes 'less-equal' vs clear value 0.5);
        // odd queries draw at z=1 (fully occluded).
        queryHelper.setVertexBuffer(queryIndex % 2 ? vertexBufferAtZ1 : vertexBufferAtZ0);
        queryHelper.draw(3);
        queryHelper.end();
      },
      (passed, queryIndex) => {
        const expectPassed = queryIndex % 2 === 0;
        // Fix: dropped the stray `${name}` interpolation (global `name`
        // property, not a local variable) from the failure message.
        t.expect(
          !!passed === expectPassed,
          `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
        );
      }
    );
  });
g.test('occlusion_query,stencil')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery using stencil to occlude
Compares stencil against 0, with alternating stencil reference values of
of 0 and 1. When stencil test passes, we expect non-zero successful fragments.
`
  )
  .params(kQueryTestBaseParams)
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
    const kNumQueries = 30;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: kNumQueries,
      depthStencilFormat: 'stencil8',
    });
    const { vertexBuffer, renderPassDescriptor, pipeline } = resources;
    await t.runQueryTest(
      resources,
      renderPassDescriptor,
      (helper, queryIndex) => {
        // Stencil is cleared to 0 with 'equal' compare, so reference 0
        // (even queries) passes and reference 1 (odd queries) fails.
        // Reference must be set before the query opens.
        helper.setStencilReference(queryIndex % 2);
        const queryHelper = helper.beginOcclusionQuery(queryIndex);
        queryHelper.setPipeline(pipeline);
        queryHelper.setVertexBuffer(vertexBuffer);
        queryHelper.draw(3);
        queryHelper.end();
      },
      (passed, queryIndex) => {
        const expectPassed = queryIndex % 2 === 0;
        // Fix: dropped the stray `${name}` interpolation (global `name`
        // property, not a local variable) from the failure message.
        t.expect(
          !!passed === expectPassed,
          `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
        );
      }
    );
  });
g.test('occlusion_query,sample_mask')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery using sample_mask to occlude
Set sampleMask to 0, 2, 4, 6 and draw quads in top right or bottom left corners of the texel.
If the corner we draw to matches the corner masked we expect non-zero successful fragments.
See: https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels
`
  )
  .params(kQueryTestBaseParams.combine('sampleMask', [0, 2, 4, 6]))
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset, sampleMask } = t.params;
    const kNumQueries = 30;
    const sampleCount = 4;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: kNumQueries,
      sampleCount,
      sampleMask,
    });
    const { renderPassDescriptor, pipeline } = resources;
    // Quarter-size quad (two triangles) whose corner sits at (offset, offset)
    // in clip space, covering a single sample position of the texel.
    const createQuad = offset => {
      return t.createVertexBuffer(
        new Float32Array([
          offset + 0,
          offset + 0,
          0,
          offset + 0.25,
          offset + 0,
          0,
          offset + 0,
          offset + 0.25,
          0,
          offset + 0,
          offset + 0.25,
          0,
          offset + 0.25,
          offset + 0,
          0,
          offset + 0.25,
          offset + 0.25,
          0,
        ])
      );
    };
    const vertexBufferBL = createQuad(0);
    const vertexBufferTR = createQuad(0.25);
    await t.runQueryTest(
      resources,
      renderPassDescriptor,
      (helper, queryIndex) => {
        const queryHelper = helper.beginOcclusionQuery(queryIndex);
        queryHelper.setPipeline(pipeline);
        queryHelper.setVertexBuffer(queryIndex % 2 ? vertexBufferTR : vertexBufferBL);
        queryHelper.draw(6);
        queryHelper.end();
      },
      (passed, queryIndex) => {
        // Above we draw to a specific corner (sample) of a multi-sampled texel
        // drawMask is the "sampleMask" representation of that corner.
        // In other words, if drawMask is 2 (we drew to the top right) and
        // sampleMask is 2 (drawing is allowed to the top right) then we expect
        // passing fragments.
        const drawMask = queryIndex % 2 ? 2 : 4;
        const expectPassed = !!(sampleMask & drawMask);
        // Fix: dropped the stray `${name}` interpolation (global `name`
        // property, not a local variable) from the failure message.
        t.expect(
          !!passed === expectPassed,
          `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
        );
      }
    );
  });
g.test('occlusion_query,alpha_to_coverage')
  .desc(
    `
Test beginOcclusionQuery/endOcclusionQuery using alphaToCoverage to occlude
Set alpha to 0, 0.25, 0.5, 0.75, and 1, draw quads in 4 corners of texel.
Some should be culled. We count how many passed via queries. It's undefined which
will pass but it is defined how many will pass for a given alpha value.
Note: It seems like the result is well defined but if we find some devices/drivers
don't follow this exactly then we can relax check for the expected number of passed
queries.
See: https://bgolus.medium.com/anti-aliased-alpha-test-the-esoteric-alpha-to-coverage-8b177335ae4f
`
  )
  .params(kQueryTestBaseParams.combine('alpha', [0, 0.25, 0.5, 0.75, 1.0]))
  .fn(async t => {
    const { writeMask, renderMode, bufferOffset, querySetOffset, alpha } = t.params;
    const kNumQueries = 32;
    const sampleCount = 4;
    const resources = t.setup({
      writeMask,
      renderMode,
      bufferOffset,
      querySetOffset,
      numQueries: kNumQueries,
      sampleCount,
      alpha,
    });
    const { renderPassDescriptor, pipeline } = resources;
    // Quarter-size quad (two triangles) positioned at one quadrant of the texel.
    const createQuad = (xOffset, yOffset) => {
      return t.createVertexBuffer(
        new Float32Array([
          xOffset + 0,
          yOffset + 0,
          0,
          xOffset + 0.25,
          yOffset + 0,
          0,
          xOffset + 0,
          yOffset + 0.25,
          0,
          xOffset + 0,
          yOffset + 0.25,
          0,
          xOffset + 0.25,
          yOffset + 0,
          0,
          xOffset + 0.25,
          yOffset + 0.25,
          0,
        ])
      );
    };
    // One quad per quadrant of the pixel; each group of 4 queries draws all four.
    const vertexBuffers = [
      createQuad(0, 0),
      createQuad(0.25, 0),
      createQuad(0, 0.25),
      createQuad(0.25, 0.25),
    ];
    const numPassedPerGroup = new Array(kNumQueries / 4).fill(0);
    // These tests can't use queryIndex to decide what to draw because which mask
    // a particular alpha converts to is implementation defined. When querySetOffset is
    // non-zero the queryIndex will go 7, 8, 9, 10, ... but we need to guarantee
    // 4 queries per pixel and group those results so `queryIndex / 4 | 0` won't work.
    // Instead we count the queries to get 4 draws per group, one to each quadrant of a pixel
    // Then we total up the passes for those 4 queries by queryCount.
    let queryCount = 0;
    let resultCount = 0;
    await t.runQueryTest(
      resources,
      renderPassDescriptor,
      (helper, queryIndex) => {
        const queryHelper = helper.beginOcclusionQuery(queryIndex);
        queryHelper.setPipeline(pipeline);
        queryHelper.setVertexBuffer(vertexBuffers[queryCount++ % 4]);
        queryHelper.draw(6);
        queryHelper.end();
      },
      passed => {
        // Accumulate pass counts per group of 4 consecutive queries.
        const groupIndex = (resultCount++ / 4) | 0;
        numPassedPerGroup[groupIndex] += passed ? 1 : 0;
      }
    );
    // alpha-to-coverage should enable floor(alpha / 0.25) of the 4 samples.
    const expected = (alpha / 0.25) | 0;
    numPassedPerGroup.forEach((numPassed, queryGroup) => {
      t.expect(
        numPassed === expected,
        `queryGroup: ${queryGroup}, was: ${numPassed}, expected: ${expected}`
      );
    });
  });
g.test('occlusion_query,multi_resolve')
  .desc('Test calling resolveQuerySet more than once does not change results')
  .fn(async t => {
    const { device } = t;
    const kNumQueries = 30;
    const {
      pipeline,
      vertexBuffer,
      occlusionQuerySet,
      renderPassDescriptor,
      renderTargetTexture,
      queryResolveBuffer,
      readBuffer,
    } = t.setup({ numQueries: kNumQueries });
    // NOTE(review): passing an existing GPUBuffer as the descriptor relies on
    // it exposing matching `size` and `usage` properties — confirm this stays
    // valid for createBuffer.
    const readBuffer2 = t.createBuffer(readBuffer);
    const readBuffer3 = t.createBuffer(readBuffer);
    // Unrelated rendering (no queries) used to try to perturb the query results.
    const renderSomething = encoder => {
      const pass = encoder.beginRenderPass(renderPassDescriptor);
      pass.setPipeline(pipeline);
      pass.setVertexBuffer(0, vertexBuffer);
      pass.setScissorRect(0, 0, renderTargetTexture.width, renderTargetTexture.height);
      pass.draw(3);
      pass.end();
    };
    {
      const encoder = device.createCommandEncoder();
      {
        const pass = encoder.beginRenderPass(renderPassDescriptor);
        pass.setPipeline(pipeline);
        pass.setVertexBuffer(0, vertexBuffer);
        // Odd queries draw full-scissor (pass); even queries draw with an
        // empty scissor (fail).
        for (let i = 0; i < kNumQueries; ++i) {
          pass.beginOcclusionQuery(i);
          if (i % 2) {
            pass.setScissorRect(0, 0, renderTargetTexture.width, renderTargetTexture.height);
          } else {
            pass.setScissorRect(0, 0, 0, 0);
          }
          pass.draw(3);
          pass.endOcclusionQuery();
        }
        pass.end();
      }
      // Intentionally call resolveQuerySet twice
      encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
      encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
      encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer, 0, readBuffer.size);
      // Rendering stuff unrelated should not affect results.
      renderSomething(encoder);
      encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
      encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer2, 0, readBuffer2.size);
      device.queue.submit([encoder.finish()]);
    }
    // Encode something else and draw again, then read the results
    // They should not be affected.
    {
      const encoder = device.createCommandEncoder();
      renderSomething(encoder);
      encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
      encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer3, 0, readBuffer3.size);
      device.queue.submit([encoder.finish()]);
    }
    // All three snapshots must agree with the originally recorded queries.
    const results = await Promise.all([
      t.readBufferAsBigUint64(readBuffer),
      t.readBufferAsBigUint64(readBuffer2),
      t.readBufferAsBigUint64(readBuffer3),
    ]);
    results.forEach((result, r) => {
      for (let i = 0; i < kNumQueries; ++i) {
        const passed = !!result[i];
        const expectPassed = !!(i % 2);
        t.expect(
          passed === expectPassed,
          `result(${r}): queryIndex: ${i}, passed: ${passed}, expected: ${expectPassed}`
        );
      }
    });
  });

View file

@ -1,46 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Basic command buffer rendering tests.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
// NOTE(review): this test targets a pre-standard WebGPU API surface —
// `attachment`/`loadValue`, `pass.endPass()`, `device.defaultQueue`,
// `size.depth` and OUTPUT_ATTACHMENT were all renamed in the shipped spec.
g.test('clear').fn(async t => {
  // Destination buffer that receives the single cleared texel for checking.
  const dst = t.device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
  });
  const colorAttachment = t.device.createTexture({
    format: 'rgba8unorm',
    size: { width: 1, height: 1, depth: 1 },
    usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.OUTPUT_ATTACHMENT,
  });
  const colorAttachmentView = colorAttachment.createView();
  const encoder = t.device.createCommandEncoder();
  // Clear the 1x1 target to green via the pass load value; no draw is issued.
  const pass = encoder.beginRenderPass({
    colorAttachments: [
      {
        attachment: colorAttachmentView,
        loadValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
        storeOp: 'store',
      },
    ],
  });
  pass.endPass();
  encoder.copyTextureToBuffer(
    { texture: colorAttachment, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    { buffer: dst, bytesPerRow: 256 },
    { width: 1, height: 1, depth: 1 }
  );
  t.device.defaultQueue.submit([encoder.finish()]);
  // Expect RGBA8 green in the copied-out texel.
  t.expectContents(dst, new Uint8Array([0x00, 0xff, 0x00, 0xff]));
});

View file

@ -0,0 +1,20 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests of the behavior of the viewport/scissor/blend/reference states.
TODO:
- {viewport, scissor rect, blend color, stencil reference}:
Test rendering result with {various values}.
- Set the state in different ways to make sure it gets the correct value in the end: {
- state unset (= default)
- state explicitly set once to {default value, another value}
- persistence: [set, draw, draw] (fn should differentiate from [set, draw] + [draw])
- overwriting: [set(1), draw, set(2), draw] (fn should differentiate from [set(1), set(2), draw, draw])
- overwriting: [set(1), set(2), draw] (fn should differentiate from [set(1), draw] but not [set(2), draw])
- }
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
// Test group for the dynamic-state tests; currently empty (see TODO list above).
export const g = makeTestGroup(GPUTest);

View file

@ -0,0 +1,631 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Ensure state is set correctly. Tries to stress state caching (setting different states multiple
times in different orders) for setIndexBuffer and setVertexBuffer.
Equivalent tests for setBindGroup and setPipeline are in programmable/state_tracking.spec.ts.
Equivalent tests for viewport/scissor/blend/reference are in render/dynamic_state.spec.ts
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest, TextureTestMixin } from '../../../../gpu_test.js';
import { TexelView } from '../../../../util/texture/texel_view.js';
/**
 * Fixture for index/vertex buffer state-tracking tests: builds a point-list
 * pipeline whose vertices carry a clip-space x position (f32) and a color
 * (unorm8x4) at a configurable stride.
 */
class VertexAndIndexStateTrackingTest extends TextureTestMixin(GPUTest) {
  GetRenderPipelineForTest(arrayStride) {
    return this.device.createRenderPipeline({
      layout: 'auto',
      vertex: {
        module: this.device.createShaderModule({
          code: `
          struct Inputs {
            @location(0) vertexPosition : f32,
            @location(1) vertexColor : vec4<f32>,
          };
          struct Outputs {
            @builtin(position) position : vec4<f32>,
            @location(0) color : vec4<f32>,
          };
          @vertex
          fn main(input : Inputs)-> Outputs {
            var outputs : Outputs;
            outputs.position =
              vec4<f32>(input.vertexPosition, 0.5, 0.0, 1.0);
            outputs.color = input.vertexColor;
            return outputs;
          }`,
        }),
        entryPoint: 'main',
        buffers: [
          {
            arrayStride,
            attributes: [
              {
                format: 'float32',
                offset: 0,
                shaderLocation: 0,
              },
              {
                format: 'unorm8x4',
                offset: 4,
                shaderLocation: 1,
              },
            ],
          },
        ],
      },
      fragment: {
        module: this.device.createShaderModule({
          code: `
          struct Input {
            @location(0) color : vec4<f32>
          };
          @fragment
          fn main(input : Input) -> @location(0) vec4<f32> {
            return input.color;
          }`,
        }),
        entryPoint: 'main',
        targets: [{ format: 'rgba8unorm' }],
      },
      // Each vertex renders exactly one pixel.
      primitive: {
        topology: 'point-list',
      },
    });
  }
  // Bytes per vertex: one f32 position (4) + one unorm8x4 color (4).
  kVertexAttributeSize = 8;
}
export const g = makeTestGroup(VertexAndIndexStateTrackingTest);
g.test('set_index_buffer_without_changing_buffer')
.desc(
`
Test that setting index buffer states (index format, offset, size) multiple times in different
orders still keeps the correctness of each draw call.
`
)
.fn(t => {
// Initialize the index buffer with 5 uint16 indices (0, 1, 2, 3, 4).
const indexBuffer = t.makeBufferWithContents(
new Uint16Array([0, 1, 2, 3, 4]),
GPUBufferUsage.INDEX
);
// Initialize the vertex buffer with required vertex attributes (position: f32, color: f32x4)
// Note that the maximum index in the test is 0x10000.
// (0x10000 is what the first 4 bytes of the uint16 data [0, 1, ...] decode
// to when reinterpreted as a single little-endian uint32 index.)
const kVertexAttributesCount = 0x10000 + 1;
const vertexBuffer = t.device.createBuffer({
usage: GPUBufferUsage.VERTEX,
size: t.kVertexAttributeSize * kVertexAttributesCount,
mappedAtCreation: true,
});
t.trackForCleanup(vertexBuffer);
const vertexAttributes = vertexBuffer.getMappedRange();
// Positions map to distinct pixels in a 5-wide render target; the last
// entry (-0.4) is the position for the vertex at index 0x10000.
const kPositions = [-0.8, -0.4, 0.0, 0.4, 0.8, -0.4];
const kColors = [
new Uint8Array([255, 0, 0, 255]),
new Uint8Array([255, 255, 255, 255]),
new Uint8Array([0, 0, 255, 255]),
new Uint8Array([255, 0, 255, 255]),
new Uint8Array([0, 255, 255, 255]),
new Uint8Array([0, 255, 0, 255]),
];
// Set vertex attributes at index {0..4} in Uint16.
// Note that the vertex attribute at index 1 will not be used.
for (let i = 0; i < kPositions.length - 1; ++i) {
const baseOffset = t.kVertexAttributeSize * i;
const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
vertexPosition[0] = kPositions[i];
const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
vertexColor.set(kColors[i]);
}
// Set vertex attributes at index 0x10000.
const lastOffset = t.kVertexAttributeSize * (kVertexAttributesCount - 1);
const lastVertexPosition = new Float32Array(vertexAttributes, lastOffset, 1);
lastVertexPosition[0] = kPositions[kPositions.length - 1];
const lastVertexColor = new Uint8Array(vertexAttributes, lastOffset + 4, 4);
lastVertexColor.set(kColors[kColors.length - 1]);
vertexBuffer.unmap();
const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
const outputTextureSize = [kPositions.length - 1, 1, 1];
const outputTexture = t.device.createTexture({
format: 'rgba8unorm',
size: outputTextureSize,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
const encoder = t.device.createCommandEncoder();
const renderPass = encoder.beginRenderPass({
colorAttachments: [
{
view: outputTexture.createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
});
renderPass.setPipeline(renderPipeline);
renderPass.setVertexBuffer(0, vertexBuffer);
// 1st draw: indexFormat = 'uint32', offset = 0, size = 4 (index value: 0x10000)
renderPass.setIndexBuffer(indexBuffer, 'uint32', 0, 4);
renderPass.drawIndexed(1);
// 2nd draw: indexFormat = 'uint16', offset = 0, size = 4 (index value: 0)
renderPass.setIndexBuffer(indexBuffer, 'uint16', 0, 4);
renderPass.drawIndexed(1);
// 3rd draw: indexFormat = 'uint16', offset = 4, size = 2 (index value: 2)
// The first setIndexBuffer call is deliberately overridden by the second
// before the draw: only the last-set state may apply.
renderPass.setIndexBuffer(indexBuffer, 'uint16', 0, 2);
renderPass.setIndexBuffer(indexBuffer, 'uint16', 4, 2);
renderPass.drawIndexed(1);
// 4th draw: indexformat = 'uint16', offset = 6, size = 4 (index values: 3, 4)
renderPass.setIndexBuffer(indexBuffer, 'uint16', 6, 2);
renderPass.setIndexBuffer(indexBuffer, 'uint16', 6, 4);
renderPass.drawIndexed(2);
renderPass.end();
t.queue.submit([encoder.finish()]);
// Pixel 1 was drawn by the uint32 draw (vertex 0x10000 -> last color);
// every other pixel x was drawn with kColors[x].
t.expectTexelViewComparisonIsOkInTexture(
{ texture: outputTexture },
TexelView.fromTexelsAsBytes('rgba8unorm', coord =>
coord.x === 1 ? kColors[kPositions.length - 1] : kColors[coord.x]
),
outputTextureSize
);
});
g.test('set_vertex_buffer_without_changing_buffer')
.desc(
`
Test that setting vertex buffer states (offset, size) multiple times in different orders still
keeps the correctness of each draw call.
- Tries several different sequences of setVertexBuffer+draw commands, each of which draws vertices
in all 4 output pixels, and check they were drawn correctly.
`
)
.fn(t => {
// 8 positions mapping to the 8 pixels of the output texture, one color each.
const kPositions = [-0.875, -0.625, -0.375, -0.125, 0.125, 0.375, 0.625, 0.875];
const kColors = [
new Uint8Array([255, 0, 0, 255]),
new Uint8Array([0, 255, 0, 255]),
new Uint8Array([0, 0, 255, 255]),
new Uint8Array([51, 0, 0, 255]),
new Uint8Array([0, 51, 0, 255]),
new Uint8Array([0, 0, 51, 255]),
new Uint8Array([255, 0, 255, 255]),
new Uint8Array([255, 255, 0, 255]),
];
// Initialize the vertex buffer with required vertex attributes (position: f32, color: f32x4)
const kVertexAttributesCount = 8;
const vertexBuffer = t.device.createBuffer({
usage: GPUBufferUsage.VERTEX,
size: t.kVertexAttributeSize * kVertexAttributesCount,
mappedAtCreation: true,
});
t.trackForCleanup(vertexBuffer);
const vertexAttributes = vertexBuffer.getMappedRange();
for (let i = 0; i < kPositions.length; ++i) {
const baseOffset = t.kVertexAttributeSize * i;
const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
vertexPosition[0] = kPositions[i];
const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
vertexColor.set(kColors[i]);
}
vertexBuffer.unmap();
const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
const outputTextureSize = [kPositions.length, 1, 1];
const outputTexture = t.device.createTexture({
format: 'rgba8unorm',
size: outputTextureSize,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
const encoder = t.device.createCommandEncoder();
const renderPass = encoder.beginRenderPass({
colorAttachments: [
{
view: outputTexture.createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
});
renderPass.setPipeline(renderPipeline);
// Change 'size' in setVertexBuffer()
// (first call is overridden before the draw; only the second may apply)
renderPass.setVertexBuffer(0, vertexBuffer, 0, t.kVertexAttributeSize);
renderPass.setVertexBuffer(0, vertexBuffer, 0, t.kVertexAttributeSize * 2);
renderPass.draw(2);
// Change 'offset' in setVertexBuffer()
renderPass.setVertexBuffer(
0,
vertexBuffer,
t.kVertexAttributeSize * 2,
t.kVertexAttributeSize * 2
);
renderPass.draw(2);
// Change 'size' again in setVertexBuffer()
renderPass.setVertexBuffer(
0,
vertexBuffer,
t.kVertexAttributeSize * 4,
t.kVertexAttributeSize * 2
);
renderPass.setVertexBuffer(
0,
vertexBuffer,
t.kVertexAttributeSize * 4,
t.kVertexAttributeSize * 4
);
renderPass.draw(4);
renderPass.end();
t.queue.submit([encoder.finish()]);
// The three draws together cover all 8 pixels; each pixel x must be kColors[x].
t.expectTexelViewComparisonIsOkInTexture(
{ texture: outputTexture },
TexelView.fromTexelsAsBytes('rgba8unorm', coord => kColors[coord.x]),
outputTextureSize
);
});
g.test('change_pipeline_before_and_after_vertex_buffer')
.desc(
`
Test that changing the pipeline {before,after} the vertex buffers still keeps the correctness of
each draw call (In D3D12, the vertex buffer stride is part of SetVertexBuffer instead of the
pipeline.)
`
)
.fn(t => {
const kPositions = [-0.8, -0.4, 0.0, 0.4, 0.8, 0.9];
const kColors = [
new Uint8Array([255, 0, 0, 255]),
new Uint8Array([255, 255, 255, 255]),
new Uint8Array([0, 255, 0, 255]),
new Uint8Array([0, 0, 255, 255]),
new Uint8Array([255, 0, 255, 255]),
new Uint8Array([0, 255, 255, 255]),
];
// Initialize the vertex buffer with required vertex attributes (position: f32, color: f32x4)
const vertexBuffer = t.device.createBuffer({
usage: GPUBufferUsage.VERTEX,
size: t.kVertexAttributeSize * kPositions.length,
mappedAtCreation: true,
});
t.trackForCleanup(vertexBuffer);
// Note that kPositions[1], kColors[1], kPositions[5] and kColors[5] are not used.
const vertexAttributes = vertexBuffer.getMappedRange();
for (let i = 0; i < kPositions.length; ++i) {
const baseOffset = t.kVertexAttributeSize * i;
const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
vertexPosition[0] = kPositions[i];
const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
vertexColor.set(kColors[i]);
}
vertexBuffer.unmap();
// Create two render pipelines with different vertex attribute strides
const renderPipeline1 = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
const renderPipeline2 = t.GetRenderPipelineForTest(t.kVertexAttributeSize * 2);
const kPointsCount = kPositions.length - 1;
const outputTextureSize = [kPointsCount, 1, 1];
const outputTexture = t.device.createTexture({
format: 'rgba8unorm',
size: outputTextureSize,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
const encoder = t.device.createCommandEncoder();
const renderPass = encoder.beginRenderPass({
colorAttachments: [
{
view: outputTexture.createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
});
// Update render pipeline before setVertexBuffer. The applied vertex attribute stride should be
// 2 * kVertexAttributeSize.
// (Draws vertices 0 and 2 -> pixels 0 and 2.)
renderPass.setPipeline(renderPipeline1);
renderPass.setPipeline(renderPipeline2);
renderPass.setVertexBuffer(0, vertexBuffer);
renderPass.draw(2);
// Update render pipeline after setVertexBuffer. The applied vertex attribute stride should be
// kVertexAttributeSize.
// (Draws vertices 3 and 4 -> pixels 3 and 4; pixel 1 stays at the clear color.)
renderPass.setVertexBuffer(0, vertexBuffer, 3 * t.kVertexAttributeSize);
renderPass.setPipeline(renderPipeline1);
renderPass.draw(2);
renderPass.end();
t.queue.submit([encoder.finish()]);
t.expectTexelViewComparisonIsOkInTexture(
{ texture: outputTexture },
TexelView.fromTexelsAsBytes('rgba8unorm', coord =>
coord.x === 1 ? new Uint8Array([0, 0, 0, 255]) : kColors[coord.x]
),
outputTextureSize
);
});
g.test('set_vertex_buffer_but_not_used_in_draw')
.desc(
`
Test that drawing after having set vertex buffer slots not used by the pipeline works correctly.
- In the test there are 2 draw calls in the render pass. The first draw call uses 2 vertex buffers
(position and color), and the second draw call only uses 1 vertex buffer (for color, the vertex
position is defined as constant values in the vertex shader). The test verifies if both of these
two draw calls work correctly.
`
)
.fn(t => {
const kPositions = new Float32Array([-0.75, -0.25]);
const kColors = new Uint8Array([255, 0, 0, 255, 0, 255, 0, 255]);
// Initialize the vertex buffers with required vertex attributes (position: f32, color: f32x4)
const kAttributeStride = 4;
const positionBuffer = t.makeBufferWithContents(kPositions, GPUBufferUsage.VERTEX);
const colorBuffer = t.makeBufferWithContents(kColors, GPUBufferUsage.VERTEX);
// Fragment stage shared by both pipelines: passes the interpolated color through.
const fragmentState = {
module: t.device.createShaderModule({
code: `
struct Input {
@location(0) color : vec4<f32>
};
@fragment
fn main(input : Input) -> @location(0) vec4<f32> {
return input.color;
}`,
}),
entryPoint: 'main',
targets: [{ format: 'rgba8unorm' }],
};
// Create renderPipeline1 that uses both positionBuffer and colorBuffer.
const renderPipeline1 = t.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: t.device.createShaderModule({
code: `
struct Inputs {
@location(0) vertexColor : vec4<f32>,
@location(1) vertexPosition : f32,
};
struct Outputs {
@builtin(position) position : vec4<f32>,
@location(0) color : vec4<f32>,
};
@vertex
fn main(input : Inputs)-> Outputs {
var outputs : Outputs;
outputs.position =
vec4<f32>(input.vertexPosition, 0.5, 0.0, 1.0);
outputs.color = input.vertexColor;
return outputs;
}`,
}),
entryPoint: 'main',
buffers: [
{
arrayStride: kAttributeStride,
attributes: [
{
format: 'unorm8x4',
offset: 0,
shaderLocation: 0,
},
],
},
{
arrayStride: kAttributeStride,
attributes: [
{
format: 'float32',
offset: 0,
shaderLocation: 1,
},
],
},
],
},
fragment: fragmentState,
primitive: {
topology: 'point-list',
},
});
// renderPipeline2 only declares the color vertex buffer (slot 0); positions
// come from a constant array indexed by vertex_index, so the buffer left
// bound at slot 1 must be ignored.
const renderPipeline2 = t.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: t.device.createShaderModule({
code: `
struct Inputs {
@builtin(vertex_index) vertexIndex : u32,
@location(0) vertexColor : vec4<f32>,
};
struct Outputs {
@builtin(position) position : vec4<f32>,
@location(0) color : vec4<f32>,
};
@vertex
fn main(input : Inputs)-> Outputs {
var kPositions = array<f32, 2> (0.25, 0.75);
var outputs : Outputs;
outputs.position =
vec4(kPositions[input.vertexIndex], 0.5, 0.0, 1.0);
outputs.color = input.vertexColor;
return outputs;
}`,
}),
entryPoint: 'main',
buffers: [
{
arrayStride: kAttributeStride,
attributes: [
{
format: 'unorm8x4',
offset: 0,
shaderLocation: 0,
},
],
},
],
},
fragment: fragmentState,
primitive: {
topology: 'point-list',
},
});
const kPointsCount = 4;
const outputTextureSize = [kPointsCount, 1, 1];
const outputTexture = t.device.createTexture({
format: 'rgba8unorm',
size: [kPointsCount, 1, 1],
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
const encoder = t.device.createCommandEncoder();
const renderPass = encoder.beginRenderPass({
colorAttachments: [
{
view: outputTexture.createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
});
// Both buffers are bound before either pipeline is set; pipeline2's draw
// must not be affected by the unused slot-1 binding.
renderPass.setVertexBuffer(0, colorBuffer);
renderPass.setVertexBuffer(1, positionBuffer);
renderPass.setPipeline(renderPipeline1);
renderPass.draw(2);
renderPass.setPipeline(renderPipeline2);
renderPass.draw(2);
renderPass.end();
t.queue.submit([encoder.finish()]);
// Pixels 0/1 from pipeline1 (positions -0.75, -0.25), pixels 2/3 from
// pipeline2 (positions 0.25, 0.75); both draws use the same two colors.
const kExpectedColors = [
kColors.subarray(0, 4),
kColors.subarray(4),
kColors.subarray(0, 4),
kColors.subarray(4),
];
t.expectTexelViewComparisonIsOkInTexture(
{ texture: outputTexture },
TexelView.fromTexelsAsBytes('rgba8unorm', coord => kExpectedColors[coord.x]),
outputTextureSize
);
});
g.test('set_index_buffer_before_non_indexed_draw')
.desc(
`
Test that setting / not setting the index buffer does not impact a non-indexed draw.
`
)
.fn(t => {
const kPositions = [-0.75, -0.25, 0.25, 0.75];
const kColors = [
new Uint8Array([255, 0, 0, 255]),
new Uint8Array([0, 255, 0, 255]),
new Uint8Array([0, 0, 255, 255]),
new Uint8Array([255, 0, 255, 255]),
];
// Initialize the vertex buffer with required vertex attributes (position: f32, color: f32x4)
const vertexBuffer = t.device.createBuffer({
usage: GPUBufferUsage.VERTEX,
size: t.kVertexAttributeSize * kPositions.length,
mappedAtCreation: true,
});
t.trackForCleanup(vertexBuffer);
const vertexAttributes = vertexBuffer.getMappedRange();
for (let i = 0; i < kPositions.length; ++i) {
const baseOffset = t.kVertexAttributeSize * i;
const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
vertexPosition[0] = kPositions[i];
const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
vertexColor.set(kColors[i]);
}
vertexBuffer.unmap();
// Initialize the index buffer with 2 uint16 indices (2, 3).
const indexBuffer = t.makeBufferWithContents(new Uint16Array([2, 3]), GPUBufferUsage.INDEX);
const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
const kPointsCount = 4;
const outputTextureSize = [kPointsCount, 1, 1];
const outputTexture = t.device.createTexture({
format: 'rgba8unorm',
size: [kPointsCount, 1, 1],
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
const encoder = t.device.createCommandEncoder();
const renderPass = encoder.beginRenderPass({
colorAttachments: [
{
view: outputTexture.createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
});
// The first draw call is an indexed one (the third and fourth color are involved)
renderPass.setVertexBuffer(0, vertexBuffer);
renderPass.setIndexBuffer(indexBuffer, 'uint16');
renderPass.setPipeline(renderPipeline);
renderPass.drawIndexed(2);
// The second draw call is a non-indexed one (the first and second color are involved)
// It must start at vertex 0 regardless of the index buffer still being bound.
renderPass.draw(2);
renderPass.end();
t.queue.submit([encoder.finish()]);
// Together the two draws cover all 4 pixels: pixel x == kColors[x].
t.expectTexelViewComparisonIsOkInTexture(
{ texture: outputTexture },
TexelView.fromTexelsAsBytes('rgba8unorm', coord => kColors[coord.x]),
outputTextureSize
);
});

View file

@ -0,0 +1,166 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Basic command buffer compute tests.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { kLimitInfo } from '../../../capability_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { checkElementsEqualGenerated } from '../../../util/check_contents.js';
export const g = makeTestGroup(GPUTest);
// Default per-dimension workgroup-size limits, indexed 0 = x, 1 = y, 2 = z,
// so tests can pick the limit matching a chosen dimension.
const kMaxComputeWorkgroupSize = [
kLimitInfo.maxComputeWorkgroupSizeX.default,
kLimitInfo.maxComputeWorkgroupSizeY.default,
kLimitInfo.maxComputeWorkgroupSizeZ.default,
];
// Smoke test: a single-invocation compute shader copies one u32 from a
// storage buffer to another; the result is read back and compared.
g.test('memcpy').fn(t => {
const data = new Uint32Array([0x01020304]);
const src = t.makeBufferWithContents(data, GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE);
const dst = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
});
const pipeline = t.device.createComputePipeline({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: `
struct Data {
value : u32
};
@group(0) @binding(0) var<storage, read> src : Data;
@group(0) @binding(1) var<storage, read_write> dst : Data;
@compute @workgroup_size(1) fn main() {
dst.value = src.value;
return;
}
`,
}),
entryPoint: 'main',
},
});
const bg = t.device.createBindGroup({
entries: [
{ binding: 0, resource: { buffer: src, offset: 0, size: 4 } },
{ binding: 1, resource: { buffer: dst, offset: 0, size: 4 } },
],
layout: pipeline.getBindGroupLayout(0),
});
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bg);
pass.dispatchWorkgroups(1);
pass.end();
t.device.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(dst, data);
});
g.test('large_dispatch')
.desc(`Test reasonably-sized large dispatches (see also: stress tests).`)
.params(u =>
u
// Reasonably-sized powers of two, and some stranger larger sizes.
.combine('dispatchSize', [
256,
2048,
315,
628,
2179,
kLimitInfo.maxComputeWorkgroupsPerDimension.default,
])
// Test some reasonable workgroup sizes.
.beginSubcases()
// 0 == x axis; 1 == y axis; 2 == z axis.
.combine('largeDimension', [0, 1, 2])
.expand('workgroupSize', p => [1, 2, 8, 32, kMaxComputeWorkgroupSize[p.largeDimension]])
)
.fn(t => {
// The output storage buffer is filled with this value.
const val = 0x01020304;
// Sentinel written instead if an invocation sees an out-of-range id.
const badVal = 0xbaadf00d;
const wgSize = t.params.workgroupSize;
const bufferLength = t.params.dispatchSize * wgSize;
const bufferByteSize = Uint32Array.BYTES_PER_ELEMENT * bufferLength;
const dst = t.device.createBuffer({
size: bufferByteSize,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
});
// Only use one large dimension and workgroup size in the dispatch
// call to keep the size of the test reasonable.
const dims = [1, 1, 1];
dims[t.params.largeDimension] = t.params.dispatchSize;
const wgSizes = [1, 1, 1];
wgSizes[t.params.largeDimension] = t.params.workgroupSize;
const pipeline = t.device.createComputePipeline({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: `
struct OutputBuffer {
value : array<u32>
};
@group(0) @binding(0) var<storage, read_write> dst : OutputBuffer;
@compute @workgroup_size(${wgSizes[0]}, ${wgSizes[1]}, ${wgSizes[2]})
fn main(
@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>
) {
var xExtent : u32 = ${dims[0]}u * ${wgSizes[0]}u;
var yExtent : u32 = ${dims[1]}u * ${wgSizes[1]}u;
var zExtent : u32 = ${dims[2]}u * ${wgSizes[2]}u;
var index : u32 = (
GlobalInvocationID.z * xExtent * yExtent +
GlobalInvocationID.y * xExtent +
GlobalInvocationID.x);
var val : u32 = ${val}u;
// Trivial error checking in the indexing and invocation.
if (GlobalInvocationID.x > xExtent ||
GlobalInvocationID.y > yExtent ||
GlobalInvocationID.z > zExtent) {
val = ${badVal}u;
}
dst.value[index] = val;
}
`,
}),
entryPoint: 'main',
},
});
const bg = t.device.createBindGroup({
entries: [{ binding: 0, resource: { buffer: dst, offset: 0, size: bufferByteSize } }],
layout: pipeline.getBindGroupLayout(0),
});
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bg);
pass.dispatchWorkgroups(dims[0], dims[1], dims[2]);
pass.end();
t.device.queue.submit([encoder.finish()]);
// Every element must be `val`; any badVal indicates a bad invocation id.
t.expectGPUBufferValuesPassCheck(dst, a => checkElementsEqualGenerated(a, i => val), {
type: Uint32Array,
typedLength: bufferLength,
});
// Explicitly destroy: these buffers can be large (up to maxComputeWorkgroupsPerDimension * wgSize * 4 bytes).
dst.destroy();
});

View file

@ -0,0 +1,13 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
TODO:
- Test some weird but valid values for entry point name (both module and pipeline creation
should succeed).
- Test using each of many entry points in the module (should succeed).
- Test using an entry point with the wrong stage (should fail).
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);

View file

@ -0,0 +1,505 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Compute pipeline using overridable constants test.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { GPUTest } from '../../../gpu_test.js';
// Fixture for overridable-constant tests.
class F extends GPUTest {
// Builds a compute pipeline from `code` with the given pipeline-creation
// `constants`, dispatches a single workgroup with a storage buffer bound at
// @group(0) @binding(0), and checks the buffer equals `expected`.
// `isAsync` selects createComputePipelineAsync vs. createComputePipeline.
async ExpectShaderOutputWithConstants(isAsync, expected, constants, code) {
const dst = this.device.createBuffer({
size: expected.byteLength,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
});
const descriptor = {
layout: 'auto',
compute: {
module: this.device.createShaderModule({
code,
}),
entryPoint: 'main',
constants,
},
};
// Normalize both creation paths to a promise so the rest is shared.
const promise = isAsync
? this.device.createComputePipelineAsync(descriptor)
: Promise.resolve(this.device.createComputePipeline(descriptor));
const pipeline = await promise;
const bindGroup = this.device.createBindGroup({
entries: [{ binding: 0, resource: { buffer: dst, offset: 0, size: expected.byteLength } }],
layout: pipeline.getBindGroupLayout(0),
});
const encoder = this.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
pass.end();
this.device.queue.submit([encoder.finish()]);
this.expectGPUBufferValuesEqual(dst, expected);
}
}
export const g = makeTestGroup(F);
g.test('basic')
.desc(
`Test that either correct constants override values or default values when no constants override value are provided at pipeline creation time are used as the output to the storage buffer.`
)
.params(u => u.combine('isAsync', [true, false]))
.fn(async t => {
const count = 11;
// Expected output is simply [0..10]: overridden constants supply most
// values; c4, c7 and c10 fall back to their WGSL defaults (4, 7, 10).
await t.ExpectShaderOutputWithConstants(
t.params.isAsync,
new Uint32Array(range(count, i => i)),
{
c0: 0,
c1: 1,
c2: 2,
c3: 3,
// c4 is using default value
c5: 5,
c6: 6,
// c7 is using default value
c8: 8,
c9: 9,
// c10 is using default value
},
`
override c0: bool; // type: bool
override c1: bool = false; // default override
override c2: f32; // type: float32
override c3: f32 = 0.0; // default override
override c4: f32 = 4.0; // default
override c5: i32; // type: int32
override c6: i32 = 0; // default override
override c7: i32 = 7; // default
override c8: u32; // type: uint32
override c9: u32 = 0u; // default override
override c10: u32 = 10u; // default
struct Buf {
data : array<u32, ${count}>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(1) fn main() {
buf.data[0] = u32(c0);
buf.data[1] = u32(c1);
buf.data[2] = u32(c2);
buf.data[3] = u32(c3);
buf.data[4] = u32(c4);
buf.data[5] = u32(c5);
buf.data[6] = u32(c6);
buf.data[7] = u32(c7);
buf.data[8] = u32(c8);
buf.data[9] = u32(c9);
buf.data[10] = u32(c10);
}
`
);
});
g.test('numeric_id')
.desc(
`Test that correct values are used as output to the storage buffer for constants specified with numeric id instead of their names.`
)
.params(u => u.combine('isAsync', [true, false]))
.fn(async t => {
// Constants keyed by @id(...) numbers ('1001', '1') rather than names;
// 1003 is left unset to exercise its default.
await t.ExpectShaderOutputWithConstants(
t.params.isAsync,
new Uint32Array([1, 2, 3]),
{
1001: 1,
1: 2,
// 1003 is using default value
},
`
@id(1001) override c1: u32; // some big numeric id
@id(1) override c2: u32 = 0u; // id == 1 might collide with some generated constant id
@id(1003) override c3: u32 = 3u; // default
struct Buf {
data : array<u32, 3>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(1) fn main() {
buf.data[0] = c1;
buf.data[1] = c2;
buf.data[2] = c3;
}
`
);
});
g.test('precision')
.desc(
`Test that float number precision is preserved for constants as they are used for compute shader output of the storage buffer.`
)
.params(u => u.combine('isAsync', [true, false]))
.fn(async t => {
const c1 = 3.14159;
// More digits than f32 can hold; exercises the f64 -> f32 rounding path.
const c2 = 3.141592653589793238;
await t.ExpectShaderOutputWithConstants(
t.params.isAsync,
// These values will get rounded to f32 and createComputePipeline, so the values coming out from the shader won't be the exact same one as shown here.
// (Float32Array applies the same rounding, so the comparison is still exact.)
new Float32Array([c1, c2]),
{
c1,
c2,
},
`
override c1: f32;
override c2: f32;
struct Buf {
data : array<f32, 2>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(1) fn main() {
buf.data[0] = c1;
buf.data[1] = c2;
}
`
);
});
g.test('workgroup_size')
.desc(
`Test that constants can be used as workgroup size correctly, the compute shader should write the max local invocation id to the storage buffer which is equal to the workgroup size dimension given by the constant.`
)
.params(u =>
u //
.combine('isAsync', [true, false])
.combine('type', ['u32', 'i32'])
.combine('size', [3, 16, 64])
.combine('v', ['x', 'y', 'z'])
)
.fn(async t => {
const { isAsync, type, size, v } = t.params;
// Place the overridable dimension `d` in the x, y or z slot of
// @workgroup_size, keeping the other two at 1.
const workgroup_size_str = v === 'x' ? 'd' : v === 'y' ? '1, d' : '1, 1, d';
await t.ExpectShaderOutputWithConstants(
isAsync,
new Uint32Array([size]),
{
d: size,
},
`
override d: ${type};
struct Buf {
data : array<u32, 1>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(${workgroup_size_str}) fn main(
@builtin(local_invocation_id) local_invocation_id : vec3<u32>
) {
if (local_invocation_id.${v} >= u32(d - 1)) {
buf.data[0] = local_invocation_id.${v} + 1;
}
}
`
);
});
g.test('shared_shader_module')
.desc(
`Test that when the same shader module is shared by different pipelines, the correct constant values are used as output to the storage buffer. The constant value should not affect other pipeline sharing the same shader module.`
)
.params(u => u.combine('isAsync', [true, false]))
.fn(async t => {
// One module, two pipelines with different values for `a`; each writes
// into its own buffer so cross-contamination would be visible.
const module = t.device.createShaderModule({
code: `
override a: u32;
struct Buf {
data : array<u32, 1>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(1) fn main() {
buf.data[0] = a;
}`,
});
const expects = [new Uint32Array([1]), new Uint32Array([2])];
const buffers = [
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
];
const descriptors = [
{
layout: 'auto',
compute: {
module,
entryPoint: 'main',
constants: {
a: 1,
},
},
},
{
layout: 'auto',
compute: {
module,
entryPoint: 'main',
constants: {
a: 2,
},
},
},
];
const promises = t.params.isAsync
? Promise.all([
t.device.createComputePipelineAsync(descriptors[0]),
t.device.createComputePipelineAsync(descriptors[1]),
])
: Promise.resolve([
t.device.createComputePipeline(descriptors[0]),
t.device.createComputePipeline(descriptors[1]),
]);
const pipelines = await promises;
const bindGroups = [
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[0], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[0].getBindGroupLayout(0),
}),
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[1], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[1].getBindGroupLayout(0),
}),
];
// Run both pipelines back-to-back in the same compute pass.
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipelines[0]);
pass.setBindGroup(0, bindGroups[0]);
pass.dispatchWorkgroups(1);
pass.setPipeline(pipelines[1]);
pass.setBindGroup(0, bindGroups[1]);
pass.dispatchWorkgroups(1);
pass.end();
t.device.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(buffers[0], expects[0]);
t.expectGPUBufferValuesEqual(buffers[1], expects[1]);
});
g.test('multi_entry_points')
.desc(
`Test that constants used for different entry points are used correctly as output to the storage buffer. They should have no impact for pipeline using entry points that doesn't reference them.`
)
.params(u => u.combine('isAsync', [true, false]))
.fn(async t => {
// One module with three entry points: main1 reads c1, main2 reads c2, and
// main3 uses c3 as its workgroup size. Four pipelines are built from it
// (main1 twice with different c1 values) writing to separate buffers.
const module = t.device.createShaderModule({
code: `
override c1: u32;
override c2: u32;
override c3: u32;
struct Buf {
data : array<u32, 1>
}
@group(0) @binding(0) var<storage, read_write> buf : Buf;
@compute @workgroup_size(1) fn main1() {
buf.data[0] = c1;
}
@compute @workgroup_size(1) fn main2() {
buf.data[0] = c2;
}
@compute @workgroup_size(c3) fn main3() {
buf.data[0] = 3u;
}`,
});
const expects = [
new Uint32Array([1]),
new Uint32Array([2]),
new Uint32Array([3]),
new Uint32Array([4]),
];
const buffers = [
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
t.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
}),
];
const descriptors = [
{
layout: 'auto',
compute: {
module,
entryPoint: 'main1',
constants: {
c1: 1,
},
},
},
{
layout: 'auto',
compute: {
module,
entryPoint: 'main2',
constants: {
c2: 2,
},
},
},
{
layout: 'auto',
compute: {
module,
entryPoint: 'main3',
constants: {
// c3 is used as workgroup size
c3: 1,
},
},
},
{
layout: 'auto',
compute: {
module,
entryPoint: 'main1',
constants: {
// assign a different value to c1
c1: 4,
},
},
},
];
const promises = t.params.isAsync
? Promise.all([
t.device.createComputePipelineAsync(descriptors[0]),
t.device.createComputePipelineAsync(descriptors[1]),
t.device.createComputePipelineAsync(descriptors[2]),
t.device.createComputePipelineAsync(descriptors[3]),
])
: Promise.resolve([
t.device.createComputePipeline(descriptors[0]),
t.device.createComputePipeline(descriptors[1]),
t.device.createComputePipeline(descriptors[2]),
t.device.createComputePipeline(descriptors[3]),
]);
const pipelines = await promises;
const bindGroups = [
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[0], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[0].getBindGroupLayout(0),
}),
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[1], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[1].getBindGroupLayout(0),
}),
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[2], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[2].getBindGroupLayout(0),
}),
t.device.createBindGroup({
entries: [
{
binding: 0,
resource: { buffer: buffers[3], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
},
],
layout: pipelines[3].getBindGroupLayout(0),
}),
];
// Dispatch all four pipelines in one pass, then verify each buffer saw
// only its own pipeline's constant values.
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipelines[0]);
pass.setBindGroup(0, bindGroups[0]);
pass.dispatchWorkgroups(1);
pass.setPipeline(pipelines[1]);
pass.setBindGroup(0, bindGroups[1]);
pass.dispatchWorkgroups(1);
pass.setPipeline(pipelines[2]);
pass.setBindGroup(0, bindGroups[2]);
pass.dispatchWorkgroups(1);
pass.setPipeline(pipelines[3]);
pass.setBindGroup(0, bindGroups[3]);
pass.dispatchWorkgroups(1);
pass.end();
t.device.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(buffers[0], expects[0]);
t.expectGPUBufferValuesEqual(buffers[1], expects[1]);
t.expectGPUBufferValuesEqual(buffers[2], expects[2]);
t.expectGPUBufferValuesEqual(buffers[3], expects[3]);
});

View file

@ -0,0 +1,93 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests for GPUDevice.lost.
`;
import { Fixture } from '../../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { attemptGarbageCollection } from '../../../../common/util/collect_garbage.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import {
assert,
assertNotSettledWithinTime,
raceWithRejectOnTimeout,
} from '../../../../common/util/util.js';
/**
 * Fixture for GPUDevice.lost tests. Wraps waits on the `lost` promise with a
 * timeout so a test fails promptly instead of hanging when the promise never
 * settles.
 */
class DeviceLostTests extends Fixture {
  // Default timeout for waiting for device lost is 2 seconds.
  kDeviceLostTimeoutMS = 2000;

  /**
   * Returns a promise for `lost` that rejects with 'device was not lost' if it
   * does not settle within kDeviceLostTimeoutMS.
   */
  getDeviceLostWithTimeout(lost) {
    return raceWithRejectOnTimeout(lost, this.kDeviceLostTimeoutMS, 'device was not lost');
  }

  /**
   * Registers an eventual expectation that `device` reports loss with
   * reason === 'destroyed'. A timeout is recorded as an expectation failure on
   * the test record (with the nice stack's message replaced) rather than thrown.
   */
  expectDeviceDestroyed(device) {
    this.eventualAsyncExpectation(async niceStack => {
      try {
        const lost = await this.getDeviceLostWithTimeout(device.lost);
        this.expect(lost.reason === 'destroyed', 'device was lost from destroy');
      } catch (ex) {
        niceStack.message = 'device was not lost';
        this.rec.expectationFailed(niceStack);
      }
    });
  }
}
export const g = makeTestGroup(DeviceLostTests);

g.test('not_lost_on_gc')
  .desc(
    `'lost' is never resolved by GPUDevice being garbage collected (with attemptGarbageCollection).`
  )
  .fn(async t => {
    // Wraps a lost promise object creation in a function scope so that the device has the best
    // chance of being gone and ready for GC before trying to resolve the lost promise.
    const { lost } = await (async () => {
      const adapter = await getGPU(t.rec).requestAdapter();
      assert(adapter !== null);
      const lost = (await adapter.requestDevice()).lost;
      return { lost };
    })();
    // The lost promise must stay pending for the whole timeout window.
    await assertNotSettledWithinTime(lost, t.kDeviceLostTimeoutMS, 'device was unexpectedly lost');
    // NOTE(review): GC is attempted only *after* the not-settled window has elapsed —
    // confirm this ordering is intended (GC before/during the wait would stress the
    // "lost is not resolved by GC" claim harder).
    await attemptGarbageCollection();
  });

g.test('lost_on_destroy')
  .desc(`'lost' is resolved, with reason='destroyed', on GPUDevice.destroy().`)
  .fn(async t => {
    const adapter = await getGPU(t.rec).requestAdapter();
    assert(adapter !== null);
    const device = await adapter.requestDevice();
    // Register the expectation before destroy() so the loss is observed.
    t.expectDeviceDestroyed(device);
    device.destroy();
  });

g.test('same_object')
  .desc(`'lost' provides the same Promise and GPUDeviceLostInfo objects each time it's accessed.`)
  .fn(async t => {
    const adapter = await getGPU(t.rec).requestAdapter();
    assert(adapter !== null);
    const device = await adapter.requestDevice();
    // The promises should be the same promise object.
    const lostPromise1 = device.lost;
    const lostPromise2 = device.lost;
    t.expect(lostPromise1 === lostPromise2);
    // Promise object should still be the same after destroy.
    device.destroy();
    const lostPromise3 = device.lost;
    t.expect(lostPromise1 === lostPromise3);
    // The results should also be the same result object.
    const lost1 = await t.getDeviceLostWithTimeout(lostPromise1);
    const lost2 = await t.getDeviceLostWithTimeout(lostPromise2);
    const lost3 = await t.getDeviceLostWithTimeout(lostPromise3);
    // Promise object should still be the same after we've been notified about device loss.
    const lostPromise4 = device.lost;
    t.expect(lostPromise1 === lostPromise4);
    const lost4 = await t.getDeviceLostWithTimeout(lostPromise4);
    t.expect(lost1 === lost2 && lost2 === lost3 && lost3 === lost4);
  });

View file

@ -1,145 +0,0 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = '';
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { attemptGarbageCollection } from '../../../common/framework/util/collect_garbage.js';
import { raceWithRejectOnTimeout } from '../../../common/framework/util/util.js';
import { GPUTest } from '../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('initial,no_descriptor').fn(t => {
const fence = t.queue.createFence();
t.expect(fence.getCompletedValue() === 0);
});
g.test('initial,empty_descriptor').fn(t => {
const fence = t.queue.createFence({});
t.expect(fence.getCompletedValue() === 0);
});
g.test('initial,descriptor_with_initialValue').fn(t => {
const fence = t.queue.createFence({ initialValue: 2 });
t.expect(fence.getCompletedValue() === 2);
});
// Promise resolves when onCompletion value is less than signal value.
g.test('wait,less_than_signaled').fn(async t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
await fence.onCompletion(1);
t.expect(fence.getCompletedValue() === 2);
});
// Promise resolves when onCompletion value is equal to signal value.
g.test('wait,equal_to_signaled').fn(async t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
await fence.onCompletion(2);
t.expect(fence.getCompletedValue() === 2);
});
// All promises resolve when signal is called once.
g.test('wait,signaled_once').fn(async t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 20);
const promises = [];
for (let i = 0; i <= 20; ++i) {
promises.push(
fence.onCompletion(i).then(() => {
t.expect(fence.getCompletedValue() >= i);
})
);
}
await Promise.all(promises);
});
// Promise resolves when signal is called multiple times.
g.test('wait,signaled_multiple_times').fn(async t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 1);
t.queue.signal(fence, 2);
await fence.onCompletion(2);
t.expect(fence.getCompletedValue() === 2);
});
// Promise resolves if fence has already completed.
g.test('wait,already_completed').fn(async t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
// Wait for value to update.
while (fence.getCompletedValue() < 2) {
await new Promise(resolve => {
requestAnimationFrame(resolve);
});
}
t.expect(fence.getCompletedValue() === 2);
await fence.onCompletion(2);
t.expect(fence.getCompletedValue() === 2);
});
// Test many calls to signal and wait on fence values one at a time.
g.test('wait,many,serially').fn(async t => {
const fence = t.queue.createFence();
for (let i = 1; i <= 20; ++i) {
t.queue.signal(fence, i);
await fence.onCompletion(i);
t.expect(fence.getCompletedValue() === i);
}
});
// Test many calls to signal and wait on all fence values.
g.test('wait,many,parallel').fn(async t => {
const fence = t.queue.createFence();
const promises = [];
for (let i = 1; i <= 20; ++i) {
t.queue.signal(fence, i);
promises.push(
fence.onCompletion(i).then(() => {
t.expect(fence.getCompletedValue() >= i);
})
);
}
await Promise.all(promises);
t.expect(fence.getCompletedValue() === 20);
});
// Test onCompletion promise resolves within a time limit.
g.test('wait,resolves_within_timeout').fn(t => {
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
return raceWithRejectOnTimeout(
(async () => {
await fence.onCompletion(2);
t.expect(fence.getCompletedValue() === 2);
})(),
100,
'The fence has not been resolved within time limit.'
);
});
// Test dropping references to the fence and onCompletion promise does not crash.
g.test('drop,fence_and_promise').fn(async t => {
{
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
fence.onCompletion(2);
}
await attemptGarbageCollection();
});
// Test dropping references to the fence and holding the promise does not crash.
g.test('drop,promise').fn(async t => {
let promise;
{
const fence = t.queue.createFence();
t.queue.signal(fence, 2);
promise = fence.onCompletion(2);
}
await attemptGarbageCollection();
await promise;
});

View file

@ -0,0 +1,280 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests for object labels.
`;
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { keysOf } from '../../../common/util/data_tables.js';
import { getGPU } from '../../../common/util/navigator_gpu.js';
import { GPUTest } from '../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
const kTestFunctions = {
createBuffer: (t, label) => {
const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.COPY_DST, label });
t.expect(buffer.label === label);
buffer.destroy();
t.expect(buffer.label === label);
},
requestDevice: async (t, label) => {
const gpu = getGPU(t.rec);
const adapter = await gpu.requestAdapter();
t.expect(!!adapter);
const device = await adapter.requestDevice({ label });
t.expect(!!device);
t.expect(device.label === label);
device.destroy();
t.expect(device.label === label);
},
createTexture: (t, label) => {
const texture = t.device.createTexture({
label,
size: [1, 1, 1],
format: 'rgba8unorm',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
t.expect(texture.label === label);
texture.destroy();
t.expect(texture.label === label);
},
createSampler: (t, label) => {
const sampler = t.device.createSampler({ label });
t.expect(sampler.label === label);
},
createBindGroupLayout: (t, label) => {
const bindGroupLayout = t.device.createBindGroupLayout({ label, entries: [] });
t.expect(bindGroupLayout.label === label);
},
createPipelineLayout: (t, label) => {
const pipelineLayout = t.device.createPipelineLayout({ label, bindGroupLayouts: [] });
t.expect(pipelineLayout.label === label);
},
createBindGroup: (t, label) => {
const layout = t.device.createBindGroupLayout({ entries: [] });
const bindGroup = t.device.createBindGroup({ label, layout, entries: [] });
t.expect(bindGroup.label === label);
},
createShaderModule: (t, label) => {
const shaderModule = t.device.createShaderModule({
label,
code: `
@vertex fn vs() -> @builtin(position) vec4f {
return vec4f(0, 0, 0, 1);
}
`,
});
t.expect(shaderModule.label === label);
},
createComputePipeline: (t, label) => {
const module = t.device.createShaderModule({
code: `
@compute @workgroup_size(1u) fn foo() {}
`,
});
const computePipeline = t.device.createComputePipeline({
label,
layout: 'auto',
compute: {
module,
entryPoint: 'foo',
},
});
t.expect(computePipeline.label === label);
},
createRenderPipeline: (t, label) => {
const module = t.device.createShaderModule({
code: `
@vertex fn foo() -> @builtin(position) vec4f {
return vec4f(0, 0, 0, 1);
}
`,
});
const renderPipeline = t.device.createRenderPipeline({
label,
layout: 'auto',
vertex: {
module,
entryPoint: 'foo',
},
});
t.expect(renderPipeline.label === label);
},
createComputePipelineAsync: async (t, label) => {
const module = t.device.createShaderModule({
code: `
@compute @workgroup_size(1u) fn foo() {}
`,
});
const computePipeline = await t.device.createComputePipelineAsync({
label,
layout: 'auto',
compute: {
module,
entryPoint: 'foo',
},
});
t.expect(computePipeline.label === label);
},
createRenderPipelineAsync: async (t, label) => {
const module = t.device.createShaderModule({
label,
code: `
@vertex fn foo() -> @builtin(position) vec4f {
return vec4f(0, 0, 0, 1);
}
`,
});
const renderPipeline = await t.device.createRenderPipelineAsync({
label,
layout: 'auto',
vertex: {
module,
entryPoint: 'foo',
},
});
t.expect(renderPipeline.label === label);
},
createCommandEncoder: (t, label) => {
const encoder = t.device.createCommandEncoder({ label });
t.expect(encoder.label === label);
},
createRenderBundleEncoder: (t, label) => {
const encoder = t.device.createRenderBundleEncoder({
label,
colorFormats: ['rgba8unorm'],
});
t.expect(encoder.label === label);
},
createQuerySet: (t, label) => {
const querySet = t.device.createQuerySet({
label,
type: 'occlusion',
count: 1,
});
t.expect(querySet.label === label);
querySet.destroy();
t.expect(querySet.label === label);
},
beginRenderPass: (t, label) => {
const texture = t.device.createTexture({
label,
size: [1, 1, 1],
format: 'rgba8unorm',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
const label2 = `${label}-2`;
const encoder = t.device.createCommandEncoder();
encoder.label = label2;
const renderPass = encoder.beginRenderPass({
label,
colorAttachments: [{ view: texture.createView(), loadOp: 'clear', storeOp: 'store' }],
});
t.expect(renderPass.label === label);
renderPass.end();
t.expect(renderPass.label === label);
encoder.finish();
t.expect(renderPass.label === label);
t.expect(encoder.label === label2);
texture.destroy();
},
beginComputePass: (t, label) => {
const label2 = `${label}-2`;
const encoder = t.device.createCommandEncoder();
encoder.label = label2;
const computePass = encoder.beginComputePass({ label });
t.expect(computePass.label === label);
computePass.end();
t.expect(computePass.label === label);
encoder.finish();
t.expect(computePass.label === label);
t.expect(encoder.label === label2);
},
finish: (t, label) => {
const encoder = t.device.createCommandEncoder();
const commandBuffer = encoder.finish({ label });
t.expect(commandBuffer.label === label);
},
createView: (t, label) => {
const texture = t.device.createTexture({
size: [1, 1, 1],
format: 'rgba8unorm',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
const view = texture.createView({ label });
t.expect(view.label === label);
texture.destroy();
t.expect(view.label === label);
},
};
// For every entry in kTestFunctions, verify descriptor.label propagates to object.label,
// including labels containing NUL bytes and non-ASCII code points.
g.test('object_has_descriptor_label')
  .desc(
    `
For every create function, the descriptor.label is carried over to the object.label.
TODO: test importExternalTexture
TODO: make a best effort and generating an error that is likely to use label. There's nothing to check for
but it may surface bugs related to unusual labels.
`
  )
  .params(u =>
    u
      .combine('name', keysOf(kTestFunctions))
      .beginSubcases()
      .combine('label', ['label', '\0', 'null\0in\0label', '🌞👆'])
  )
  .fn(async t => {
    const { name, label } = t.params;
    // Entries may be sync or async; only await when a Promise was returned.
    const result = kTestFunctions[name](t, label);
    if (result instanceof Promise) {
      await result;
    }
  });
g.test('wrappers_do_not_share_labels')
  .desc('test that different wrapper objects for the same GPU object do not share labels')
  .fn(t => {
    const module = t.device.createShaderModule({
      code: `
@group(0) @binding(0) var<uniform> pos: vec4f;
@vertex fn main() -> @builtin(position) vec4f {
return pos;
}
`,
    });
    const pipeline = t.device.createRenderPipeline({
      layout: 'auto',
      vertex: {
        module,
        entryPoint: 'main',
      },
    });
    // Two calls to getBindGroupLayout(0) must yield distinct wrapper objects,
    // and setting a label on one must not be visible through the other.
    const layout1 = pipeline.getBindGroupLayout(0);
    const layout2 = pipeline.getBindGroupLayout(0);
    t.expect(layout1 !== layout2);
    layout1.label = 'foo';
    layout2.label = 'bar';
    t.expect(layout1.label === 'foo');
    t.expect(layout2.label === 'bar');
  });

View file

@ -0,0 +1,871 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert, unreachable } from '../../../../../common/util/util.js';
import { GPUTest } from '../../../../gpu_test.js';
import { checkElementsEqualEither } from '../../../../util/check_contents.js';
/** All buffer write operations exercised by the buffer memory-synchronization tests. */
export const kAllWriteOps = ['storage', 'b2b-copy', 't2b-copy', 'write-buffer'];

/** All buffer read operations exercised by the buffer memory-synchronization tests. */
export const kAllReadOps = [
  'input-vertex',
  'input-index',
  'input-indirect',
  'input-indirect-index',
  'input-indirect-dispatch',
  'constant-uniform',
  'storage-read',
  'b2b-copy',
  'b2t-copy',
];
/**
 * Per-op table of the encoder/queue contexts in which each buffer read/write op
 * can be issued. Context lists are shared between entries where identical; they
 * are only ever queried with `includes`.
 */
const kContextsQueue = ['queue'];
const kContextsCommandEncoder = ['command-encoder'];
const kContextsAnyPass = ['compute-pass-encoder', 'render-pass-encoder', 'render-bundle-encoder'];
const kContextsRender = ['render-pass-encoder', 'render-bundle-encoder'];
const kContextsCompute = ['compute-pass-encoder'];
const kOpInfo = {
  'write-buffer': { contexts: kContextsQueue },
  'b2t-copy': { contexts: kContextsCommandEncoder },
  'b2b-copy': { contexts: kContextsCommandEncoder },
  't2b-copy': { contexts: kContextsCommandEncoder },
  storage: { contexts: kContextsAnyPass },
  'storage-read': { contexts: kContextsAnyPass },
  'input-vertex': { contexts: kContextsRender },
  'input-index': { contexts: kContextsRender },
  'input-indirect': { contexts: kContextsRender },
  'input-indirect-index': { contexts: kContextsRender },
  'input-indirect-dispatch': { contexts: kContextsCompute },
  'constant-uniform': { contexts: kContextsRender },
};

/**
 * Returns whether the pair of ops is usable in the pair of contexts.
 *
 * Each op must be issuable in its corresponding context. Additionally, when
 * either context is a render pass or render bundle encoder, a 'storage' write
 * may not be paired with any in-pass usage: write+other-usage is invalid in a
 * render pass, and storage+storage is racy, so its results cannot be reliably
 * tested. Copy and write-buffer ops never occur inside a render pass and are
 * therefore unrestricted by that rule.
 */
export function checkOpsValidForContext(ops, context) {
  const [op1, op2] = ops;
  const [ctx1, ctx2] = context;
  if (!kOpInfo[op1].contexts.includes(ctx1) || !kOpInfo[op2].contexts.includes(ctx2)) {
    return false;
  }
  const involvesRender = kContextsRender.includes(ctx1) || kContextsRender.includes(ctx2);
  if (!involvesRender) {
    return true;
  }
  // In-pass usages that conflict with a 'storage' write inside a render
  // pass/bundle. 'input-indirect-dispatch' is compute-only and deliberately
  // absent (matching the original switch's fall-through behavior).
  const conflictsWithStorage = new Set([
    'storage',
    'storage-read',
    'input-vertex',
    'input-index',
    'input-indirect',
    'input-indirect-index',
    'constant-uniform',
  ]);
  const pairAllowed = (a, b) => !(a === 'storage' && conflictsWithStorage.has(b));
  return pairAllowed(op1, op2) && pairAllowed(op2, op1);
}
// Minimal WGSL vertex stage emitting one fixed position and nothing else; used by
// pipelines in this file whose observable writes happen in the fragment stage.
const kDummyVertexShader = `
@vertex fn vert_main() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.5, 0.5, 0.0, 1.0);
}
`;
// Note: If it would be useful to have any of these helpers be separate from the fixture,
// they can be refactored into standalone functions.
export class BufferSyncTest extends GPUTest {
// Vertex and index buffers used in read render pass
// Temp buffer and texture with values for buffer/texture copy write op
// There can be at most 2 write op
tmpValueBuffers = [undefined, undefined];
tmpValueTextures = [undefined, undefined];
// These intermediate buffers/textures are created before any read/write op
// to avoid extra memory synchronization between ops introduced by await on buffer/texture creations.
// Create extra buffers/textures needed by write operation
// Create extra buffers/textures needed by write operation.
// Pre-creates the value source a later copy-type write op will copy from, so that
// the awaited resource creation does not introduce extra synchronization between
// the ops under test. `slot` selects which of the (at most 2) write ops this
// resource belongs to; non-copy write ops need no intermediate resource.
async createIntermediateBuffersAndTexturesForWriteOp(writeOp, slot, value) {
  switch (writeOp) {
    case 'b2b-copy':
      this.tmpValueBuffers[slot] = await this.createBufferWithValue(value);
      break;
    case 't2b-copy':
      this.tmpValueTextures[slot] = await this.createTextureWithValue(value);
      break;
    default:
      break;
  }
}
// Create extra buffers/textures needed by read operation
async createBuffersForReadOp(readOp, srcValue, opValue) {
// This helps create values that will be written into dst buffer by the readop
switch (readOp) {
case 'input-index':
// The index buffer will be the src buffer of the read op.
// The src value for readOp will be 0
// If the index buffer value is 0, the src value is written into the dst buffer.
// If the index buffer value is 1, the op value is written into the dst buffer.
this.vertexBuffer = await this.createBufferWithValues([srcValue, opValue]);
break;
case 'input-indirect':
// The indirect buffer for the draw cmd will be the src buffer of the read op.
// If the first value in the indirect buffer is 1, then the op value in vertex buffer will be written into dst buffer.
// If the first value in indirect buffer is 0, then nothing will be write into dst buffer.
this.vertexBuffer = await this.createBufferWithValues([opValue]);
break;
case 'input-indirect-index':
// The indirect buffer for draw indexed cmd will be the src buffer of the read op.
// If the first value in the indirect buffer is 1, then the opValue in vertex buffer will be written into dst buffer.
// If the first value in indirect buffer is 0, then nothing will be write into dst buffer.
this.vertexBuffer = await this.createBufferWithValues([opValue]);
this.indexBuffer = await this.createBufferWithValues([0]);
break;
default:
break;
}
let srcBuffer;
switch (readOp) {
case 'input-indirect':
// vertexCount = {0, 1}
// instanceCount = 1
// firstVertex = 0
// firstInstance = 0
srcBuffer = await this.createBufferWithValues([srcValue, 1, 0, 0]);
break;
case 'input-indirect-index':
// indexCount = {0, 1}
// instanceCount = 1
// firstIndex = 0
// baseVertex = 0
// firstInstance = 0
srcBuffer = await this.createBufferWithValues([srcValue, 1, 0, 0, 0]);
break;
case 'input-indirect-dispatch':
// workgroupCountX = {0, 1}
// workgroupCountY = 1
// workgroupCountZ = 1
srcBuffer = await this.createBufferWithValues([srcValue, 1, 1]);
break;
default:
srcBuffer = await this.createBufferWithValue(srcValue);
break;
}
const dstBuffer = this.trackForCleanup(
this.device.createBuffer({
size: Uint32Array.BYTES_PER_ELEMENT,
usage:
GPUBufferUsage.COPY_SRC |
GPUBufferUsage.COPY_DST |
GPUBufferUsage.STORAGE |
GPUBufferUsage.VERTEX |
GPUBufferUsage.INDEX |
GPUBufferUsage.INDIRECT |
GPUBufferUsage.UNIFORM,
})
);
return { srcBuffer, dstBuffer };
}
// Create a buffer with 1 uint32 element, and initialize it to a specified value.
// Create a buffer with 1 uint32 element, and initialize it to a specified value.
// The buffer is created mapped, filled, and unmapped; we then wait for queued work
// to finish so later ops observe a fully initialized buffer. Usage flags cover
// every read/write op these sync tests may apply to the buffer.
async createBufferWithValue(initValue) {
  const buffer = this.trackForCleanup(
    this.device.createBuffer({
      mappedAtCreation: true,
      size: Uint32Array.BYTES_PER_ELEMENT,
      usage:
        GPUBufferUsage.COPY_SRC |
        GPUBufferUsage.COPY_DST |
        GPUBufferUsage.STORAGE |
        GPUBufferUsage.VERTEX |
        GPUBufferUsage.INDEX |
        GPUBufferUsage.INDIRECT |
        GPUBufferUsage.UNIFORM,
    })
  );
  new Uint32Array(buffer.getMappedRange()).fill(initValue);
  buffer.unmap();
  await this.queue.onSubmittedWorkDone();
  return buffer;
}
// Create a buffer, and initialize it to the specified values.
async createBufferWithValues(initValues) {
const buffer = this.trackForCleanup(
this.device.createBuffer({
mappedAtCreation: true,
size: Uint32Array.BYTES_PER_ELEMENT * initValues.length,
usage:
GPUBufferUsage.COPY_SRC |
GPUBufferUsage.COPY_DST |
GPUBufferUsage.STORAGE |
GPUBufferUsage.VERTEX |
GPUBufferUsage.INDEX |
GPUBufferUsage.INDIRECT |
GPUBufferUsage.UNIFORM,
})
);
const bufferView = new Uint32Array(buffer.getMappedRange());
bufferView.set(initValues);
buffer.unmap();
await this.queue.onSubmittedWorkDone();
return buffer;
}
// Create a 1x1 texture, and initialize it to a specified value for all elements.
// Create a 1x1 texture, and initialize it to a specified value for all elements.
// Initialized via queue.writeTexture; bytesPerRow is 256 to satisfy WebGPU's
// row-pitch alignment, though only one r32uint texel is actually written.
async createTextureWithValue(initValue) {
  const data = new Uint32Array(1).fill(initValue);
  const texture = this.trackForCleanup(
    this.device.createTexture({
      size: { width: 1, height: 1, depthOrArrayLayers: 1 },
      format: 'r32uint',
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    })
  );
  this.device.queue.writeTexture(
    { texture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
    data,
    { offset: 0, bytesPerRow: 256, rowsPerImage: 1 },
    { width: 1, height: 1, depthOrArrayLayers: 1 }
  );
  // Wait so the texture is fully initialized before it is used as a copy source.
  await this.queue.onSubmittedWorkDone();
  return texture;
}
createBindGroup(pipeline, buffer) {
return this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer } }],
});
}
// Create a compute pipeline and write given data into storage buffer.
createStorageWriteComputePipeline(value) {
const wgslCompute = `
struct Data {
a : u32
};
@group(0) @binding(0) var<storage, read_write> data : Data;
@compute @workgroup_size(1) fn main() {
data.a = ${value}u;
}
`;
return this.device.createComputePipeline({
layout: 'auto',
compute: {
module: this.device.createShaderModule({
code: wgslCompute,
}),
entryPoint: 'main',
},
});
}
createTrivialRenderPipeline(wgslShaders) {
return this.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: this.device.createShaderModule({
code: wgslShaders.vertex,
}),
entryPoint: 'vert_main',
},
fragment: {
module: this.device.createShaderModule({
code: wgslShaders.fragment,
}),
entryPoint: 'frag_main',
targets: [{ format: 'rgba8unorm' }],
},
primitive: { topology: 'point-list' },
});
}
// Create a render pipeline and write given data into storage buffer at fragment stage.
createStorageWriteRenderPipeline(value) {
const wgslShaders = {
vertex: kDummyVertexShader,
fragment: `
struct Data {
a : u32
};
@group(0) @binding(0) var<storage, read_write> data : Data;
@fragment fn frag_main() -> @location(0) vec4<f32> {
data.a = ${value}u;
return vec4<f32>(); // result does't matter
}
`,
};
return this.createTrivialRenderPipeline(wgslShaders);
}
beginSimpleRenderPass(encoder) {
const view = this.trackForCleanup(
this.device.createTexture({
size: { width: 1, height: 1, depthOrArrayLayers: 1 },
format: 'rgba8unorm',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
})
).createView();
return encoder.beginRenderPass({
colorAttachments: [
{
view,
clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
},
],
});
}
// Write buffer via draw call in render pass. Use bundle if needed.
encodeWriteAsStorageBufferInRenderPass(renderer, buffer, value) {
const pipeline = this.createStorageWriteRenderPipeline(value);
const bindGroup = this.createBindGroup(pipeline, buffer);
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.draw(1, 1, 0, 0);
}
// Write buffer via dispatch call in compute pass.
encodeWriteAsStorageBufferInComputePass(pass, buffer, value) {
const pipeline = this.createStorageWriteComputePipeline(value);
const bindGroup = this.createBindGroup(pipeline, buffer);
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
}
// Write buffer via BufferToBuffer copy.
encodeWriteByB2BCopy(encoder, buffer, slot) {
const tmpBuffer = this.tmpValueBuffers[slot];
assert(tmpBuffer !== undefined);
// The write operation via b2b copy is just encoded into command encoder, it doesn't write immediately.
encoder.copyBufferToBuffer(tmpBuffer, 0, buffer, 0, Uint32Array.BYTES_PER_ELEMENT);
}
// Write buffer via TextureToBuffer copy.
encodeWriteByT2BCopy(encoder, buffer, slot) {
const tmpTexture = this.tmpValueTextures[slot];
assert(tmpTexture !== undefined);
// The write operation via t2b copy is just encoded into command encoder, it doesn't write immediately.
encoder.copyTextureToBuffer(
{ texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ buffer, bytesPerRow: 256 },
{ width: 1, height: 1, depthOrArrayLayers: 1 }
);
}
// Write buffer via writeBuffer API on queue
// Write buffer via writeBuffer API on queue.
// Unlike the encoder-based writes above, this issues the write on the queue
// directly rather than recording it into a command encoder.
writeByWriteBuffer(buffer, value) {
  const data = new Uint32Array(1).fill(value);
  this.device.queue.writeBuffer(buffer, 0, data);
}
// Issue write operation via render pass, compute pass, copy, etc.
encodeWriteOp(helper, operation, context, buffer, writeOpSlot, value) {
helper.ensureContext(context);
switch (operation) {
case 'write-buffer':
this.writeByWriteBuffer(buffer, value);
break;
case 'storage':
switch (context) {
case 'render-pass-encoder':
assert(helper.renderPassEncoder !== undefined);
this.encodeWriteAsStorageBufferInRenderPass(helper.renderPassEncoder, buffer, value);
break;
case 'render-bundle-encoder':
assert(helper.renderBundleEncoder !== undefined);
this.encodeWriteAsStorageBufferInRenderPass(helper.renderBundleEncoder, buffer, value);
break;
case 'compute-pass-encoder':
assert(helper.computePassEncoder !== undefined);
this.encodeWriteAsStorageBufferInComputePass(helper.computePassEncoder, buffer, value);
break;
default:
unreachable();
}
break;
case 'b2b-copy':
assert(helper.commandEncoder !== undefined);
this.encodeWriteByB2BCopy(helper.commandEncoder, buffer, writeOpSlot);
break;
case 't2b-copy':
assert(helper.commandEncoder !== undefined);
this.encodeWriteByT2BCopy(helper.commandEncoder, buffer, writeOpSlot);
break;
default:
unreachable();
}
}
// Create a compute pipeline: read from src buffer and write it into the storage buffer.
createStorageReadComputePipeline() {
const wgslCompute = `
struct Data {
a : u32
};
@group(0) @binding(0) var<storage, read> srcData : Data;
@group(0) @binding(1) var<storage, read_write> dstData : Data;
@compute @workgroup_size(1) fn main() {
dstData.a = srcData.a;
}
`;
return this.device.createComputePipeline({
layout: 'auto',
compute: {
module: this.device.createShaderModule({
code: wgslCompute,
}),
entryPoint: 'main',
},
});
}
createBindGroupSrcDstBuffer(pipeline, srcBuffer, dstBuffer) {
return this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: srcBuffer } },
{ binding: 1, resource: { buffer: dstBuffer } },
],
});
}
// Create a render pipeline: read from vertex/index buffer and write it into the storage dst buffer at fragment stage.
createVertexReadRenderPipeline() {
const wgslShaders = {
vertex: `
struct VertexOutput {
@builtin(position) position : vec4<f32>,
@location(0) @interpolate(flat) data : u32,
};
@vertex fn vert_main(@location(0) input: u32) -> VertexOutput {
var output : VertexOutput;
output.position = vec4<f32>(0.5, 0.5, 0.0, 1.0);
output.data = input;
return output;
}
`,
fragment: `
struct Data {
a : u32
};
@group(0) @binding(0) var<storage, read_write> data : Data;
@fragment fn frag_main(@location(0) @interpolate(flat) input : u32) -> @location(0) vec4<f32> {
data.a = input;
return vec4<f32>(); // result does't matter
}
`,
};
return this.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: this.device.createShaderModule({
code: wgslShaders.vertex,
}),
entryPoint: 'vert_main',
buffers: [
{
arrayStride: Uint32Array.BYTES_PER_ELEMENT,
attributes: [
{
shaderLocation: 0,
offset: 0,
format: 'uint32',
},
],
},
],
},
fragment: {
module: this.device.createShaderModule({
code: wgslShaders.fragment,
}),
entryPoint: 'frag_main',
targets: [{ format: 'rgba8unorm' }],
},
primitive: { topology: 'point-list' },
});
}
// Create a render pipeline: read from uniform buffer and write it into the storage dst buffer at fragment stage.
createUniformReadRenderPipeline() {
const wgslShaders = {
vertex: kDummyVertexShader,
fragment: `
struct Data {
a : u32
};
@group(0) @binding(0) var<uniform> constant: Data;
@group(0) @binding(1) var<storage, read_write> data : Data;
@fragment fn frag_main() -> @location(0) vec4<f32> {
data.a = constant.a;
return vec4<f32>(); // result does't matter
}
`,
};
return this.createTrivialRenderPipeline(wgslShaders);
}
// Create a render pipeline: read from storage src buffer and write it into the storage dst buffer at fragment stage.
createStorageReadRenderPipeline() {
const wgslShaders = {
vertex: kDummyVertexShader,
fragment: `
struct Data {
a : u32
};
@group(0) @binding(0) var<storage, read> srcData : Data;
@group(0) @binding(1) var<storage, read_write> dstData : Data;
@fragment fn frag_main() -> @location(0) vec4<f32> {
dstData.a = srcData.a;
return vec4<f32>(); // result does't matter
}
`,
};
return this.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: this.device.createShaderModule({
code: wgslShaders.vertex,
}),
entryPoint: 'vert_main',
},
fragment: {
module: this.device.createShaderModule({
code: wgslShaders.fragment,
}),
entryPoint: 'frag_main',
targets: [{ format: 'rgba8unorm' }],
},
primitive: { topology: 'point-list' },
});
}
// Write buffer via dispatch call in compute pass.
encodeReadAsStorageBufferInComputePass(pass, srcBuffer, dstBuffer) {
const pipeline = this.createStorageReadComputePipeline();
const bindGroup = this.createBindGroupSrcDstBuffer(pipeline, srcBuffer, dstBuffer);
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
}
// Write buffer via dispatchWorkgroupsIndirect call in compute pass.
encodeReadAsIndirectBufferInComputePass(pass, srcBuffer, dstBuffer, value) {
const pipeline = this.createStorageWriteComputePipeline(value);
const bindGroup = this.createBindGroup(pipeline, dstBuffer);
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroupsIndirect(srcBuffer, 0);
}
// Read as vertex input and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsVertexBufferInRenderPass(renderer, srcBuffer, dstBuffer) {
const pipeline = this.createVertexReadRenderPipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
});
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.setVertexBuffer(0, srcBuffer);
renderer.draw(1);
}
// Read as index input and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsIndexBufferInRenderPass(renderer, srcBuffer, dstBuffer, vertexBuffer) {
const pipeline = this.createVertexReadRenderPipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
});
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.setVertexBuffer(0, vertexBuffer);
renderer.setIndexBuffer(srcBuffer, 'uint32');
renderer.drawIndexed(1);
}
// Read as indirect input and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsIndirectBufferInRenderPass(renderer, srcBuffer, dstBuffer, vertexBuffer) {
const pipeline = this.createVertexReadRenderPipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
});
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.setVertexBuffer(0, vertexBuffer);
renderer.drawIndirect(srcBuffer, 0);
}
// Read as indexed indirect input and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsIndexedIndirectBufferInRenderPass(
renderer,
srcBuffer,
dstBuffer,
vertexBuffer,
indexBuffer
) {
const pipeline = this.createVertexReadRenderPipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
});
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.setVertexBuffer(0, vertexBuffer);
renderer.setIndexBuffer(indexBuffer, 'uint32');
renderer.drawIndexedIndirect(srcBuffer, 0);
}
// Read as uniform buffer and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsUniformBufferInRenderPass(renderer, srcBuffer, dstBuffer) {
const pipeline = this.createUniformReadRenderPipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: srcBuffer } },
{ binding: 1, resource: { buffer: dstBuffer } },
],
});
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.draw(1);
}
// Read as storage buffer and write buffer via draw call in render pass. Use bundle if needed.
encodeReadAsStorageBufferInRenderPass(renderer, srcBuffer, dstBuffer) {
const pipeline = this.createStorageReadRenderPipeline();
const bindGroup = this.createBindGroupSrcDstBuffer(pipeline, srcBuffer, dstBuffer);
renderer.setBindGroup(0, bindGroup);
renderer.setPipeline(pipeline);
renderer.draw(1, 1, 0, 0);
}
// Read and write via BufferToBuffer copy.
encodeReadByB2BCopy(encoder, srcBuffer, dstBuffer) {
// The b2b copy is just encoded into command encoder, it doesn't write immediately.
encoder.copyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, Uint32Array.BYTES_PER_ELEMENT);
}
// Read and Write texture via BufferToTexture copy.
encodeReadByB2TCopy(encoder, srcBuffer, dstBuffer) {
const tmpTexture = this.trackForCleanup(
this.device.createTexture({
size: { width: 1, height: 1, depthOrArrayLayers: 1 },
format: 'r32uint',
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
})
);
// The b2t copy is just encoded into command encoder, it doesn't write immediately.
encoder.copyBufferToTexture(
{ buffer: srcBuffer, bytesPerRow: 256 },
{ texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ width: 1, height: 1, depthOrArrayLayers: 1 }
);
// The t2b copy is just encoded into command encoder, it doesn't write immediately.
encoder.copyTextureToBuffer(
{ texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
{ buffer: dstBuffer, bytesPerRow: 256 },
{ width: 1, height: 1, depthOrArrayLayers: 1 }
);
}
  // Encodes one read `operation` into the encoder appropriate for `context`:
  // the read observes srcBuffer and records its observation into dstBuffer.
  // Asserts that the helper actually holds the encoder kind the operation needs.
  encodeReadOp(helper, operation, context, srcBuffer, dstBuffer) {
    helper.ensureContext(context);
    // Render ops accept either a render pass or a render bundle encoder.
    const renderer =
      context === 'render-bundle-encoder' ? helper.renderBundleEncoder : helper.renderPassEncoder;
    const computePass = context === 'compute-pass-encoder' ? helper.computePassEncoder : undefined;
    switch (operation) {
      case 'input-vertex':
        // The srcBuffer is used as vertexBuffer.
        // draw writes the same value in srcBuffer[0] to dstBuffer[0].
        assert(renderer !== undefined);
        this.encodeReadAsVertexBufferInRenderPass(renderer, srcBuffer, dstBuffer);
        break;
      case 'input-index':
        // The srcBuffer is used as indexBuffer.
        // With this vertexBuffer, drawIndexed writes the same value in srcBuffer[0] to dstBuffer[0].
        assert(renderer !== undefined);
        assert(this.vertexBuffer !== undefined);
        this.encodeReadAsIndexBufferInRenderPass(renderer, srcBuffer, dstBuffer, this.vertexBuffer);
        break;
      case 'input-indirect':
        // The srcBuffer is used as indirectBuffer for drawIndirect.
        // srcBuffer[0] = 0 or 1 (vertexCount), which will decide the value written into dstBuffer to be either 0 or 1.
        assert(renderer !== undefined);
        assert(this.vertexBuffer !== undefined);
        this.encodeReadAsIndirectBufferInRenderPass(
          renderer,
          srcBuffer,
          dstBuffer,
          this.vertexBuffer
        );
        break;
      case 'input-indirect-index':
        // The srcBuffer is used as indirectBuffer for drawIndexedIndirect.
        // srcBuffer[0] = 0 or 1 (indexCount), which will decide the value written into dstBuffer to be either 0 or 1.
        assert(renderer !== undefined);
        assert(this.vertexBuffer !== undefined);
        assert(this.indexBuffer !== undefined);
        this.encodeReadAsIndexedIndirectBufferInRenderPass(
          renderer,
          srcBuffer,
          dstBuffer,
          this.vertexBuffer,
          this.indexBuffer
        );
        break;
      case 'input-indirect-dispatch':
        // The srcBuffer is used as indirectBuffer for dispatch.
        // srcBuffer[0] = 0 or 1 (workgroupCountX), which will decide the value written into dstBuffer to be either 0 or 1.
        assert(computePass !== undefined);
        this.encodeReadAsIndirectBufferInComputePass(computePass, srcBuffer, dstBuffer, 1);
        break;
      case 'constant-uniform':
        // The srcBuffer is used as uniform buffer.
        assert(renderer !== undefined);
        this.encodeReadAsUniformBufferInRenderPass(renderer, srcBuffer, dstBuffer);
        break;
      case 'storage-read':
        // Storage reads are supported in both render and compute contexts.
        switch (context) {
          case 'render-pass-encoder':
          case 'render-bundle-encoder':
            assert(renderer !== undefined);
            this.encodeReadAsStorageBufferInRenderPass(renderer, srcBuffer, dstBuffer);
            break;
          case 'compute-pass-encoder':
            assert(computePass !== undefined);
            this.encodeReadAsStorageBufferInComputePass(computePass, srcBuffer, dstBuffer);
            break;
          default:
            unreachable();
        }
        break;
      case 'b2b-copy':
        assert(helper.commandEncoder !== undefined);
        this.encodeReadByB2BCopy(helper.commandEncoder, srcBuffer, dstBuffer);
        break;
      case 'b2t-copy':
        assert(helper.commandEncoder !== undefined);
        this.encodeReadByB2TCopy(helper.commandEncoder, srcBuffer, dstBuffer);
        break;
      default:
        unreachable();
    }
  }
verifyData(buffer, expectedValue) {
const bufferData = new Uint32Array(1);
bufferData[0] = expectedValue;
this.expectGPUBufferValuesEqual(buffer, bufferData);
}
verifyDataTwoValidValues(buffer, expectedValue1, expectedValue2) {
const bufferData1 = new Uint32Array(1);
bufferData1[0] = expectedValue1;
const bufferData2 = new Uint32Array(1);
bufferData2[0] = expectedValue2;
this.expectGPUBufferValuesPassCheck(
buffer,
a => checkElementsEqualEither(a, [bufferData1, bufferData2]),
{ type: Uint32Array, typedLength: 1 }
);
}
}

View file

@ -0,0 +1,354 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Memory Synchronization Tests for multiple buffers: read before write, read after write, and write after write.
- Create multiple src buffers and initialize it to 0, wait on the fence to ensure the data is initialized.
Write Op: write a value (say 1) into the src buffer via render pass, compute pass, copy, write buffer, etc.
Read Op: read the value from the src buffer and write it to dst buffer via render pass (vertex, index, indirect input, uniform, storage), compute pass, copy etc.
Wait on another fence, then call expectContents to verify the dst buffer value.
- x= write op: {storage buffer in {compute, render, render-via-bundle}, t2b copy dst, b2b copy dst, writeBuffer}
- x= read op: {index buffer, vertex buffer, indirect buffer (draw, draw indexed, dispatch), uniform buffer, {readonly, readwrite} storage buffer in {compute, render, render-via-bundle}, b2b copy src, b2t copy src}
- x= read-write sequence: {read then write, write then read, write then write}
- x= op context: {queue, command-encoder, compute-pass-encoder, render-pass-encoder, render-bundle-encoder}, x= op boundary: {queue-op, command-buffer, pass, execute-bundles, render-bundle}
- Not every context/boundary combinations are valid. We have the checkOpsValidForContext func to do the filtering.
- If two writes are in the same passes, render result has loose guarantees.
TODO: Tests with more than one buffer to try to stress implementations a little bit more.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import {
kOperationBoundaries,
kBoundaryInfo,
OperationContextHelper,
} from '../operation_context_helper.js';
import {
kAllReadOps,
kAllWriteOps,
BufferSyncTest,
checkOpsValidForContext,
} from './buffer_sync_test.js';
// The src value is what stores in the src buffer before any operation.
const kSrcValue = 0;
// The op value is what the read/write operation write into the target buffer.
const kOpValue = 1;
// All test cases below register on this shared group, using BufferSyncTest as fixture.
export const g = makeTestGroup(BufferSyncTest);
// read-before-write on several buffers: across `boundary`, no read may observe the later write.
g.test('rw')
  .desc(
    `
    Perform a 'read' operations on multiple buffers, followed by a 'write' operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The read should not see the contents written by the subsequent write.
    `
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only (readOp, writeOp) pairs legal for this context pair.
        for (const readOp of kAllReadOps) {
          for (const writeOp of kAllWriteOps) {
            if (checkOpsValidForContext([readOp, writeOp], _context)) {
              yield {
                readOp,
                readContext: _context[0],
                writeOp,
                writeContext: _context[1],
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const srcBuffers = [];
    const dstBuffers = [];
    const kBufferCount = 4;
    for (let i = 0; i < kBufferCount; i++) {
      const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
      srcBuffers.push(srcBuffer);
      dstBuffers.push(dstBuffer);
    }
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
    // The read op will read from src buffers and write to dst buffers based on what it reads.
    // A boundary will separate multiple read and write operations. The write op will write the
    // given op value into each src buffer as well. The write op happens after read op. So we are
    // expecting each src value to be in the mapped dst buffer.
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeReadOp(helper, readOp, readContext, srcBuffers[i], dstBuffers[i]);
    }
    helper.ensureBoundary(boundary);
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeWriteOp(helper, writeOp, writeContext, srcBuffers[i], 0, kOpValue);
    }
    helper.ensureSubmit();
    for (let i = 0; i < kBufferCount; i++) {
      // Only verify the value of the first element of each dstBuffer.
      t.verifyData(dstBuffers[i], kSrcValue);
    }
  });
// write-before-read on several buffers: across `boundary`, every read must see the prior write.
g.test('wr')
  .desc(
    `
    Perform a 'write' operation on on multiple buffers, followed by a 'read' operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The read should see exactly the contents written by the previous write.`
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only (readOp, writeOp) pairs legal for this context pair.
        for (const readOp of kAllReadOps) {
          for (const writeOp of kAllWriteOps) {
            if (checkOpsValidForContext([readOp, writeOp], _context)) {
              yield {
                readOp,
                readContext: _context[0],
                writeOp,
                writeContext: _context[1],
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const srcBuffers = [];
    const dstBuffers = [];
    const kBufferCount = 4;
    for (let i = 0; i < kBufferCount; i++) {
      const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
      srcBuffers.push(srcBuffer);
      dstBuffers.push(dstBuffer);
    }
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
    // The write op will write the given op value into src buffers.
    // The read op will read from src buffers and write to dst buffers based on what it reads.
    // The write op happens before read op. So we are expecting the op value to be in the dst
    // buffers.
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeWriteOp(helper, writeOp, writeContext, srcBuffers[i], 0, kOpValue);
    }
    helper.ensureBoundary(boundary);
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeReadOp(helper, readOp, readContext, srcBuffers[i], dstBuffers[i]);
    }
    helper.ensureSubmit();
    for (let i = 0; i < kBufferCount; i++) {
      // Only verify the value of the first element of the dstBuffer
      t.verifyData(dstBuffers[i], kOpValue);
    }
  });
// write-after-write on several buffers: across `boundary`, the second write must win.
g.test('ww')
  .desc(
    `
    Perform a 'first' write operation on multiple buffers, followed by a 'second' write operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The second write should overwrite the contents of the first.`
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only write-op pairs legal for this context pair.
        for (const firstWriteOp of kAllWriteOps) {
          for (const secondWriteOp of kAllWriteOps) {
            if (checkOpsValidForContext([firstWriteOp, secondWriteOp], _context)) {
              yield {
                writeOps: [firstWriteOp, secondWriteOp],
                contexts: _context,
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { writeOps, contexts, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const buffers = [];
    const kBufferCount = 4;
    for (let i = 0; i < kBufferCount; i++) {
      const buffer = await t.createBufferWithValue(0);
      buffers.push(buffer);
    }
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[0], 0, 1);
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[1], 1, 2);
    // First write stores 1 into every buffer; after the boundary the second write stores 2.
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeWriteOp(helper, writeOps[0], contexts[0], buffers[i], 0, 1);
    }
    helper.ensureBoundary(boundary);
    for (let i = 0; i < kBufferCount; i++) {
      t.encodeWriteOp(helper, writeOps[1], contexts[1], buffers[i], 1, 2);
    }
    helper.ensureSubmit();
    for (let i = 0; i < kBufferCount; i++) {
      t.verifyData(buffers[i], 2);
    }
  });
// Same-pass write-after-write via draws: result may be either of the two written values.
g.test('multiple_pairs_of_draws_in_one_render_pass')
  .desc(
    `
    Test write-after-write operations on multiple buffers via the one render pass. The first write
    will write the buffer index * 2 + 1 into all storage buffers. The second write will write the
    buffer index * 2 + 2 into the all buffers in the same pass. Expected data in all buffers is either
    buffer index * 2 + 1 or buffer index * 2 + 2. It may use bundle in each draw.
    `
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('firstDrawUseBundle', [false, true])
      .combine('secondDrawUseBundle', [false, true])
  )
  .fn(async t => {
    const { firstDrawUseBundle, secondDrawUseBundle } = t.params;
    const encoder = t.device.createCommandEncoder();
    const passEncoder = t.beginSimpleRenderPass(encoder);
    const kBufferCount = 4;
    const buffers = [];
    for (let b = 0; b < kBufferCount; ++b) {
      const buffer = await t.createBufferWithValue(0);
      buffers.push(buffer);
      const useBundle = [firstDrawUseBundle, secondDrawUseBundle];
      for (let i = 0; i < 2; ++i) {
        // Encode each draw either directly in the pass or inside its own bundle.
        const renderEncoder = useBundle[i]
          ? t.device.createRenderBundleEncoder({
              colorFormats: ['rgba8unorm'],
            })
          : passEncoder;
        const pipeline = t.createStorageWriteRenderPipeline(2 * b + i + 1);
        const bindGroup = t.createBindGroup(pipeline, buffer);
        renderEncoder.setPipeline(pipeline);
        renderEncoder.setBindGroup(0, bindGroup);
        renderEncoder.draw(1, 1, 0, 0);
        if (useBundle[i]) passEncoder.executeBundles([renderEncoder.finish()]);
      }
    }
    passEncoder.end();
    t.device.queue.submit([encoder.finish()]);
    for (let b = 0; b < kBufferCount; ++b) {
      t.verifyDataTwoValidValues(buffers[b], 2 * b + 1, 2 * b + 2);
    }
  });
// Same-bundle write-after-write via draws: result may be either of the two written values.
g.test('multiple_pairs_of_draws_in_one_render_bundle')
  .desc(
    `
    Test write-after-write operations on multiple buffers via the one render bundle. The first write
    will write the buffer index * 2 + 1 into all storage buffers. The second write will write the
    buffer index * 2 + 2 into the all buffers in the same pass. Expected data in all buffers is either
    buffer index * 2 + 1 or buffer index * 2 + 2.
    `
  )
  .fn(async t => {
    const encoder = t.device.createCommandEncoder();
    const passEncoder = t.beginSimpleRenderPass(encoder);
    const renderEncoder = t.device.createRenderBundleEncoder({
      colorFormats: ['rgba8unorm'],
    });
    const kBufferCount = 4;
    const buffers = [];
    for (let b = 0; b < kBufferCount; ++b) {
      const buffer = await t.createBufferWithValue(0);
      buffers.push(buffer);
      // Both writes to a buffer are encoded in the same bundle.
      for (let i = 0; i < 2; ++i) {
        const pipeline = t.createStorageWriteRenderPipeline(2 * b + i + 1);
        const bindGroup = t.createBindGroup(pipeline, buffer);
        renderEncoder.setPipeline(pipeline);
        renderEncoder.setBindGroup(0, bindGroup);
        renderEncoder.draw(1, 1, 0, 0);
      }
    }
    passEncoder.executeBundles([renderEncoder.finish()]);
    passEncoder.end();
    t.device.queue.submit([encoder.finish()]);
    for (let b = 0; b < kBufferCount; ++b) {
      t.verifyDataTwoValidValues(buffers[b], 2 * b + 1, 2 * b + 2);
    }
  });
// Same-pass write-after-write via dispatches: dispatch order is strict, so the second value wins.
g.test('multiple_pairs_of_dispatches_in_one_compute_pass')
  .desc(
    `
    Test write-after-write operations on multiple buffers via the one compute pass. The first write
    will write the buffer index * 2 + 1 into all storage buffers. The second write will write the
    buffer index * 2 + 2 into the all buffers in the same pass. Expected data in all buffers is the
    buffer index * 2 + 2.
    `
  )
  .fn(async t => {
    const encoder = t.device.createCommandEncoder();
    const pass = encoder.beginComputePass();
    const kBufferCount = 4;
    const buffers = [];
    for (let b = 0; b < kBufferCount; ++b) {
      const buffer = await t.createBufferWithValue(0);
      buffers.push(buffer);
      for (let i = 0; i < 2; ++i) {
        const pipeline = t.createStorageWriteComputePipeline(2 * b + i + 1);
        const bindGroup = t.createBindGroup(pipeline, buffer);
        pass.setPipeline(pipeline);
        pass.setBindGroup(0, bindGroup);
        pass.dispatchWorkgroups(1);
      }
    }
    pass.end();
    t.device.queue.submit([encoder.finish()]);
    for (let b = 0; b < kBufferCount; ++b) {
      t.verifyData(buffers[b], 2 * b + 2);
    }
  });

View file

@ -0,0 +1,257 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Memory Synchronization Tests for Buffer: read before write, read after write, and write after write.
- Create a src buffer and initialize it to 0, wait on the fence to ensure the data is initialized.
Write Op: write a value (say 1) into the src buffer via render pass, compute pass, copy, write buffer, etc.
Read Op: read the value from the src buffer and write it to dst buffer via render pass (vertex, index, indirect input, uniform, storage), compute pass, copy etc.
Wait on another fence, then call expectContents to verify the dst buffer value.
- x= write op: {storage buffer in {compute, render, render-via-bundle}, t2b copy dst, b2b copy dst, writeBuffer}
- x= read op: {index buffer, vertex buffer, indirect buffer (draw, draw indexed, dispatch), uniform buffer, {readonly, readwrite} storage buffer in {compute, render, render-via-bundle}, b2b copy src, b2t copy src}
- x= read-write sequence: {read then write, write then read, write then write}
- x= op context: {queue, command-encoder, compute-pass-encoder, render-pass-encoder, render-bundle-encoder}, x= op boundary: {queue-op, command-buffer, pass, execute-bundles, render-bundle}
- Not every context/boundary combinations are valid. We have the checkOpsValidForContext func to do the filtering.
- If two writes are in the same passes, render result has loose guarantees.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import {
kOperationBoundaries,
kBoundaryInfo,
OperationContextHelper,
} from '../operation_context_helper.js';
import {
kAllReadOps,
kAllWriteOps,
BufferSyncTest,
checkOpsValidForContext,
} from './buffer_sync_test.js';
// The src value is what stores in the src buffer before any operation.
const kSrcValue = 0;
// The op value is what the read/write operation write into the target buffer.
const kOpValue = 1;
// All test cases below register on this shared group, using BufferSyncTest as fixture.
export const g = makeTestGroup(BufferSyncTest);
// read-before-write on one buffer: across `boundary`, the read must not observe the later write.
g.test('rw')
  .desc(
    `
    Perform a 'read' operations on a buffer, followed by a 'write' operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The read should not see the contents written by the subsequent write.`
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only (readOp, writeOp) pairs legal for this context pair.
        for (const readOp of kAllReadOps) {
          for (const writeOp of kAllWriteOps) {
            if (checkOpsValidForContext([readOp, writeOp], _context)) {
              yield {
                readOp,
                readContext: _context[0],
                writeOp,
                writeContext: _context[1],
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
    // The read op will read from src buffer and write to dst buffer based on what it reads.
    // The write op will write the given op value into src buffer as well.
    // The write op happens after read op. So we are expecting the src value to be in the dst buffer.
    t.encodeReadOp(helper, readOp, readContext, srcBuffer, dstBuffer);
    helper.ensureBoundary(boundary);
    t.encodeWriteOp(helper, writeOp, writeContext, srcBuffer, 0, kOpValue);
    helper.ensureSubmit();
    // Only verify the value of the first element of the dstBuffer
    t.verifyData(dstBuffer, kSrcValue);
  });
// write-before-read on one buffer: across `boundary`, the read must see the prior write.
g.test('wr')
  .desc(
    `
    Perform a 'write' operation on a buffer, followed by a 'read' operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The read should see exactly the contents written by the previous write.`
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only (readOp, writeOp) pairs legal for this context pair.
        for (const readOp of kAllReadOps) {
          for (const writeOp of kAllWriteOps) {
            if (checkOpsValidForContext([readOp, writeOp], _context)) {
              yield {
                readOp,
                readContext: _context[0],
                writeOp,
                writeContext: _context[1],
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
    // The write op will write the given op value into src buffer.
    // The read op will read from src buffer and write to dst buffer based on what it reads.
    // The write op happens before read op. So we are expecting the op value to be in the dst buffer.
    t.encodeWriteOp(helper, writeOp, writeContext, srcBuffer, 0, kOpValue);
    helper.ensureBoundary(boundary);
    t.encodeReadOp(helper, readOp, readContext, srcBuffer, dstBuffer);
    helper.ensureSubmit();
    // Only verify the value of the first element of the dstBuffer
    t.verifyData(dstBuffer, kOpValue);
  });
// write-after-write on one buffer: across `boundary`, the second write must win.
g.test('ww')
  .desc(
    `
    Perform a 'first' write operation on a buffer, followed by a 'second' write operation.
    Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
    Test that the results are synchronized.
    The second write should overwrite the contents of the first.`
  )
  .params(u =>
    u //
      .combine('boundary', kOperationBoundaries)
      .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
      .expandWithParams(function* ({ _context }) {
        // Keep only write-op pairs legal for this context pair.
        for (const firstWriteOp of kAllWriteOps) {
          for (const secondWriteOp of kAllWriteOps) {
            if (checkOpsValidForContext([firstWriteOp, secondWriteOp], _context)) {
              yield {
                writeOps: [firstWriteOp, secondWriteOp],
                contexts: _context,
              };
            }
          }
        }
      })
  )
  .fn(async t => {
    const { writeOps, contexts, boundary } = t.params;
    const helper = new OperationContextHelper(t);
    const buffer = await t.createBufferWithValue(0);
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[0], 0, 1);
    await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[1], 1, 2);
    // First write stores 1; after the boundary the second write stores 2.
    t.encodeWriteOp(helper, writeOps[0], contexts[0], buffer, 0, 1);
    helper.ensureBoundary(boundary);
    t.encodeWriteOp(helper, writeOps[1], contexts[1], buffer, 1, 2);
    helper.ensureSubmit();
    t.verifyData(buffer, 2);
  });
// Cases with loose render result guarantees.
// Same-pass write-after-write via two draws: either written value is acceptable.
g.test('two_draws_in_the_same_render_pass')
  .desc(
    `Test write-after-write operations in the same render pass. The first write will write 1 into
    a storage buffer. The second write will write 2 into the same buffer in the same pass. Expected
    data in buffer is either 1 or 2. It may use bundle in each draw.`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('firstDrawUseBundle', [false, true])
      .combine('secondDrawUseBundle', [false, true])
  )
  .fn(async t => {
    const { firstDrawUseBundle, secondDrawUseBundle } = t.params;
    const buffer = await t.createBufferWithValue(0);
    const encoder = t.device.createCommandEncoder();
    const passEncoder = t.beginSimpleRenderPass(encoder);
    const useBundle = [firstDrawUseBundle, secondDrawUseBundle];
    for (let i = 0; i < 2; ++i) {
      // Encode each draw either directly in the pass or inside its own bundle.
      const renderEncoder = useBundle[i]
        ? t.device.createRenderBundleEncoder({
            colorFormats: ['rgba8unorm'],
          })
        : passEncoder;
      const pipeline = t.createStorageWriteRenderPipeline(i + 1);
      const bindGroup = t.createBindGroup(pipeline, buffer);
      renderEncoder.setPipeline(pipeline);
      renderEncoder.setBindGroup(0, bindGroup);
      renderEncoder.draw(1, 1, 0, 0);
      if (useBundle[i]) passEncoder.executeBundles([renderEncoder.finish()]);
    }
    passEncoder.end();
    t.device.queue.submit([encoder.finish()]);
    t.verifyDataTwoValidValues(buffer, 1, 2);
  });
// Same-bundle write-after-write via two draws: either written value is acceptable.
g.test('two_draws_in_the_same_render_bundle')
  .desc(
    `Test write-after-write operations in the same render bundle. The first write will write 1 into
    a storage buffer. The second write will write 2 into the same buffer in the same pass. Expected
    data in buffer is either 1 or 2.`
  )
  .fn(async t => {
    const buffer = await t.createBufferWithValue(0);
    const encoder = t.device.createCommandEncoder();
    const passEncoder = t.beginSimpleRenderPass(encoder);
    const renderEncoder = t.device.createRenderBundleEncoder({
      colorFormats: ['rgba8unorm'],
    });
    for (let i = 0; i < 2; ++i) {
      const pipeline = t.createStorageWriteRenderPipeline(i + 1);
      const bindGroup = t.createBindGroup(pipeline, buffer);
      renderEncoder.setPipeline(pipeline);
      renderEncoder.setBindGroup(0, bindGroup);
      renderEncoder.draw(1, 1, 0, 0);
    }
    passEncoder.executeBundles([renderEncoder.finish()]);
    passEncoder.end();
    t.device.queue.submit([encoder.finish()]);
    t.verifyDataTwoValidValues(buffer, 1, 2);
  });
// Same-pass write-after-write via two dispatches: dispatch order is strict, so 2 must win.
g.test('two_dispatches_in_the_same_compute_pass')
  .desc(
    `Test write-after-write operations in the same compute pass. The first write will write 1 into
    a storage buffer. The second write will write 2 into the same buffer in the same pass. Expected
    data in buffer is 2.`
  )
  .fn(async t => {
    const buffer = await t.createBufferWithValue(0);
    const encoder = t.device.createCommandEncoder();
    const pass = encoder.beginComputePass();
    for (let i = 0; i < 2; ++i) {
      const pipeline = t.createStorageWriteComputePipeline(i + 1);
      const bindGroup = t.createBindGroup(pipeline, buffer);
      pass.setPipeline(pipeline);
      pass.setBindGroup(0, bindGroup);
      pass.dispatchWorkgroups(1);
    }
    pass.end();
    t.device.queue.submit([encoder.finish()]);
    t.verifyData(buffer, 2);
  });

View file

@ -0,0 +1,317 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert, unreachable } from '../../../../common/util/util.js';
/**
 * Boundary between the first operation and the second operation.
 */
export const kOperationBoundaries = [
  'queue-op', // Operations are performed in different queue operations (submit, writeTexture).
  'command-buffer', // Operations are in different command buffers.
  'pass', // Operations are in different passes.
  'execute-bundles', // Operations are in different executeBundles(...) calls.
  'render-bundle', // Operations are in different render bundles.
  'dispatch', // Operations are in different dispatches.
  'draw', // Operations are in different draws.
];
/**
 * Context a particular operation is permitted in.
 * These contexts should be sorted such that the first is the most top-level
 * context, and the last is most nested (inside a render bundle, in a render pass, ...).
 * ensureContext() relies on this index ordering to find the common ancestor context.
 */
export const kOperationContexts = [
  'queue', // Operation occurs on the GPUQueue object
  'command-encoder', // Operation may be encoded in a GPUCommandEncoder.
  'compute-pass-encoder', // Operation may be encoded in a GPUComputePassEncoder.
  'render-pass-encoder', // Operation may be encoded in a GPURenderPassEncoder.
  'render-bundle-encoder', // Operation may be encoded in a GPURenderBundleEncoder.
];
/** Returns the cross product of `as` x `bs` as an array of [a, b] pairs. */
function combineContexts(as, bs) {
  return as.flatMap(a => bs.map(b => [a, b]));
}
// A queue-level boundary can separate any pair of contexts.
const queueContexts = combineContexts(kOperationContexts, kOperationContexts);
// A command-buffer boundary only applies to operations that are encoded (not queue ops).
const commandBufferContexts = combineContexts(
  kOperationContexts.filter(c => c !== 'queue'),
  kOperationContexts.filter(c => c !== 'queue')
);
/**
 * Mapping of OperationBoundary => the set of OperationContext pairs
 * the boundary is capable of separating operations in.
 */
export const kBoundaryInfo = {
  'queue-op': {
    contexts: queueContexts,
  },
  'command-buffer': {
    contexts: commandBufferContexts,
  },
  pass: {
    contexts: [
      ['compute-pass-encoder', 'compute-pass-encoder'],
      ['compute-pass-encoder', 'render-pass-encoder'],
      ['render-pass-encoder', 'compute-pass-encoder'],
      ['render-pass-encoder', 'render-pass-encoder'],
      ['render-bundle-encoder', 'render-pass-encoder'],
      ['render-pass-encoder', 'render-bundle-encoder'],
      ['render-bundle-encoder', 'render-bundle-encoder'],
    ],
  },
  'execute-bundles': {
    contexts: [['render-bundle-encoder', 'render-bundle-encoder']],
  },
  'render-bundle': {
    contexts: [
      ['render-bundle-encoder', 'render-pass-encoder'],
      ['render-pass-encoder', 'render-bundle-encoder'],
      ['render-bundle-encoder', 'render-bundle-encoder'],
    ],
  },
  dispatch: {
    // Dispatch boundaries only exist inside a compute pass.
    contexts: [['compute-pass-encoder', 'compute-pass-encoder']],
  },
  draw: {
    // Draw boundaries only exist inside render passes / bundles.
    contexts: [
      ['render-pass-encoder', 'render-pass-encoder'],
      ['render-bundle-encoder', 'render-pass-encoder'],
      ['render-pass-encoder', 'render-bundle-encoder'],
    ],
  },
};
export class OperationContextHelper {
// We start at the queue context which is top-level.
currentContext = 'queue';
// Set based on the current context.
commandBuffers = [];
renderBundles = [];
kTextureSize = [4, 4];
kTextureFormat = 'rgba8unorm';
  // `t` is the test fixture; it supplies the device (and resource tracking via
  // trackForCleanup) that all encoding helpers use.
  constructor(t) {
    this.t = t;
    this.device = t.device;
    this.queue = t.device.queue;
  }
  // Ensure that all encoded commands are finished and submitted.
  // First unwinds back to the top-level queue context (finishing any open
  // encoders), then submits every accumulated command buffer.
  ensureSubmit() {
    this.ensureContext('queue');
    this.flushCommandBuffers();
  }
  // Closes the innermost open encoder and moves currentContext one level up.
  // Returns the finished GPUCommandBuffer or GPURenderBundle when closing the
  // context produces one, otherwise null. Popping 'queue' is a programming error.
  popContext() {
    switch (this.currentContext) {
      case 'queue':
        unreachable();
        break;
      case 'command-encoder': {
        assert(this.commandEncoder !== undefined);
        const commandBuffer = this.commandEncoder.finish();
        this.commandEncoder = undefined;
        this.currentContext = 'queue';
        return commandBuffer;
      }
      case 'compute-pass-encoder':
        assert(this.computePassEncoder !== undefined);
        this.computePassEncoder.end();
        this.computePassEncoder = undefined;
        this.currentContext = 'command-encoder';
        break;
      case 'render-pass-encoder':
        assert(this.renderPassEncoder !== undefined);
        this.renderPassEncoder.end();
        this.renderPassEncoder = undefined;
        this.currentContext = 'command-encoder';
        break;
      case 'render-bundle-encoder': {
        assert(this.renderBundleEncoder !== undefined);
        const renderBundle = this.renderBundleEncoder.finish();
        this.renderBundleEncoder = undefined;
        this.currentContext = 'render-pass-encoder';
        return renderBundle;
      }
    }
    return null;
  }
makeDummyAttachment() {
const texture = this.t.trackForCleanup(
this.device.createTexture({
format: this.kTextureFormat,
size: this.kTextureSize,
usage: GPUTextureUsage.RENDER_ATTACHMENT,
})
);
return {
view: texture.createView(),
loadOp: 'load',
storeOp: 'store',
};
}
ensureContext(context) {
// Find the common ancestor. So we can transition from currentContext -> context.
const ancestorContext =
kOperationContexts[
Math.min(
kOperationContexts.indexOf(context),
kOperationContexts.indexOf(this.currentContext)
)
];
// Pop the context until we're at the common ancestor.
while (this.currentContext !== ancestorContext) {
// About to pop the render pass encoder. Execute any outstanding render bundles.
if (this.currentContext === 'render-pass-encoder') {
this.flushRenderBundles();
}
const result = this.popContext();
if (result) {
if (result instanceof GPURenderBundle) {
this.renderBundles.push(result);
} else {
this.commandBuffers.push(result);
}
}
}
if (this.currentContext === context) {
return;
}
switch (context) {
case 'queue':
unreachable();
break;
case 'command-encoder':
assert(this.currentContext === 'queue');
this.commandEncoder = this.device.createCommandEncoder();
break;
case 'compute-pass-encoder':
switch (this.currentContext) {
case 'queue':
this.commandEncoder = this.device.createCommandEncoder();
// fallthrough
case 'command-encoder':
assert(this.commandEncoder !== undefined);
this.computePassEncoder = this.commandEncoder.beginComputePass();
break;
case 'compute-pass-encoder':
case 'render-bundle-encoder':
case 'render-pass-encoder':
unreachable();
}
break;
case 'render-pass-encoder':
switch (this.currentContext) {
case 'queue':
this.commandEncoder = this.device.createCommandEncoder();
// fallthrough
case 'command-encoder':
assert(this.commandEncoder !== undefined);
this.renderPassEncoder = this.commandEncoder.beginRenderPass({
colorAttachments: [this.makeDummyAttachment()],
});
break;
case 'render-pass-encoder':
case 'render-bundle-encoder':
case 'compute-pass-encoder':
unreachable();
}
break;
case 'render-bundle-encoder':
switch (this.currentContext) {
case 'queue':
this.commandEncoder = this.device.createCommandEncoder();
// fallthrough
case 'command-encoder':
assert(this.commandEncoder !== undefined);
this.renderPassEncoder = this.commandEncoder.beginRenderPass({
colorAttachments: [this.makeDummyAttachment()],
});
// fallthrough
case 'render-pass-encoder':
this.renderBundleEncoder = this.device.createRenderBundleEncoder({
colorFormats: [this.kTextureFormat],
});
break;
case 'render-bundle-encoder':
case 'compute-pass-encoder':
unreachable();
}
break;
}
this.currentContext = context;
}
flushRenderBundles() {
assert(this.renderPassEncoder !== undefined);
if (this.renderBundles.length) {
this.renderPassEncoder.executeBundles(this.renderBundles);
this.renderBundles = [];
}
}
flushCommandBuffers() {
if (this.commandBuffers.length) {
this.queue.submit(this.commandBuffers);
this.commandBuffers = [];
}
}
ensureBoundary(boundary) {
switch (boundary) {
case 'command-buffer':
this.ensureContext('queue');
break;
case 'queue-op':
this.ensureContext('queue');
// Submit any GPUCommandBuffers so the next one is in a separate submit.
this.flushCommandBuffers();
break;
case 'dispatch':
// Nothing to do to separate dispatches.
assert(this.currentContext === 'compute-pass-encoder');
break;
case 'draw':
// Nothing to do to separate draws.
assert(
this.currentContext === 'render-pass-encoder' ||
this.currentContext === 'render-bundle-encoder'
);
break;
case 'pass':
this.ensureContext('command-encoder');
break;
case 'render-bundle':
this.ensureContext('render-pass-encoder');
break;
case 'execute-bundles':
this.ensureContext('render-pass-encoder');
// Execute any GPURenderBundles so the next one is in a separate executeBundles.
this.flushRenderBundles();
break;
}
}
}

Some files were not shown because too many files have changed in this diff Show more