Update WebGPU CTS (#30454)

* Fix webgpu-cts import script

* Update webgpu-cts to f2b59e0362

* Updated expectations
This commit is contained in:
Samson 2023-09-30 18:28:46 +02:00 committed by GitHub
parent ddc47aa56b
commit d74d51190a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
271 changed files with 147919 additions and 8349 deletions

View file

@ -782,6 +782,8 @@ tests/wpt/mozilla/tests for Servo-only tests""" % reference_path)
res = call(["npm", "run", "wpt"], cwd=clone_dir)
if res != 0:
return res
# https://github.com/gpuweb/cts/pull/2770
delete(path.join(clone_dir, "out-wpt", "cts-chunked2sec.https.html"))
cts_html = path.join(clone_dir, "out-wpt", "cts.https.html")
# patch
with open(cts_html, 'r') as file:

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1 +1 @@
480edec387e8cd5bf5934680050c59a3f7a01438
f2b59e03621238d0d0fd6305be2c406ce3e45ac2

View file

@ -159,6 +159,13 @@ export class Fixture {
throw new SkipTestCase(msg);
}
/**
 * Throws an exception marking the subcase as skipped if condition is true.
 * `msg` may be a plain string, or a function returning the message — the
 * function form is only evaluated when the skip actually happens, so an
 * expensive message computation costs nothing on the common (non-skip) path.
 */
skipIf(cond, msg = '') {
if (cond) {
this.skip(typeof msg === 'function' ? msg() : msg);
}
}
/** Log a warning and increase the result status to "Warn". */
warn(msg) {
this.rec.warn(new Error(msg));

View file

@ -0,0 +1,16 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert } from '../util/util.js'; /** Metadata about tests (that can't be derived at runtime). */
/**
 * Loads `listing_meta.json` from the given suite directory and returns the
 * parsed metadata object, or `null` if the file does not exist.
 * Node-only: relies on `require` to access the filesystem.
 */
export function loadMetadataForSuite(suiteDir) {
  assert(typeof require !== 'undefined', 'loadMetadataForSuite is only implemented on Node');
  const fs = require('fs');
  const metadataPath = `${suiteDir}/listing_meta.json`;
  if (!fs.existsSync(metadataPath)) {
    return null;
  }
  return JSON.parse(fs.readFileSync(metadataPath, 'utf8'));
}

View file

@ -1,17 +1,9 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { mergeParams } from '../internal/params_utils.js';
**/ import { mergeParams, mergeParamsChecked } from '../internal/params_utils.js';
import { comparePublicParamsPaths, Ordering } from '../internal/query/compare.js';
import { stringifyPublicParams } from '../internal/query/stringify_params.js';
import { assert, mapLazy } from '../util/util.js';
// ================================================================
// "Public" ParamsBuilder API / Documentation
// ================================================================
/**
* Provides doc comments for the methods of CaseParamsBuilder and SubcaseParamsBuilder.
* (Also enforces rough interface match between them.)
*/
import { assert, mapLazy, objectEquals } from '../util/util.js';
/**
* Base class for `CaseParamsBuilder` and `SubcaseParamsBuilder`.
@ -29,8 +21,8 @@ export class ParamsBuilderBase {
/**
* Calls the (normally hidden) `iterateCasesWithSubcases()` method.
*/
export function builderIterateCasesWithSubcases(builder) {
return builder.iterateCasesWithSubcases();
export function builderIterateCasesWithSubcases(builder, caseFilter) {
return builder.iterateCasesWithSubcases(caseFilter);
}
/**
@ -42,27 +34,61 @@ export function builderIterateCasesWithSubcases(builder) {
* This means, for example, that the `unit` passed into `TestBuilder.params()` can be reused.
*/
export class CaseParamsBuilder extends ParamsBuilderBase {
*iterateCasesWithSubcases() {
for (const a of this.cases()) {
yield [a, undefined];
*iterateCasesWithSubcases(caseFilter) {
for (const caseP of this.cases(caseFilter)) {
if (caseFilter) {
// this.cases() only filters out cases which conflict with caseFilter. Now that we have
// the final caseP, filter out cases which are missing keys that caseFilter requires.
const ordering = comparePublicParamsPaths(caseP, caseFilter);
if (ordering === Ordering.StrictSuperset || ordering === Ordering.Unordered) {
continue;
}
}
yield [caseP, undefined];
}
}
[Symbol.iterator]() {
return this.cases();
return this.cases(null);
}
/** @inheritDoc */
expandWithParams(expander) {
const newGenerator = expanderGenerator(this.cases, expander);
return new CaseParamsBuilder(() => newGenerator({}));
const baseGenerator = this.cases;
return new CaseParamsBuilder(function* (caseFilter) {
for (const a of baseGenerator(caseFilter)) {
for (const b of expander(a)) {
if (caseFilter) {
// If the expander generated any key-value pair that conflicts with caseFilter, skip.
const kvPairs = Object.entries(b);
if (kvPairs.some(([k, v]) => k in caseFilter && !objectEquals(caseFilter[k], v))) {
continue;
}
}
yield mergeParamsChecked(a, b);
}
}
});
}
/** @inheritDoc */
expand(key, expander) {
return this.expandWithParams(function* (p) {
for (const value of expander(p)) {
yield { [key]: value };
const baseGenerator = this.cases;
return new CaseParamsBuilder(function* (caseFilter) {
for (const a of baseGenerator(caseFilter)) {
assert(!(key in a), `New key '${key}' already exists in ${JSON.stringify(a)}`);
for (const v of expander(a)) {
// If the expander generated a value for this key that conflicts with caseFilter, skip.
if (caseFilter && key in caseFilter) {
if (!objectEquals(caseFilter[key], v)) {
continue;
}
}
yield { ...a, [key]: v };
}
}
});
}
@ -89,8 +115,12 @@ export class CaseParamsBuilder extends ParamsBuilderBase {
/** @inheritDoc */
filter(pred) {
const newGenerator = filterGenerator(this.cases, pred);
return new CaseParamsBuilder(() => newGenerator({}));
const baseGenerator = this.cases;
return new CaseParamsBuilder(function* (caseFilter) {
for (const a of baseGenerator(caseFilter)) {
if (pred(a)) yield a;
}
});
}
/** @inheritDoc */
@ -104,12 +134,9 @@ export class CaseParamsBuilder extends ParamsBuilderBase {
* generate new subcases instead of new cases.
*/
beginSubcases() {
return new SubcaseParamsBuilder(
() => this.cases(),
function* () {
yield {};
}
);
return new SubcaseParamsBuilder(this.cases, function* () {
yield {};
});
}
}
@ -135,8 +162,17 @@ export class SubcaseParamsBuilder extends ParamsBuilderBase {
this.subcases = generator;
}
*iterateCasesWithSubcases() {
for (const caseP of this.cases()) {
*iterateCasesWithSubcases(caseFilter) {
for (const caseP of this.cases(caseFilter)) {
if (caseFilter) {
// this.cases() only filters out cases which conflict with caseFilter. Now that we have
// the final caseP, filter out cases which are missing keys that caseFilter requires.
const ordering = comparePublicParamsPaths(caseP, caseFilter);
if (ordering === Ordering.StrictSuperset || ordering === Ordering.Unordered) {
continue;
}
}
const subcases = Array.from(this.subcases(caseP));
if (subcases.length) {
yield [caseP, subcases];
@ -146,15 +182,27 @@ export class SubcaseParamsBuilder extends ParamsBuilderBase {
/** @inheritDoc */
expandWithParams(expander) {
return new SubcaseParamsBuilder(this.cases, expanderGenerator(this.subcases, expander));
const baseGenerator = this.subcases;
return new SubcaseParamsBuilder(this.cases, function* (base) {
for (const a of baseGenerator(base)) {
for (const b of expander(mergeParams(base, a))) {
yield mergeParamsChecked(a, b);
}
}
});
}
/** @inheritDoc */
expand(key, expander) {
return this.expandWithParams(function* (p) {
for (const value of expander(p)) {
// TypeScript doesn't know here that NewPKey is always a single literal string type.
yield { [key]: value };
const baseGenerator = this.subcases;
return new SubcaseParamsBuilder(this.cases, function* (base) {
for (const a of baseGenerator(base)) {
const before = mergeParams(base, a);
assert(!(key in before), () => `Key '${key}' already exists in ${JSON.stringify(before)}`);
for (const v of expander(before)) {
yield { ...a, [key]: v };
}
}
});
}
@ -173,7 +221,12 @@ export class SubcaseParamsBuilder extends ParamsBuilderBase {
/** @inheritDoc */
filter(pred) {
return new SubcaseParamsBuilder(this.cases, filterGenerator(this.subcases, pred));
const baseGenerator = this.subcases;
return new SubcaseParamsBuilder(this.cases, function* (base) {
for (const a of baseGenerator(base)) {
if (pred(mergeParams(base, a))) yield a;
}
});
}
/** @inheritDoc */
@ -182,26 +235,6 @@ export class SubcaseParamsBuilder extends ParamsBuilderBase {
}
}
function expanderGenerator(baseGenerator, expander) {
return function* (base) {
for (const a of baseGenerator(base)) {
for (const b of expander(mergeParams(base, a))) {
yield mergeParams(a, b);
}
}
};
}
function filterGenerator(baseGenerator, pred) {
return function* (base) {
for (const a of baseGenerator(base)) {
if (pred(mergeParams(base, a))) {
yield a;
}
}
};
}
/** Assert an object is not a Generator (a thing returned from a generator function). */
function assertNotGenerator(x) {
if ('constructor' in x) {

View file

@ -7,4 +7,5 @@ export const globalTestConfig = {
testHeartbeatCallback: () => {},
noRaceWithRejectOnTimeout: false,
unrollConstEvalLoops: false,
compatibility: false,
};

View file

@ -21,17 +21,15 @@ export class TestFileLoader extends EventTarget {
return ret;
}
async loadTree(query, subqueriesToExpand = []) {
const tree = await loadTreeForQuery(
this,
query,
subqueriesToExpand.map(s => {
async loadTree(query, { subqueriesToExpand = [], maxChunkTime = Infinity } = {}) {
const tree = await loadTreeForQuery(this, query, {
subqueriesToExpand: subqueriesToExpand.map(s => {
const q = parseQuery(s);
assert(q.level >= 2, () => `subqueriesToExpand entries should not be multi-file:\n ${q}`);
return q;
})
);
}),
maxChunkTime,
});
this.dispatchEvent(new MessageEvent('finish'));
return tree;
}

View file

@ -20,6 +20,7 @@ const kMinSeverityForStack = LogSeverity.Warn;
/** Holds onto a LiveTestCaseResult owned by the Logger, and writes the results into it. */
export class TestCaseRecorder {
nonskippedSubcaseCount = 0;
inSubCase = false;
subCaseStatus = LogSeverity.Pass;
finalCaseStatus = LogSeverity.Pass;
@ -42,12 +43,18 @@ export class TestCaseRecorder {
}
finish() {
assert(this.startTime >= 0, 'finish() before start()');
// This is a framework error. If this assert is hit, it won't be localized
// to a test. The whole test run will fail out.
assert(this.startTime >= 0, 'internal error: finish() before start()');
const timeMilliseconds = now() - this.startTime;
// Round to next microsecond to avoid storing useless .xxxx00000000000002 in results.
this.result.timems = Math.ceil(timeMilliseconds * 1000) / 1000;
if (this.finalCaseStatus === LogSeverity.Skip && this.nonskippedSubcaseCount !== 0) {
this.threw(new Error('internal error: case is "skip" but has nonskipped subcases'));
}
// Convert numeric enum back to string (but expose 'exception' as 'fail')
this.result.status =
this.finalCaseStatus === LogSeverity.Pass
@ -67,6 +74,9 @@ export class TestCaseRecorder {
}
endSubCase(expectedStatus) {
if (this.subCaseStatus !== LogSeverity.Skip) {
this.nonskippedSubcaseCount++;
}
try {
if (expectedStatus === 'fail') {
if (this.subCaseStatus <= LogSeverity.Warn) {

View file

@ -19,8 +19,11 @@ export function extractPublicParams(params) {
return publicParams;
}
/** Used to escape reserved characters in URIs */
const kPercent = '%';
export const badParamValueChars = new RegExp(
'[' + kParamKVSeparator + kParamSeparator + kWildcard + ']'
'[' + kParamKVSeparator + kParamSeparator + kWildcard + kPercent + ']'
);
export function publicParamsEquals(x, y) {
@ -50,9 +53,21 @@ function typeAssert() {}
}
}
/** Merges two objects into one `{ ...a, ...b }` and return it with a flattened type. */
export function mergeParams(a, b) {
for (const key of Object.keys(a)) {
assert(!(key in b), 'Duplicate key: ' + key);
}
return { ...a, ...b };
}
/**
* Merges two objects into one `{ ...a, ...b }` and asserts they had no overlapping keys.
* This is slower than {@link mergeParams}.
*/
export function mergeParamsChecked(a, b) {
const merged = mergeParams(a, b);
assert(
Object.keys(merged).length === Object.keys(a).length + Object.keys(b).length,
() => `Duplicate key between ${JSON.stringify(a)} and ${JSON.stringify(b)}`
);
return merged;
}

View file

@ -1,6 +1,6 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { assert, sortObjectByKey } from '../../util/util.js';
**/ import { assert, sortObjectByKey, isPlainObject } from '../../util/util.js';
// JSON can't represent various values and by default stores them as `null`.
// Instead, storing them as a magic string values in JSON.
const jsUndefinedMagicValue = '_undef_';
@ -52,6 +52,17 @@ function stringifyFilter(k, v) {
);
}
const isObject = v !== null && typeof v === 'object' && !Array.isArray(v);
if (isObject) {
assert(
isPlainObject(v),
`value must be a plain object but it appears to be a '${
Object.getPrototypeOf(v).constructor.name
}`
);
}
assert(typeof v !== 'function', `${v} can not be a function`);
if (Object.is(v, -0)) {
return jsNegativeZeroMagicValue;
}

View file

@ -19,6 +19,8 @@ import {
import { validQueryPart } from '../internal/query/validQueryPart.js';
import { assert, unreachable } from '../util/util.js';
import { logToWebsocket } from './websocket_logger.js';
export function makeTestGroup(fixture) {
return new TestGroup(fixture);
}
@ -29,6 +31,9 @@ export function makeTestGroupForUnitTesting(fixture) {
return new TestGroup(fixture);
}
/** Parameter name for batch number (see also TestBuilder.batch). */
const kBatchParamName = 'batch__';
export class TestGroup {
seen = new Set();
tests = [];
@ -74,6 +79,16 @@ export class TestGroup {
test.validate();
}
}
collectNonEmptyTests() {
const testPaths = [];
for (const test of this.tests) {
if (test.computeCaseCount() > 0) {
testPaths.push({ testPath: test.testPath });
}
}
return testPaths;
}
}
class TestBuilder {
@ -127,6 +142,7 @@ class TestBuilder {
};
}
/** Perform various validation/"lint" checks. */
validate() {
const testPathString = this.testPath.join(kPathSeparator);
assert(this.testFn !== undefined, () => {
@ -142,13 +158,18 @@ class TestBuilder {
}
const seen = new Set();
for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases, null)) {
for (const subcaseParams of subcases ?? [{}]) {
const params = mergeParams(caseParams, subcaseParams);
assert(this.batchSize === 0 || !('batch__' in params));
assert(this.batchSize === 0 || !(kBatchParamName in params));
// stringifyPublicParams also checks for invalid params values
const testcaseString = stringifyPublicParams(params);
let testcaseString;
try {
testcaseString = stringifyPublicParams(params);
} catch (e) {
throw new Error(`${e}: ${testPathString}`);
}
// A (hopefully) unique representation of a params value.
const testcaseStringUnique = stringifyPublicParamsUniquely(params);
@ -162,6 +183,18 @@ class TestBuilder {
}
}
computeCaseCount() {
if (this.testCases === undefined) {
return 1;
}
let caseCount = 0;
for (const [_caseParams, _subcases] of builderIterateCasesWithSubcases(this.testCases, null)) {
caseCount++;
}
return caseCount;
}
params(cases) {
assert(this.testCases === undefined, 'test case is already parameterized');
if (cases instanceof Function) {
@ -186,48 +219,69 @@ class TestBuilder {
}
}
*iterate() {
makeCaseSpecific(params, subcases) {
assert(this.testFn !== undefined, 'No test function (.fn()) for test');
return new RunCaseSpecific(
this.testPath,
params,
this.isUnimplemented,
subcases,
this.fixture,
this.testFn,
this.beforeFn,
this.testCreationStack
);
}
*iterate(caseFilter) {
this.testCases ??= kUnitCaseParamsBuilder;
for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
// Remove the batch__ from the caseFilter because the params builder doesn't
// know about it (we don't add it until later in this function).
let filterToBatch;
const caseFilterWithoutBatch = caseFilter ? { ...caseFilter } : null;
if (caseFilterWithoutBatch && kBatchParamName in caseFilterWithoutBatch) {
const batchParam = caseFilterWithoutBatch[kBatchParamName];
assert(typeof batchParam === 'number');
filterToBatch = batchParam;
delete caseFilterWithoutBatch[kBatchParamName];
}
for (const [caseParams, subcases] of builderIterateCasesWithSubcases(
this.testCases,
caseFilterWithoutBatch
)) {
// If batches are not used, yield just one case.
if (this.batchSize === 0 || subcases === undefined) {
yield new RunCaseSpecific(
this.testPath,
caseParams,
this.isUnimplemented,
subcases,
this.fixture,
this.testFn,
this.beforeFn,
this.testCreationStack
yield this.makeCaseSpecific(caseParams, subcases);
continue;
}
// Same if there ends up being only one batch.
const subcaseArray = Array.from(subcases);
if (subcaseArray.length <= this.batchSize) {
yield this.makeCaseSpecific(caseParams, subcaseArray);
continue;
}
// There are multiple batches. Helper function for this case:
const makeCaseForBatch = batch => {
const sliceStart = batch * this.batchSize;
return this.makeCaseSpecific(
{ ...caseParams, [kBatchParamName]: batch },
subcaseArray.slice(sliceStart, Math.min(subcaseArray.length, sliceStart + this.batchSize))
);
} else {
const subcaseArray = Array.from(subcases);
if (subcaseArray.length <= this.batchSize) {
yield new RunCaseSpecific(
this.testPath,
caseParams,
this.isUnimplemented,
subcaseArray,
this.fixture,
this.testFn,
this.beforeFn,
this.testCreationStack
);
} else {
for (let i = 0; i < subcaseArray.length; i = i + this.batchSize) {
yield new RunCaseSpecific(
this.testPath,
{ ...caseParams, batch__: i / this.batchSize },
this.isUnimplemented,
subcaseArray.slice(i, Math.min(subcaseArray.length, i + this.batchSize)),
this.fixture,
this.testFn,
this.beforeFn,
this.testCreationStack
);
}
}
};
// If we filter to just one batch, yield it.
if (filterToBatch !== undefined) {
yield makeCaseForBatch(filterToBatch);
continue;
}
// Finally, if not, yield all of the batches.
for (let batch = 0; batch * this.batchSize < subcaseArray.length; ++batch) {
yield makeCaseForBatch(batch);
}
}
}
@ -254,6 +308,18 @@ class RunCaseSpecific {
this.testCreationStack = testCreationStack;
}
computeSubcaseCount() {
if (this.subcases) {
let count = 0;
for (const _subcase of this.subcases) {
count++;
}
return count;
} else {
return 1;
}
}
async runTest(rec, sharedState, params, throwSkip, expectedStatus) {
try {
rec.beginSubCase();
@ -276,8 +342,9 @@ class RunCaseSpecific {
// or unexpected validation/OOM error from the GPUDevice.
if (throwSkip && ex instanceof SkipTestCase) {
throw ex;
} else {
rec.threw(ex);
}
rec.threw(ex);
} finally {
try {
rec.endSubCase(expectedStatus);
@ -466,6 +533,13 @@ class RunCaseSpecific {
rec.threw(ex);
} finally {
rec.finish();
const msg = {
q: selfQuery.toString(),
timems: rec.result.timems,
nonskippedSubcaseCount: rec.nonskippedSubcaseCount,
};
logToWebsocket(JSON.stringify(msg));
}
}
}

View file

@ -1,9 +1,10 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { globalTestConfig } from '../framework/test_config.js';
**/ import { loadMetadataForSuite } from '../framework/metadata.js';
import { globalTestConfig } from '../framework/test_config.js';
import { assert, now } from '../util/util.js';
import { compareQueries, Ordering } from './query/compare.js';
import { comparePublicParamsPaths, compareQueries, Ordering } from './query/compare.js';
import {
TestQueryMultiCase,
TestQuerySingleCase,
@ -50,7 +51,6 @@ export class TestTree {
constructor(forQuery, root) {
this.forQuery = forQuery;
TestTree.propagateCounts(root);
this.root = root;
assert(
root.query.level === 1 && root.query.depthInLevel === 0,
@ -58,6 +58,20 @@ export class TestTree {
);
}
static async create(forQuery, root, maxChunkTime) {
const suite = forQuery.suite;
let chunking = undefined;
if (Number.isFinite(maxChunkTime)) {
const metadata = loadMetadataForSuite(`./src/${suite}`);
assert(metadata !== null, `metadata for ${suite} is missing, but maxChunkTime was requested`);
chunking = { metadata, maxChunkTime };
}
await TestTree.propagateCounts(root, chunking);
return new TestTree(forQuery, root);
}
/**
* Iterate through the leaves of a version of the tree which has been pruned to exclude
* subtrees which:
@ -130,16 +144,48 @@ export class TestTree {
}
/** Propagate the subtreeTODOs/subtreeTests state upward from leaves to parent nodes. */
static propagateCounts(subtree) {
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0 };
static async propagateCounts(subtree, chunking) {
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0, totalTimeMS: 0 };
subtree.subcaseCount = 0;
for (const [, child] of subtree.children) {
if ('children' in child) {
const counts = TestTree.propagateCounts(child);
const counts = await TestTree.propagateCounts(child, chunking);
subtree.subtreeCounts.tests += counts.tests;
subtree.subtreeCounts.nodesWithTODO += counts.nodesWithTODO;
subtree.subtreeCounts.totalTimeMS += counts.totalTimeMS;
subtree.subcaseCount += counts.subcaseCount;
} else {
subtree.subcaseCount = child.subcaseCount;
}
}
return subtree.subtreeCounts;
// If we're chunking based on a maxChunkTime, then at each
// TestQueryMultiCase node of the tree we look at its total time. If the
// total time is larger than the maxChunkTime, we set collapsible=false to
// make sure it gets split up in the output. Note:
// - TestQueryMultiTest and higher nodes are never set to collapsible anyway, so we ignore them.
// - TestQuerySingleCase nodes can't be collapsed, so we ignore them.
if (chunking && subtree.query instanceof TestQueryMultiCase) {
const testLevelQuery = new TestQueryMultiCase(
subtree.query.suite,
subtree.query.filePathParts,
subtree.query.testPathParts,
{}
).toString();
const metadata = chunking.metadata;
const subcaseTiming = metadata[testLevelQuery]?.subcaseMS;
if (subcaseTiming !== undefined) {
const totalTiming = subcaseTiming * subtree.subcaseCount;
subtree.subtreeCounts.totalTimeMS = totalTiming;
if (totalTiming > chunking.maxChunkTime) {
subtree.collapsible = false;
}
}
}
return { ...subtree.subtreeCounts, subcaseCount: subtree.subcaseCount ?? 0 };
}
/** Displays counts in the format `(Nodes with TODOs) / (Total test count)`. */
@ -171,7 +217,11 @@ export class TestTree {
// MAINTENANCE_TODO: Consider having subqueriesToExpand actually impact the depth-order of params
// in the tree.
export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand) {
export async function loadTreeForQuery(
loader,
queryToLoad,
{ subqueriesToExpand, maxChunkTime = Infinity }
) {
const suite = queryToLoad.suite;
const specs = await loader.listing(suite);
@ -283,24 +333,29 @@ export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand)
);
// This is 1 test. Set tests=1 then count TODOs.
subtreeL2.subtreeCounts ??= { tests: 1, nodesWithTODO: 0 };
subtreeL2.subtreeCounts ??= { tests: 1, nodesWithTODO: 0, totalTimeMS: 0 };
if (t.description) setSubtreeDescriptionAndCountTODOs(subtreeL2, t.description);
let caseFilter = null;
if ('params' in queryToLoad) {
caseFilter = queryToLoad.params;
}
// MAINTENANCE_TODO: If tree generation gets too slow, avoid actually iterating the cases in a
// file if there's no need to (based on the subqueriesToExpand).
for (const c of t.iterate()) {
{
const queryL3 = new TestQuerySingleCase(suite, entry.file, c.id.test, c.id.params);
const orderingL3 = compareQueries(queryL3, queryToLoad);
if (orderingL3 === Ordering.Unordered || orderingL3 === Ordering.StrictSuperset) {
// Case is not matched by this query.
for (const c of t.iterate(caseFilter)) {
// iterate() guarantees c's query is equal to or a subset of queryToLoad.
if (queryToLoad instanceof TestQuerySingleCase) {
// A subset is OK if it's TestQueryMultiCase, but for SingleCase it must match exactly.
const ordering = comparePublicParamsPaths(c.id.params, queryToLoad.params);
if (ordering !== Ordering.Equal) {
continue;
}
}
// Leaf for case is suite:a,b:c,d:x=1;y=2
addLeafForCase(subtreeL2, c, isCollapsible);
foundCase = true;
}
}
@ -322,13 +377,13 @@ export async function loadTreeForQuery(loader, queryToLoad, subqueriesToExpand)
}
assert(foundCase, `Query \`${queryToLoad.toString()}\` does not match any cases`);
return new TestTree(queryToLoad, subtreeL0);
return TestTree.create(queryToLoad, subtreeL0, maxChunkTime);
}
function setSubtreeDescriptionAndCountTODOs(subtree, description) {
assert(subtree.description === undefined);
subtree.description = description.trim();
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0 };
subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0, totalTimeMS: 0 };
if (subtree.description.indexOf('TODO') !== -1) {
subtree.subtreeCounts.nodesWithTODO++;
}
@ -477,6 +532,7 @@ function insertLeaf(parent, query, t) {
query,
run: (rec, expectations) => t.run(rec, query, expectations || []),
isUnimplemented: t.isUnimplemented,
subcaseCount: t.computeSubcaseCount(),
};
// This is a leaf (e.g. s:f:t:x=1;* -> s:f:t:x=1). The key is always ''.

View file

@ -1,3 +1,3 @@
// AUTO-GENERATED - DO NOT EDIT. See tools/gen_version.
export const version = '480edec387e8cd5bf5934680050c59a3f7a01438';
export const version = 'f2b59e03621238d0d0fd6305be2c406ce3e45ac2';

View file

@ -0,0 +1,52 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ /**
* - 'uninitialized' means we haven't tried to connect yet
* - Promise means it's pending
* - 'failed' means it failed (this is the most common case, where the logger isn't running)
* - WebSocket means it succeeded
*/ let connection = 'uninitialized';
/**
* Log a string to a websocket at `localhost:59497`. See `tools/websocket-logger`.
*
* This does nothing if a connection couldn't be established on the first call.
*/
/**
 * Log a string to a websocket at `localhost:59497`. See `tools/websocket-logger`.
 *
 * On the first call this kicks off a connection attempt; while the attempt is
 * pending, messages queue behind the connection promise. If the attempt fails
 * (the common case — the logger isn't running), all messages are dropped
 * silently from then on.
 */
export function logToWebsocket(msg) {
  if (connection === 'failed') {
    // A previous attempt already failed; drop the message.
    return;
  }

  if (connection === 'uninitialized') {
    connection = new Promise(resolve => {
      if (typeof WebSocket === 'undefined') {
        resolve('failed');
        return;
      }
      const ws = new WebSocket('ws://localhost:59497/optional_cts_websocket_logger');
      const markFailed = () => {
        connection = 'failed';
        resolve('failed');
      };
      ws.onopen = () => {
        resolve(ws);
      };
      ws.onerror = markFailed;
      ws.onclose = markFailed;
    });
    // Once settled, collapse the promise down to its resolved value so later
    // calls can fast-path on the 'failed' string.
    void connection.then(resolved => {
      connection = resolved;
    });
  }

  void (async () => {
    // connection may be a promise or a value here. Either is OK to await.
    const ws = await connection;
    if (ws !== 'failed') {
      ws.send(msg);
    }
  })();
}

View file

@ -16,3 +16,83 @@ export function optionEnabled(opt, searchParams = getWindowURL().searchParams) {
export function optionString(opt, searchParams = getWindowURL().searchParams) {
return searchParams.get(opt) || '';
}
/**
 * Default values for the options accepted by the CTS harness pages.
 */
export const kDefaultCTSOptions = {
worker: false,
debug: true,
compatibility: false,
unrollConstEvalLoops: false,
powerPreference: '',
};
/**
 * Extra per-option info for the CTS options: a human-readable description,
 * an optional custom `parser` (defaults to `optionEnabled` — see
 * getOptionsInfoFromSearchString), and optional enumerated value choices.
 * Keys mirror those of kDefaultCTSOptions.
 */
export const kCTSOptionsInfo = {
worker: { description: 'run in a worker' },
debug: { description: 'show more info' },
compatibility: { description: 'run in compatibility mode' },
unrollConstEvalLoops: { description: 'unroll const eval loops in WGSL' },
powerPreference: {
description: 'set default powerPreference for some tests',
parser: optionString,
selectValueDescriptions: [
{ value: '', description: 'default' },
{ value: 'low-power', description: 'low-power' },
{ value: 'high-performance', description: 'high-performance' },
],
},
};
/**
 * Converts camel case to snake case.
 * Examples:
 *   fooBar -> foo_bar
 *   parseHTMLFile -> parse_html_file
 */
export function camelCaseToSnakeCase(id) {
  // Two passes: first split an uppercase run from a following capitalized
  // word (HTMLFile -> HTML_File), then split ordinary lower/digit-to-upper
  // boundaries (fooBar -> foo_Bar); finally lowercase the whole thing.
  const acronymBoundary = /(.)([A-Z][a-z]+)/g;
  const caseBoundary = /([a-z0-9])([A-Z])/g;
  const withSeparators = id.replace(acronymBoundary, '$1_$2').replace(caseBoundary, '$1_$2');
  return withSeparators.toLowerCase();
}
/**
 * Builds an option-values object from a URL search string: for each option in
 * `optionsInfos`, reads its snake_cased query parameter using the option's
 * `parser` (falling back to `optionEnabled` when none is given).
 */
function getOptionsInfoFromSearchString(optionsInfos, searchString) {
  const searchParams = new URLSearchParams(searchString);
  return Object.fromEntries(
    Object.entries(optionsInfos).map(([optionName, info]) => {
      const parse = info.parser || optionEnabled;
      return [optionName, parse(camelCaseToSnakeCase(optionName), searchParams)];
    })
  );
}
/**
 * Given a test query string in the form of `suite:foo,bar,moo&opt1=val1&opt2=val2`
 * returns the queries (all `q=` values) and the parsed options.
 */
export function parseSearchParamLikeWithOptions(optionsInfos, query) {
  // A bare query like `suite:foo,*` has no `q=` prefix and no leading `?`;
  // wrap it so URLSearchParams can parse it uniformly.
  const alreadySearchLike = query.includes('q=') || query.startsWith('?');
  const searchString = alreadySearchLike ? query : `q=${query}`;
  return {
    queries: new URLSearchParams(searchString).getAll('q'),
    options: getOptionsInfoFromSearchString(optionsInfos, searchString),
  };
}
/**
 * Given a test query string in the form of `suite:foo,bar,moo&opt1=val1&opt2=val2`
 * returns the queries and the common CTS options (parsed per kCTSOptionsInfo).
 */
export function parseSearchParamLikeWithCTSOptions(query) {
return parseSearchParamLikeWithOptions(kCTSOptionsInfo, query);
}

View file

@ -1,6 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { setBaseResourcePath } from '../../framework/resources.js';
import { globalTestConfig } from '../../framework/test_config.js';
import { DefaultTestFileLoader } from '../../internal/file_loader.js';
import { Logger } from '../../internal/logging/logger.js';
import { parseQuery } from '../../internal/query/parseQuery.js';
@ -8,8 +9,6 @@ import { parseQuery } from '../../internal/query/parseQuery.js';
import { setDefaultRequestAdapterOptions } from '../../util/navigator_gpu.js';
import { assert } from '../../util/util.js';
// Should be DedicatedWorkerGlobalScope, but importing lib "webworker" conflicts with lib "dom".
const loader = new DefaultTestFileLoader();
setBaseResourcePath('../../../resources');
@ -17,14 +16,23 @@ setBaseResourcePath('../../../resources');
self.onmessage = async ev => {
const query = ev.data.query;
const expectations = ev.data.expectations;
const defaultRequestAdapterOptions = ev.data.defaultRequestAdapterOptions;
const debug = ev.data.debug;
const ctsOptions = ev.data.ctsOptions;
setDefaultRequestAdapterOptions(defaultRequestAdapterOptions);
const { debug, unrollConstEvalLoops, powerPreference, compatibility } = ctsOptions;
globalTestConfig.unrollConstEvalLoops = unrollConstEvalLoops;
globalTestConfig.compatibility = compatibility;
Logger.globalDebugMode = debug;
const log = new Logger();
if (powerPreference || compatibility) {
setDefaultRequestAdapterOptions({
...(powerPreference && { powerPreference }),
// MAINTENANCE_TODO: Change this to whatever the option ends up being
...(compatibility && { compatibilityMode: true }),
});
}
const testcases = Array.from(await loader.loadCases(parseQuery(query)));
assert(testcases.length === 1, 'worker query resulted in != 1 cases');

View file

@ -2,14 +2,13 @@
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { LogMessageWithStack } from '../../internal/logging/log_message.js';
import { getDefaultRequestAdapterOptions } from '../../util/navigator_gpu.js';
import { kDefaultCTSOptions } from './options.js';
export class TestWorker {
resolvers = new Map();
constructor(debug) {
this.debug = debug;
constructor(ctsOptions) {
this.ctsOptions = { ...(ctsOptions || kDefaultCTSOptions), ...{ worker: true } };
const selfPath = import.meta.url;
const selfPathDir = selfPath.substring(0, selfPath.lastIndexOf('/'));
const workerPath = selfPathDir + '/test_worker-worker.js';
@ -33,8 +32,7 @@ export class TestWorker {
this.worker.postMessage({
query,
expectations,
debug: this.debug,
defaultRequestAdapterOptions: getDefaultRequestAdapterOptions(),
ctsOptions: this.ctsOptions,
});
const workerResult = await new Promise(resolve => {
this.resolvers.set(query, resolve);

View file

@ -22,7 +22,7 @@ setup({
void (async () => {
const workerEnabled = optionEnabled('worker');
const worker = workerEnabled ? new TestWorker(false) : undefined;
const worker = workerEnabled ? new TestWorker() : undefined;
globalTestConfig.unrollConstEvalLoops = optionEnabled('unroll_const_eval_loops');

View file

@ -10,6 +10,20 @@ export function numericKeysOf(obj) {
return Object.keys(obj).map(n => Number(n));
}
/**
* @returns a new Record from @p objects, using the string returned by Object.toString() as the keys
* and the objects as the values.
*/
export function objectsToRecord(objects) {
  // Build the record by direct key assignment. The previous implementation
  // used an accumulator-spread inside reduce(), which rebuilds the whole
  // object on every iteration (accidentally O(n^2)); this is the same
  // result — later objects with an equal toString() key still win.
  const record = {};
  for (const type of objects) {
    record[type.toString()] = type;
  }
  return record;
}
/**
* Creates an info lookup object from a more nicely-formatted table. See below for examples.
*
@ -27,3 +41,67 @@ export function makeTable(members, defaults, table) {
return result;
}
/**
* Creates an info lookup object from a more nicely-formatted table.
*
* Note: Using `as const` on the arguments to this function is necessary to infer the correct type.
*
* Example:
*
* ```
* const t = makeTableWithDefaults(
 * { default: 'c' }, // columnRenames
* ['a', 'default', 'd'], // columnsKept
* ['a', 'b', 'c', 'd'], // columns
* [123, 456, 789, 1011], // defaults
* { // table
* foo: [1, 2, 3, 4],
* bar: [5, , , 8],
* moo: [ , 9,10, ],
* }
* );
*
* // t = {
* // foo: { a: 1, default: 3, d: 4 },
* // bar: { a: 5, default: 789, d: 8 },
* // moo: { a: 123, default: 10, d: 1011 },
* // };
* ```
*
* MAINTENANCE_TODO: `ZipKeysWithValues<Members, Table[k], Defaults>` is incorrect
* because Members no longer maps to Table[k]. It's not clear if this is even possible to fix
* because it requires mapping, not zipping. Maybe passing in a index mapping
* would fix it (which is gross) but if you have columnsKept as [0, 2, 3] then maybe it would
* be possible to generate the correct type? I don't think we can generate the map at compile time
* so we'd have to hand code it. Other ideas, don't generate kLimitsInfoCore and kLimitsInfoCompat
* where they are keys of infos. Instead, generate kLimitsInfoCoreDefaults, kLimitsInfoCoreMaximums,
* kLimitsInfoCoreClasses where each is just a `{[k: string]: type}`. Could zip those after or,
* maybe that suggests passing in the hard coded indices would work.
*
* @param columnRenames the name of the column in the table that will be assigned to the 'default' property of each entry.
* @param columnsKept the names of properties you want in the generated lookup table. This must be a subset of the columns of the tables except for the name 'default' which is looked from the previous argument.
* @param columns the names of the columns of the name
* @param defaults the default value by column for any element in a row of the table that is undefined
* @param table named table rows.
*/
export function makeTableRenameAndFilter(columnRenames, columnsKept, columns, defaults, table) {
  // Precompute, for each kept property name, which source column index it reads.
  // A rename (if present) redirects the lookup to a differently-named column.
  const memberIndices = columnsKept.map(member => {
    const sourceColumn = columnRenames[member] === undefined ? member : columnRenames[member];
    return [member, columns.indexOf(sourceColumn)];
  });
  const lookup = {};
  for (const [rowName, row] of Object.entries(table)) {
    const entry = {};
    for (const [member, columnIndex] of memberIndices) {
      // Fall back to the per-column default when the row leaves a cell empty.
      entry[member] = row[columnIndex] ?? defaults[columnIndex];
    }
    lookup[rowName] = entry;
  }
  return lookup;
}

View file

@ -2,7 +2,7 @@
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/
import { ErrorWithExtra, assert } from './util.js';
import { ErrorWithExtra, assert, objectEquals } from './util.js';
/**
* Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
@ -37,6 +37,10 @@ let impl = undefined;
let defaultRequestAdapterOptions;
export function setDefaultRequestAdapterOptions(options) {
// It's okay to call this if you don't change the options
if (objectEquals(options, defaultRequestAdapterOptions)) {
return;
}
if (impl) {
throw new Error('must call setDefaultRequestAdapterOptions before getGPU');
}

View file

@ -157,6 +157,13 @@ export function rejectWithoutUncaught(err) {
return p;
}
/**
* Returns true if v is a plain JavaScript object.
*/
export function isPlainObject(v) {
  // Reject primitives, null/undefined, and functions up front.
  if (!v || typeof v !== 'object') {
    return false;
  }
  const proto = Object.getPrototypeOf(v);
  // Treat both `{}`-style objects and null-prototype objects
  // (Object.create(null)) as plain; the latter previously threw a
  // TypeError because `proto` is null and has no `.constructor`.
  return proto === null || proto.constructor === Object.prototype.constructor;
}
/**
* Makes a copy of a JS `object`, with the keys reordered into sorted order.
*/
@ -282,6 +289,51 @@ export const kTypedArrayBufferViews = {
export const kTypedArrayBufferViewKeys = keysOf(kTypedArrayBufferViews);
export const kTypedArrayBufferViewConstructors = Object.values(kTypedArrayBufferViews);
/**
* Creates a case parameter for a typedarray.
*
* You can't put typedarrays in case parameters directly so instead of
*
* ```
* u.combine('data', [
* new Uint8Array([1, 2, 3]),
* new Float32Array([4, 5, 6]),
* ])
* ```
*
* You can use
*
* ```
* u.combine('data', [
 *   typedArrayParam('Uint8Array', [1, 2, 3]),
 *   typedArrayParam('Float32Array', [4, 5, 6]),
* ])
* ```
*
* and then convert the params to typedarrays eg.
*
* ```
* .fn(t => {
* const data = t.params.data.map(v => typedArrayFromParam(v));
* })
* ```
*/
export function typedArrayParam(type, data) {
  // Package the constructor name and element data as a plain, serializable param.
  const param = { type, data };
  return param;
}
export function createTypedArray(type, data) {
  // Look up the typed-array constructor registered under `type` and
  // instantiate it with the given element data.
  const Constructor = kTypedArrayBufferViews[type];
  return new Constructor(data);
}
/**
* Converts a TypedArrayParam to a typedarray. See typedArrayParam
*/
export function typedArrayFromParam(param) {
  // Rebuild the typed array described by a { type, data } case parameter.
  return createTypedArray(param.type, param.data);
}
function subarrayAsU8(buf, { start = 0, length }) {
if (buf instanceof ArrayBuffer) {
return new Uint8Array(buf, start, length);
@ -307,3 +359,25 @@ function subarrayAsU8(buf, { start = 0, length }) {
export function memcpy(src, dst) {
  // View both ranges as bytes, then copy source bytes into the destination.
  const dstBytes = subarrayAsU8(dst.dst, dst);
  const srcBytes = subarrayAsU8(src.src, src);
  dstBytes.set(srcBytes);
}
/**
* Used to create a value that is specified by multiplying some runtime value
* by a constant and then adding a constant to it.
*/
/**
* Filters out SpecValues that are the same.
*/
export function filterUniqueValueTestVariants(valueTestVariants) {
  // Key each variant by its (mult, add) pair; a later duplicate replaces the
  // earlier one at the same key, and first-seen key order is preserved.
  const byVariantKey = new Map();
  for (const variant of valueTestVariants) {
    byVariantKey.set(`m:${variant.mult},a:${variant.add}`, variant);
  }
  return byVariantKey.values();
}
/**
 * Used to create a value that is specified by multiplying some runtime value
 * by a constant and then adding a constant to it. This happens often in tests
 * with limits that can only be known at runtime, and yet we need a way to
 * add parameters to a test and those parameters must be constants.
*/
export function makeValueTestVariant(base, variant) {
  // The variant scales the runtime base value and offsets it by a constant.
  const { mult, add } = variant;
  return mult * base + add;
}

View file

@ -218,6 +218,11 @@
<meta name=variant content='?q=webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,anisotropy:anisotropic_filter_checkerboard:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,anisotropy:anisotropic_filter_mipmap_color:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,filter_mode:magFilter,nearest:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,filter_mode:magFilter,linear:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,filter_mode:minFilter,nearest:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,filter_mode:minFilter,linear:*'>
<meta name=variant content='?q=webgpu:api,operation,sampling,filter_mode:mipmapFilter:*'>
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:getCompilationInfo_returns:*'>
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:line_number_and_position:*'>
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:offset_and_length:*'>
@ -313,6 +318,7 @@
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxColorAttachments:beginRenderPass,at_over:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxColorAttachments:createRenderBundle,at_over:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxColorAttachments:validate,maxColorAttachmentBytesPerSample:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxColorAttachments:validate,kMaxColorAttachmentsToTest:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxComputeInvocationsPerWorkgroup:createComputePipeline,at_over:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeX:createComputePipeline,at_over:*'>
<meta name=variant content='?q=webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeY:createComputePipeline,at_over:*'>
@ -806,6 +812,8 @@
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createShaderModule:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createComputePipeline:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createRenderPipeline:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createComputePipelineAsync:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createRenderPipelineAsync:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createCommandEncoder:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createRenderBundleEncoder:*'>
<meta name=variant content='?q=webgpu:api,validation,state,device_lost,destroy:createQuerySet:*'>
@ -841,6 +849,17 @@
<meta name=variant content='?q=webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_pass_msaa_and_resolve:*'>
<meta name=variant content='?q=webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_bundle_encoder:*'>
<meta name=variant content='?q=webgpu:api,validation,texture,rg11b10ufloat_renderable:create_render_pipeline:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,cmds,copyTextureToBuffer:compressed:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,used:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,unused:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,used:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,unused:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,fragment_state:colorState:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:sample_mask:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,vertex_state:maxVertexAttributesVertexIndexInstanceIndex:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:unsupportedTextureFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:unsupportedTextureViewFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,cubeArray:cube_array:*'>
<meta name=variant content='?q=webgpu:examples:test_name:*'>
<meta name=variant content='?q=webgpu:examples:not_implemented_yet,without_plan:*'>
<meta name=variant content='?q=webgpu:examples:not_implemented_yet,with_plan:*'>
@ -864,6 +883,26 @@
<meta name=variant content='?q=webgpu:idl,constants,flags:ColorWrite,values:*'>
<meta name=variant content='?q=webgpu:idl,constants,flags:ShaderStage,count:*'>
<meta name=variant content='?q=webgpu:idl,constants,flags:ShaderStage,values:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_addition:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_addition:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_addition:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_addition:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:not_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:less_than:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:less_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:greater_than:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_comparison:greater_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_matrix_subtraction:matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_multiplication:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_multiplication:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_multiplication:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_multiplication:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_subtraction:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_subtraction:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_subtraction:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,af_subtraction:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bitwise:bitwise_or:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bitwise:bitwise_or_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bitwise:bitwise_and:*'>
@ -882,13 +921,56 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bool_logical:or_short_circuit:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bool_logical:equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,bool_logical:not_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_addition:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:not_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:less_than:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:less_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:greater_than:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_comparison:greater_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_division:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_addition:matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_addition:matrix_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_matrix_multiplication:matrix_matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_matrix_multiplication:matrix_matrix_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:matrix_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:matrix_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:scalar_matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_subtraction:matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_subtraction:matrix_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:matrix_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:vector_matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:vector_matrix_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_multiplication:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_remainder:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f16_subtraction:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_addition:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_addition:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_addition:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_addition:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_addition:vector_scalar_compound:*'>
@ -900,6 +982,7 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_comparison:greater_than:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_comparison:greater_equals:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_division:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_division:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_division:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_division:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_division:vector_scalar_compound:*'>
@ -917,16 +1000,19 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_matrix_vector_multiplication:vector_matrix:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_matrix_vector_multiplication:vector_matrix_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_multiplication:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:vector_scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_remainder:scalar_vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_subtraction:scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_subtraction:vector:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_subtraction:scalar_compound:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_subtraction:vector_scalar:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,binary,f32_subtraction:vector_scalar_compound:*'>
@ -1031,16 +1117,26 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicAdd:add_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicAnd:and_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicAnd:and_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:exchange:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicLoad:load:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_storage_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_workgroup_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_storage_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_workgroup_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_storage_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_workgroup_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_storage_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_workgroup_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicLoad:load_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicLoad:load_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicMax:max_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicMax:max_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicMin:min_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicMin:min_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicOr:or_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicOr:or_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_storage_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_workgroup_basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_storage_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_workgroup_advanced:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicSub:sub_storage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicSub:sub_workgroup:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,atomics,atomicXor:xor_storage:*'>
@ -1103,6 +1199,9 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,distance:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:abstract_int:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:u32:*'>
@ -1110,7 +1209,9 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f32_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdx:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdxCoarse:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdxFine:f32:*'>
@ -1129,7 +1230,9 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,firstLeadingBit:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,firstLeadingBit:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,firstTrailingBit:u32:*'>
@ -1175,6 +1278,9 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,length:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,log:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,log:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,log:f16:*'>
@ -1193,12 +1299,18 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,min:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,min:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,min:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:matching_abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:matching_f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:matching_f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:nonmatching_abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:nonmatching_f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:monmatching_f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:abstract_float_matching:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f32_matching:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f16_matching:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f32_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f32_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f32_vec2_fract:*'>
@ -1215,11 +1327,21 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f16_vec3_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f16_vec4_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:f16_vec4_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec2_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec2_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec3_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec3_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec4_fract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,modf:abstract_vec4_whole:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f32_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,normalize:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack2x16float:pack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack2x16snorm:pack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack2x16unorm:pack:*'>
@ -1236,12 +1358,16 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f32_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reflect:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f32_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f32_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f32_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,refract:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reverseBits:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,reverseBits:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,round:abstract_float:*'>
@ -1367,14 +1493,24 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack4x8unorm:unpack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,workgroupBarrier:stage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,workgroupBarrier:barrier:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,abstract_float_assignment:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,abstract_float_assignment:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_arithmetic:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:abstract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:bool:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_logical:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_arithmetic:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:bool:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:f32_mat:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f16_conversion:f16_mat:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_arithmetic:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:bool:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:u32:*'>
@ -1382,6 +1518,7 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:f32_mat:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,f32_conversion:f16_mat:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_arithmetic:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_complement:i32_complement:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_conversion:bool:*'>
@ -1526,13 +1663,118 @@
<meta name=variant content='?q=webgpu:shader,execution,statement,increment_decrement:vec4_element_decrement:*'>
<meta name=variant content='?q=webgpu:shader,execution,statement,increment_decrement:frexp_exp_increment:*'>
<meta name=variant content='?q=webgpu:shader,execution,zero_init:compute,zero_init:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_no_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_no_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_no_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:evaluation_stage:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_indirect_recursion_via_array_size:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_indirect_recursion_via_struct_attribute:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,override:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,override:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:let_ptr_explicit_type_matches_var:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:let_ptr_reads:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:let_ptr_writes:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_handle_space_invalid:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_bad_store_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_address_space_never_uses_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_not_instantiable:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:explicit_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:implicit_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:read_access:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:write_access:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,access,vector:vector:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,binary,bitwise_shift:shift_left_concrete:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,binary,bitwise_shift:shift_left_vec_size_mismatch:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,binary,bitwise_shift:shift_right_concrete:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,binary,bitwise_shift:shift_right_vec_size_mismatch:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,abs:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acos:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acos:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acosh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acosh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asin:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asin:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asinh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asinh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atan:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atan:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atan2:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atan2:integer_argument_y:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atan2:integer_argument_x:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atanh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atanh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atomics:stage:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_const_to_f32:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_type_constructible:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_type_nonconstructible:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_to_vec3h:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_to_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:valid_vec2h:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:valid_vec4h:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,ceil:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,ceil:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,clamp:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,cos:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,cos:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,cosh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,cosh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,degrees:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,degrees:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp2:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp2:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,inverseSqrt:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,inverseSqrt:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:scalar:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:vec2:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:vec3:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:vec4:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,log:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,log:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,log2:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,log2:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,modf:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,modf:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,radians:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,radians:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,round:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,round:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,saturate:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,saturate:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sign:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sign:unsigned_integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sin:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sin:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sinh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sinh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sqrt:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,sqrt:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,tan:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,tan:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:one_pointer_one_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:subcalls:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:member_accessors:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:same_pointer_read_and_write:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:aliasing_inside_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:vertex_returns_position:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:entry_point_call_target:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:function_return_types:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:function_parameter_types:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:function_parameter_matching:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_names_must_differ:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_scope_is_function_body:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_number_matches_call:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:call_arg_types_match_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:required_alignment:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:placement:*'>
@ -1551,9 +1793,26 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,comments:unterminated_block_comment:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,const:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,const_assert:parse:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:valid_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:invalid_severity:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:warning_unknown_rule:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:valid_locations:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:invalid_locations:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_directive:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_attribute_same_location:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_attribute_different_location:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,discard:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,enable:enable:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:identifiers:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:module_var_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:module_const_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:override_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:function_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:struct_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:alias_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:function_param_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:function_const_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:function_let_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:function_var_name:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:non_normalized:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:bools:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:abstract_int:*'>
@ -1562,6 +1821,10 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:f32:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:declaration:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:call:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:builtin_must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:builtin_no_must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:vertex_parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:fragment_parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:compute_parsing:*'>
@ -1614,9 +1877,11 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,source:invalid_source:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,unary_ops:all:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,var_and_let:initializer_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,resource_interface,bindings:single_entry_point:*'>
<meta name=variant content='?q=webgpu:shader,validation,resource_interface,bindings:different_entry_points:*'>
<meta name=variant content='?q=webgpu:shader,validation,resource_interface,bindings:binding_attributes:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,var_and_let:var_access_mode_bad_other_template_contents:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,var_and_let:var_access_mode_bad_template_delim:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding_without_group:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:stage_inout:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:type:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:nesting:*'>
@ -1628,21 +1893,72 @@
<meta name=variant content='?q=webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_return_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_return_type_struct:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,entry_point:no_entry_point_provided:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group_without_binding:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:binding_attributes:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:private_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:private_function_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:function_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:function_scope_texture:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:single_entry_point:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:different_entry_points:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,id:id:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,id:id_fp16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,id:id_struct_member:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,id:id_non_override:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,id:id_in_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,interpolate:type_and_sampling:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,interpolate:require_location:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,interpolate:integral_types:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,interpolate:duplicate:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,interpolate:interpolation_validation:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,invariant:parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,invariant:valid_only_with_vertex_position_builtin:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,invariant:not_valid_on_user_defined_io:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,invariant:invalid_use_of_parameters:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,invariant:duplicate:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:stage_inout:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:type:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:nesting:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:duplicates:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:validation:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,locations:location_fp16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size_fp16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size_non_struct:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_fragment_shader:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_vertex_shader:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_const:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_var:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_fp16:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_vector_element:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_matrix_element:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_array_element:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_array_size:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_atomic:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_ptr_store_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_struct_member:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,alias:no_indirect_recursion_via_struct_attribute:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_indirect_recursion_via_array_element:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_indirect_recursion_via_array_size:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_indirect_recursion_via_struct_attribute:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,struct:no_indirect_recursion_via_struct_member_nested_in_alias:*'>
<meta name=variant content='?q=webgpu:shader,validation,types,vector:vector:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:basics:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:fragment_builtin_values:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:compute_builtin_values:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:pointers:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:function_variables:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:function_pointer_parameters:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:short_circuit_expressions:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:binary_expressions:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:unary_expressions:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:functions:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:unorm_texel_data_in_shader:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:snorm_texel_data_in_shader:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:uint_texel_data_in_shader:*'>
@ -1678,12 +1994,16 @@
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,ImageBitmap:from_canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,ImageBitmap:copy_subrect_from_ImageData:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,ImageBitmap:copy_subrect_from_2D_Canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,ImageData:from_ImageData:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,ImageData:copy_subrect_from_ImageData:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,canvas:copy_contents_from_bitmaprenderer_context_canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,canvas:color_space_conversion:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,video:copy_from_video_element:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,image:from_image:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,image:copy_subrect_from_2D_Canvas:*'>
<meta name=variant content='?q=webgpu:web_platform,copyToTexture,video:copy_from_video:*'>
<meta name=variant content='?q=webgpu:web_platform,external_texture,video:importExternalTexture,sample:*'>
<meta name=variant content='?q=webgpu:web_platform,external_texture,video:importExternalTexture,sampleWithRotationMetadata:*'>
<meta name=variant content='?q=webgpu:web_platform,external_texture,video:importExternalTexture,sampleWithVideoFrameWithVisibleRectParam:*'>

View file

@ -10,7 +10,12 @@ import { Fixture } from '../../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { getGPU } from '../../../../common/util/navigator_gpu.js';
import { assert, assertReject, raceWithRejectOnTimeout } from '../../../../common/util/util.js';
import { kFeatureNames, kLimitInfo, kLimits } from '../../../capability_info.js';
import {
getDefaultLimitsForAdapter,
kFeatureNames,
kLimits,
kLimitClasses,
} from '../../../capability_info.js';
import { clamp, isPowerOfTwo } from '../../../util/math.js';
export const g = makeTestGroup(Fixture);
@ -36,10 +41,11 @@ g.test('default')
// Default device should have no features.
t.expect(device.features.size === 0, 'Default device should not have any features');
// All limits should be defaults.
const limitInfo = getDefaultLimitsForAdapter(adapter);
for (const limit of kLimits) {
t.expect(
device.limits[limit] === kLimitInfo[limit].default,
`Expected ${limit} == default: ${device.limits[limit]} != ${kLimitInfo[limit].default}`
device.limits[limit] === limitInfo[limit].default,
`Expected ${limit} == default: ${device.limits[limit]} != ${limitInfo[limit].default}`
);
}
@ -231,10 +237,11 @@ g.test('limits,supported')
const adapter = await gpu.requestAdapter();
assert(adapter !== null);
const limitInfo = getDefaultLimitsForAdapter(adapter);
let value = -1;
switch (limitValue) {
case 'default':
value = kLimitInfo[limit].default;
value = limitInfo[limit].default;
break;
case 'adapter':
value = adapter.limits[limit];
@ -264,7 +271,7 @@ g.test('limit,better_than_supported')
.combine('limit', kLimits)
.beginSubcases()
.expandWithParams(p => {
switch (kLimitInfo[p.limit].class) {
switch (kLimitClasses[p.limit]) {
case 'maximum':
return [
{ mul: 1, add: 1 },
@ -287,9 +294,10 @@ g.test('limit,better_than_supported')
const adapter = await gpu.requestAdapter();
assert(adapter !== null);
const limitInfo = getDefaultLimitsForAdapter(adapter);
const value = adapter.limits[limit] * mul + add;
const requiredLimits = {
[limit]: clamp(value, { min: 0, max: kLimitInfo[limit].maximumValue }),
[limit]: clamp(value, { min: 0, max: limitInfo[limit].maximumValue }),
};
t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
@ -308,7 +316,7 @@ g.test('limit,worse_than_default')
.combine('limit', kLimits)
.beginSubcases()
.expandWithParams(p => {
switch (kLimitInfo[p.limit].class) {
switch (kLimitClasses[p.limit]) {
case 'maximum':
return [
{ mul: 1, add: -1 },
@ -331,13 +339,14 @@ g.test('limit,worse_than_default')
const adapter = await gpu.requestAdapter();
assert(adapter !== null);
const value = kLimitInfo[limit].default * mul + add;
const limitInfo = getDefaultLimitsForAdapter(adapter);
const value = limitInfo[limit].default * mul + add;
const requiredLimits = {
[limit]: clamp(value, { min: 0, max: kLimitInfo[limit].maximumValue }),
[limit]: clamp(value, { min: 0, max: limitInfo[limit].maximumValue }),
};
let success;
switch (kLimitInfo[limit].class) {
switch (limitInfo[limit].class) {
case 'alignment':
success = isPowerOfTwo(value);
break;
@ -350,7 +359,7 @@ g.test('limit,worse_than_default')
const device = await adapter.requestDevice({ requiredLimits });
assert(device !== null);
t.expect(
device.limits[limit] === kLimitInfo[limit].default,
device.limits[limit] === limitInfo[limit].default,
'Devices reported limit should match the default limit'
);

View file

@ -15,8 +15,10 @@ import {
kDepthStencilFormats,
textureDimensionAndFormatCompatible,
depthStencilFormatAspectSize,
isCompressedTextureFormat,
viewCompatible,
} from '../../../format_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
import { makeBufferWithContents } from '../../../util/buffer.js';
import { checkElementsEqual, checkElementsEqualEither } from '../../../util/check_contents.js';
import { align } from '../../../util/math.js';
@ -26,7 +28,7 @@ import { kBytesPerRowAlignment, dataBytesForCopyOrFail } from '../../../util/tex
const dataGenerator = new DataArrayGenerator();
class F extends GPUTest {
class F extends TextureTestMixin(GPUTest) {
GetInitialDataPerMipLevel(dimension, textureSize, format, mipLevel) {
const textureSizeAtLevel = physicalMipSize(textureSize, format, dimension, mipLevel);
const bytesPerBlock = kTextureFormatInfo[format].color.bytes;
@ -62,6 +64,14 @@ class F extends GPUTest {
srcCopyLevel,
dstCopyLevel
) {
this.skipIfTextureFormatNotSupported(srcFormat, dstFormat);
// If we're in compatibility mode and it's a compressed texture
// then we need to render the texture to test the results of the copy.
const extraTextureUsageFlags =
isCompressedTextureFormat(dstFormat) && this.isCompatibility
? GPUTextureUsage.TEXTURE_BINDING
: 0;
const mipLevelCount = dimension === '1d' ? 1 : 4;
// Create srcTexture and dstTexture
@ -78,7 +88,7 @@ class F extends GPUTest {
dimension,
size: dstTextureSize,
format: dstFormat,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | extraTextureUsageFlags,
mipLevelCount,
};
const dstTexture = this.device.createTexture(dstTextureDesc);
@ -165,14 +175,23 @@ class F extends GPUTest {
assert(appliedCopyDepth >= 0);
const encoder = this.device.createCommandEncoder();
encoder.copyTextureToTexture(
{ texture: srcTexture, mipLevel: srcCopyLevel, origin: appliedSrcOffset },
{ texture: dstTexture, mipLevel: dstCopyLevel, origin: appliedDstOffset },
{ width: appliedCopyWidth, height: appliedCopyHeight, depthOrArrayLayers: appliedCopyDepth }
);
const appliedSize = {
width: appliedCopyWidth,
height: appliedCopyHeight,
depthOrArrayLayers: appliedCopyDepth,
};
{
const encoder = this.device.createCommandEncoder();
encoder.copyTextureToTexture(
{ texture: srcTexture, mipLevel: srcCopyLevel, origin: appliedSrcOffset },
{ texture: dstTexture, mipLevel: dstCopyLevel, origin: appliedDstOffset },
appliedSize
);
this.device.queue.submit([encoder.finish()]);
}
// Copy the whole content of dstTexture at dstCopyLevel to dstBuffer.
const dstBlocksPerRow = dstTextureSizeAtLevel.width / blockWidth;
const dstBlockRowsPerImage = dstTextureSizeAtLevel.height / blockHeight;
const bytesPerDstAlignedBlockRow = align(dstBlocksPerRow * bytesPerBlock, 256);
@ -180,6 +199,67 @@ class F extends GPUTest {
(dstBlockRowsPerImage * dstTextureSizeAtLevel.depthOrArrayLayers - 1) *
bytesPerDstAlignedBlockRow +
align(dstBlocksPerRow * bytesPerBlock, 4);
if (isCompressedTextureFormat(dstTexture.format) && this.isCompatibility) {
assert(viewCompatible(srcFormat, dstFormat));
// compare by rendering. We need the expected texture to match
// the dstTexture so we'll create a texture where we supply
// all of the data in JavaScript.
const expectedTexture = this.device.createTexture({
size: [dstTexture.width, dstTexture.height, dstTexture.depthOrArrayLayers],
mipLevelCount: dstTexture.mipLevelCount,
format: dstTexture.format,
usage: dstTexture.usage,
});
const expectedData = new Uint8Array(dstBufferSize);
// Execute the equivalent of `copyTextureToTexture`, copying
// from `initialSrcData` to `expectedData`.
this.updateLinearTextureDataSubBox(dstFormat, appliedSize, {
src: {
dataLayout: {
bytesPerRow: srcBlocksPerRow * bytesPerBlock,
rowsPerImage: srcBlockRowsPerImage,
offset: 0,
},
origin: appliedSrcOffset,
data: initialSrcData,
},
dest: {
dataLayout: {
bytesPerRow: dstBlocksPerRow * bytesPerBlock,
rowsPerImage: dstBlockRowsPerImage,
offset: 0,
},
origin: appliedDstOffset,
data: expectedData,
},
});
// Upload `expectedData` to `expectedTexture`. If `copyTextureToTexture`
// worked then the contents of `dstTexture` should match `expectedTexture`
this.queue.writeTexture(
{ texture: expectedTexture, mipLevel: dstCopyLevel },
expectedData,
{
bytesPerRow: dstBlocksPerRow * bytesPerBlock,
rowsPerImage: dstBlockRowsPerImage,
},
dstTextureSizeAtLevel
);
this.expectTexturesToMatchByRendering(
dstTexture,
expectedTexture,
dstCopyLevel,
appliedDstOffset,
appliedSize
);
return;
}
// Copy the whole content of dstTexture at dstCopyLevel to dstBuffer.
const dstBufferDesc = {
size: dstBufferSize,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
@ -187,17 +267,20 @@ class F extends GPUTest {
const dstBuffer = this.device.createBuffer(dstBufferDesc);
this.trackForCleanup(dstBuffer);
encoder.copyTextureToBuffer(
{ texture: dstTexture, mipLevel: dstCopyLevel },
{
buffer: dstBuffer,
bytesPerRow: bytesPerDstAlignedBlockRow,
rowsPerImage: dstBlockRowsPerImage,
},
dstTextureSizeAtLevel
);
{
const encoder = this.device.createCommandEncoder();
encoder.copyTextureToBuffer(
{ texture: dstTexture, mipLevel: dstCopyLevel },
{
buffer: dstBuffer,
bytesPerRow: bytesPerDstAlignedBlockRow,
rowsPerImage: dstBlockRowsPerImage,
},
dstTextureSizeAtLevel
);
this.device.queue.submit([encoder.finish()]);
this.device.queue.submit([encoder.finish()]);
}
// Fill expectedUint8DataWithPadding with the expected data of dstTexture. The other values in
// expectedUint8DataWithPadding are kept 0 to check if the texels untouched by the copy are 0
@ -810,6 +893,7 @@ g.test('color_textures,compressed,non_array')
)
.beforeAllSubcases(t => {
const { srcFormat, dstFormat } = t.params;
t.skipIfCopyTextureToTextureNotSupportedForFormat(srcFormat, dstFormat);
t.selectDeviceOrSkipTestCase([
kTextureFormatInfo[srcFormat].feature,
kTextureFormatInfo[dstFormat].feature,
@ -963,7 +1047,7 @@ g.test('color_textures,compressed,array')
)
.beforeAllSubcases(t => {
const { srcFormat, dstFormat } = t.params;
t.skipIfCopyTextureToTextureNotSupportedForFormat(srcFormat, dstFormat);
t.selectDeviceOrSkipTestCase([
kTextureFormatInfo[srcFormat].feature,
kTextureFormatInfo[dstFormat].feature,

View file

@ -33,14 +33,9 @@
DoCopyTextureToBufferWithDepthAspectTest().
TODO: Expand tests of GPUExtent3D [1]
TODO: Fix this test for the various skipped formats [2]:
- snorm tests failing due to rounding
- float tests failing because float values are not byte-preserved
- compressed formats
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert, memcpy, unreachable } from '../../../../common/util/util.js';
import { assert, ErrorWithExtra, memcpy, unreachable } from '../../../../common/util/util.js';
import {
kMinDynamicBufferOffsetAlignment,
kBufferSizeAlignment,
@ -53,10 +48,13 @@ import {
depthStencilBufferTextureCopySupported,
textureDimensionAndFormatCompatible,
depthStencilFormatAspectSize,
isCompressedTextureFormat,
} from '../../../format_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
import { makeBufferWithContents } from '../../../util/buffer.js';
import { checkElementsEqual } from '../../../util/check_contents.js';
import { align } from '../../../util/math.js';
import { physicalMipSizeFromTexture } from '../../../util/texture/base.js';
import { DataArrayGenerator } from '../../../util/texture/data_generation.js';
import {
bytesInACompleteRow,
@ -64,6 +62,8 @@ import {
getTextureCopyLayout,
kBytesPerRowAlignment,
} from '../../../util/texture/layout.js';
import { TexelView } from '../../../util/texture/texel_view.js';
import { findFailedPixels } from '../../../util/texture/texture_ok.js';
/** Each combination of methods assume that the ones before it were tested and work correctly. */
const kMethodsToTest = [
@ -75,64 +75,10 @@ const kMethodsToTest = [
{ initMethod: 'WriteTexture', checkMethod: 'PartialCopyT2B' },
];
// [2]: Fix things so this list can be reduced to zero (see file description)
const kExcludedFormats = new Set([
'r8snorm',
'rg8snorm',
'rgba8snorm',
'rg11b10ufloat',
'rg16float',
'rgba16float',
'r32float',
'rg32float',
'rgba32float',
]);
const kWorkingColorTextureFormats = kColorTextureFormats.filter(x => !kExcludedFormats.has(x));
const dataGenerator = new DataArrayGenerator();
const altDataGenerator = new DataArrayGenerator();
class ImageCopyTest extends GPUTest {
/** Offset for a particular texel in the linear texture data */
getTexelOffsetInBytes(textureDataLayout, format, texel, origin = { x: 0, y: 0, z: 0 }) {
const { offset, bytesPerRow, rowsPerImage } = textureDataLayout;
const info = kTextureFormatInfo[format];
assert(texel.x >= origin.x && texel.y >= origin.y && texel.z >= origin.z);
assert(texel.x % info.blockWidth === 0);
assert(texel.y % info.blockHeight === 0);
assert(origin.x % info.blockWidth === 0);
assert(origin.y % info.blockHeight === 0);
const bytesPerImage = rowsPerImage * bytesPerRow;
return (
offset +
(texel.z - origin.z) * bytesPerImage +
((texel.y - origin.y) / info.blockHeight) * bytesPerRow +
((texel.x - origin.x) / info.blockWidth) * info.color.bytes
);
}
*iterateBlockRows(size, origin, format) {
if (size.width === 0 || size.height === 0 || size.depthOrArrayLayers === 0) {
// do not iterate anything for an empty region
return;
}
const info = kTextureFormatInfo[format];
assert(size.height % info.blockHeight === 0);
for (let y = 0; y < size.height; y += info.blockHeight) {
for (let z = 0; z < size.depthOrArrayLayers; ++z) {
yield {
x: origin.x,
y: origin.y + y,
z: origin.z + z,
};
}
}
}
class ImageCopyTest extends TextureTestMixin(GPUTest) {
/**
* This is used for testing passing undefined members of `GPUImageDataLayout` instead of actual
* values where possible. Passing arguments as values and not as objects so that they are passed
@ -203,6 +149,114 @@ class ImageCopyTest extends GPUTest {
}
}
/**
* Compares data in `expected` to data in `buffer.
* Areas defined by size and dataLayout are compared by interpreting the data as appropriate
* for the texture format. As an example, with 'rgb9e5ufloat' multiple values can
* represent the same number. For example, double the exponent and halving the
* mantissa. Areas outside the area defined by size and dataLayout are expected to match
* by binary comparison.
*/
expectGPUBufferValuesEqualWhenInterpretedAsTextureFormat(
expected,
buffer,
format,
size,
dataLayout
) {
if (isCompressedTextureFormat(format)) {
this.expectGPUBufferValuesEqual(buffer, expected);
return;
}
const regularFormat = format;
// data is in a format like this
//
// ....
// ttttt..
// ttttt..
// ttttt..
// .......
// ttttt..
// ttttt..
// ttttt...
//
// where the first `....` represents the portion of the buffer before
// `dataLayout.offset`. `ttttt` represents width (size[0]) and `..`
// represents the portion when `dataLayout.bytesPerRow` is greater than the
// data needed for width texels. `......` represents when height (size[1])
// is less than `dataLayout.rowsPerImage`. `...` represents any data past
// ((height - 1) * depth * bytePerRow + bytesPerRow) and the end of the
// buffer
const checkByTextureFormat = actual => {
const zero = { x: 0, y: 0, z: 0 };
// compare texel areas
{
const actTexelView = TexelView.fromTextureDataByReference(regularFormat, actual, {
bytesPerRow: dataLayout.bytesPerRow,
rowsPerImage: dataLayout.rowsPerImage,
subrectOrigin: [0, 0, 0],
subrectSize: size,
});
const expTexelView = TexelView.fromTextureDataByReference(regularFormat, expected, {
bytesPerRow: dataLayout.bytesPerRow,
rowsPerImage: dataLayout.rowsPerImage,
subrectOrigin: [0, 0, 0],
subrectSize: size,
});
const failedPixelsMessage = findFailedPixels(
regularFormat,
zero,
size,
{ actTexelView, expTexelView },
{
maxFractionalDiff: 0,
}
);
if (failedPixelsMessage !== undefined) {
const msg = 'Texture level had unexpected contents:\n' + failedPixelsMessage;
return new ErrorWithExtra(msg, () => ({
expTexelView,
actTexelView,
}));
}
}
// compare non texel areas
{
const rowLength = bytesInACompleteRow(size.width, format);
let lastOffset = 0;
for (const texel of this.iterateBlockRows(size, format)) {
const offset = this.getTexelOffsetInBytes(dataLayout, format, texel, zero);
const actualPart = actual.subarray(lastOffset, offset);
const expectedPart = expected.subarray(lastOffset, offset);
const error = checkElementsEqual(actualPart, expectedPart);
if (error) {
return error;
}
assert(offset >= lastOffset); // make sure iterateBlockRows always goes forward
lastOffset = offset + rowLength;
}
// compare end of buffers
{
const actualPart = actual.subarray(lastOffset, actual.length);
const expectedPart = expected.subarray(lastOffset, expected.length);
return checkElementsEqual(actualPart, expectedPart);
}
}
};
this.expectGPUBufferValuesPassCheck(buffer, checkByTextureFormat, {
srcByteOffset: 0,
type: Uint8Array,
typedLength: expected.length,
method: 'copy',
mode: 'fail',
});
}
/** Run a CopyT2B command with appropriate arguments corresponding to `ChangeBeforePass` */
copyTextureToBufferWithAppliedArguments(
buffer,
@ -312,12 +366,79 @@ class ImageCopyTest extends GPUTest {
}
}
generateMatchingTextureInJSRenderAndCompareContents(
{ texture: actualTexture, mipLevel: mipLevelOrUndefined, origin },
copySize,
format,
expected,
expectedDataLayout
) {
const size = [actualTexture.width, actualTexture.height, actualTexture.depthOrArrayLayers];
const expectedTexture = this.device.createTexture({
label: 'expectedTexture',
size,
dimension: actualTexture.dimension,
format,
mipLevelCount: actualTexture.mipLevelCount,
usage: actualTexture.usage,
});
this.trackForCleanup(expectedTexture);
const mipLevel = mipLevelOrUndefined || 0;
const fullMipLevelTextureCopyLayout = getTextureCopyLayout(
format,
actualTexture.dimension,
size,
{
mipLevel,
}
);
// allocate data for entire mip level.
const expectedTextureMipLevelData = new Uint8Array(
align(fullMipLevelTextureCopyLayout.byteLength, 4)
);
const mipSize = physicalMipSizeFromTexture(expectedTexture, mipLevel);
// update the data for the entire mip level with the data
// that would be copied to the "actual" texture
this.updateLinearTextureDataSubBox(format, copySize, {
src: {
dataLayout: expectedDataLayout,
origin: { x: 0, y: 0, z: 0 },
data: expected,
},
dest: {
dataLayout: { offset: 0, ...fullMipLevelTextureCopyLayout },
origin,
data: expectedTextureMipLevelData,
},
});
// MAINTENANCE_TODO: If we're testing writeTexture should this use copyBufferToTexture instead?
this.queue.writeTexture(
{ texture: expectedTexture, mipLevel },
expectedTextureMipLevelData,
{ ...fullMipLevelTextureCopyLayout, offset: 0 },
mipSize
);
this.expectTexturesToMatchByRendering(
actualTexture,
expectedTexture,
mipLevel,
origin,
copySize
);
}
/**
* We check an appropriate part of the texture against the given data.
* Used directly with PartialCopyT2B check method (for a subpart of the texture)
* and by `copyWholeTextureToBufferAndCheckContentsWithUpdatedData` with FullCopyT2B check method
* (for the whole texture). We also ensure that CopyT2B doesn't overwrite bytes it's not supposed
* to if validateOtherBytesInBuffer is set to true.
* (for the whole texture).
*/
copyPartialTextureToBufferAndCheckContents(
{ texture, mipLevel, origin },
@ -337,6 +458,14 @@ class ImageCopyTest extends GPUTest {
GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
);
// At this point both buffer and bufferData have the same random data in
// them. We'll use copyTextureToBuffer to update buffer with data from the
// texture and updateLinearTextureDataSubBox to update bufferData with the
// data we originally uploaded to the texture.
// buffer has ...... in it.
// Copy to buffer the portion of texture that was previously uploaded.
// After execution buffer has t.t.t. because the rows are padded.
this.copyTextureToBufferWithAppliedArguments(
buffer,
expectedDataLayout,
@ -345,80 +474,32 @@ class ImageCopyTest extends GPUTest {
changeBeforePass
);
this.updateLinearTextureDataSubBox(
expectedDataLayout,
expectedDataLayout,
checkSize,
origin,
origin,
format,
bufferData,
expected
);
// We originally copied expected to texture using expectedDataLayout.
// We're copying back out of texture above.
this.expectGPUBufferValuesEqual(buffer, bufferData);
}
/**
* Copies the whole texture into linear data stored in a buffer for further checks.
*
* Used for `copyWholeTextureToBufferAndCheckContentsWithUpdatedData`.
*/
copyWholeTextureToNewBuffer({ texture, mipLevel }, resultDataLayout) {
const { mipSize, byteLength, bytesPerRow, rowsPerImage } = resultDataLayout;
const buffer = this.device.createBuffer({
size: align(byteLength, 4), // this is necessary because we need to copy and map data from this buffer
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
// bufferData has ...... in it.
// Update bufferData to have the same contents as buffer.
// When done, bufferData now has t.t.t. because the rows are padded.
this.updateLinearTextureDataSubBox(format, checkSize, {
src: {
dataLayout: expectedDataLayout,
origin: { x: 0, y: 0, z: 0 },
data: expected,
},
dest: {
dataLayout: expectedDataLayout,
origin: { x: 0, y: 0, z: 0 },
data: bufferData,
},
});
this.trackForCleanup(buffer);
const encoder = this.device.createCommandEncoder();
encoder.copyTextureToBuffer(
{ texture, mipLevel },
{ buffer, bytesPerRow, rowsPerImage },
mipSize
this.expectGPUBufferValuesEqualWhenInterpretedAsTextureFormat(
bufferData,
buffer,
format,
checkSize,
expectedDataLayout
);
this.device.queue.submit([encoder.finish()]);
return buffer;
}
/**
* Takes the data returned by `copyWholeTextureToNewBuffer` and updates it after a copy operation
* on the texture by emulating the copy behaviour here directly.
*/
updateLinearTextureDataSubBox(
destinationDataLayout,
sourceDataLayout,
copySize,
destinationOrigin,
sourceOrigin,
format,
destination,
source
) {
for (const texel of this.iterateBlockRows(copySize, sourceOrigin, format)) {
const srcOffsetElements = this.getTexelOffsetInBytes(
sourceDataLayout,
format,
texel,
sourceOrigin
);
const dstOffsetElements = this.getTexelOffsetInBytes(
destinationDataLayout,
format,
texel,
destinationOrigin
);
const rowLength = bytesInACompleteRow(copySize.width, format);
memcpy(
{ src: source, start: srcOffsetElements, length: rowLength },
{ dst: destination, start: dstOffsetElements }
);
}
}
/**
@ -449,17 +530,18 @@ class ImageCopyTest extends GPUTest {
// other eventual async expectations to ensure it will be correct.
this.eventualAsyncExpectation(async () => {
const readback = await readbackPromise;
this.updateLinearTextureDataSubBox(
{ offset: 0, ...fullTextureCopyLayout },
texturePartialDataLayout,
copySize,
destinationOrigin,
origin,
format,
readback.data,
partialData
);
this.updateLinearTextureDataSubBox(format, copySize, {
dest: {
dataLayout: { offset: 0, ...fullTextureCopyLayout },
origin,
data: readback.data,
},
src: {
dataLayout: texturePartialDataLayout,
origin: { x: 0, y: 0, z: 0 },
data: partialData,
},
});
this.copyPartialTextureToBufferAndCheckContents(
{ texture, mipLevel, origin: destinationOrigin },
{ width: mipSize[0], height: mipSize[1], depthOrArrayLayers: mipSize[2] },
@ -495,7 +577,7 @@ class ImageCopyTest extends GPUTest {
format,
dimension,
mipLevelCount: mipLevel + 1,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
});
this.trackForCleanup(texture);
@ -512,27 +594,27 @@ class ImageCopyTest extends GPUTest {
changeBeforePass
);
this.copyPartialTextureToBufferAndCheckContents(
{ texture, mipLevel, origin },
copySize,
format,
data,
textureDataLayout,
changeBeforePass
);
if (this.canCallCopyTextureToBufferWithTextureFormat(texture.format)) {
this.copyPartialTextureToBufferAndCheckContents(
{ texture, mipLevel, origin },
copySize,
format,
data,
textureDataLayout,
changeBeforePass
);
} else {
this.generateMatchingTextureInJSRenderAndCompareContents(
{ texture, mipLevel, origin },
copySize,
format,
data,
textureDataLayout
);
}
break;
}
case 'FullCopyT2B': {
const fullTextureCopyLayout = getTextureCopyLayout(format, dimension, textureSize, {
mipLevel,
});
const fullData = this.copyWholeTextureToNewBuffer(
{ texture, mipLevel },
fullTextureCopyLayout
);
this.uploadLinearTextureDataToTextureSubBox(
{ texture, mipLevel, origin },
textureDataLayout,
@ -542,16 +624,36 @@ class ImageCopyTest extends GPUTest {
changeBeforePass
);
this.copyWholeTextureToBufferAndCheckContentsWithUpdatedData(
{ texture, mipLevel, origin },
fullTextureCopyLayout,
textureDataLayout,
copySize,
format,
fullData,
data
);
if (this.canCallCopyTextureToBufferWithTextureFormat(texture.format)) {
const fullTextureCopyLayout = getTextureCopyLayout(format, dimension, textureSize, {
mipLevel,
});
const fullData = this.copyWholeTextureToNewBuffer(
{ texture, mipLevel },
fullTextureCopyLayout
);
this.copyWholeTextureToBufferAndCheckContentsWithUpdatedData(
{ texture, mipLevel, origin },
fullTextureCopyLayout,
textureDataLayout,
copySize,
format,
fullData,
data
);
} else {
this.generateMatchingTextureInJSRenderAndCompareContents(
{ texture, mipLevel, origin },
copySize,
format,
data,
textureDataLayout
//fullTextureCopyLayout,
//fullData,
);
}
break;
}
default:
@ -1224,7 +1326,7 @@ bytes in copy works for every format.
.params(u =>
u
.combineWithParams(kMethodsToTest)
.combine('format', kWorkingColorTextureFormats)
.combine('format', kColorTextureFormats)
.filter(formatCanBeTested)
.combine('dimension', kTextureDimensions)
.filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
@ -1239,6 +1341,7 @@ bytes in copy works for every format.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -1324,7 +1427,7 @@ works for every format with 2d and 2d-array textures.
.params(u =>
u
.combineWithParams(kMethodsToTest)
.combine('format', kWorkingColorTextureFormats)
.combine('format', kColorTextureFormats)
.filter(formatCanBeTested)
.combine('dimension', kTextureDimensions)
.filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
@ -1335,6 +1438,7 @@ works for every format with 2d and 2d-array textures.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -1396,7 +1500,7 @@ for all formats. We pass origin and copyExtent as [number, number, number].`
.params(u =>
u
.combineWithParams(kMethodsToTest)
.combine('format', kWorkingColorTextureFormats)
.combine('format', kColorTextureFormats)
.filter(formatCanBeTested)
.combine('dimension', kTextureDimensions)
.filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
@ -1414,6 +1518,7 @@ for all formats. We pass origin and copyExtent as [number, number, number].`
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -1551,7 +1656,7 @@ TODO: Make a variant for depth-stencil formats.
.params(u =>
u
.combineWithParams(kMethodsToTest)
.combine('format', kWorkingColorTextureFormats)
.combine('format', kColorTextureFormats)
.filter(formatCanBeTested)
.combine('dimension', ['2d', '3d'])
.filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
@ -1604,6 +1709,7 @@ TODO: Make a variant for depth-stencil formats.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -4,18 +4,11 @@
Basic command buffer compute tests.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { kLimitInfo } from '../../../capability_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { checkElementsEqualGenerated } from '../../../util/check_contents.js';
export const g = makeTestGroup(GPUTest);
const kMaxComputeWorkgroupSize = [
kLimitInfo.maxComputeWorkgroupSizeX.default,
kLimitInfo.maxComputeWorkgroupSizeY.default,
kLimitInfo.maxComputeWorkgroupSizeZ.default,
];
g.test('memcpy').fn(t => {
const data = new Uint32Array([0x01020304]);
@ -73,28 +66,33 @@ g.test('large_dispatch')
.params(u =>
u
// Reasonably-sized powers of two, and some stranger larger sizes.
.combine('dispatchSize', [
256,
2048,
315,
628,
2179,
kLimitInfo.maxComputeWorkgroupsPerDimension.default,
])
.combine('dispatchSize', [256, 2048, 315, 628, 2179, 'maximum'])
// Test some reasonable workgroup sizes.
.beginSubcases()
// 0 == x axis; 1 == y axis; 2 == z axis.
.combine('largeDimension', [0, 1, 2])
.expand('workgroupSize', p => [1, 2, 8, 32, kMaxComputeWorkgroupSize[p.largeDimension]])
.expand('workgroupSize', p => [1, 2, 8, 32, 'maximum'])
)
.fn(t => {
// The output storage buffer is filled with this value.
const val = 0x01020304;
const badVal = 0xbaadf00d;
const wgSize = t.params.workgroupSize;
const bufferLength = t.params.dispatchSize * wgSize;
const kMaxComputeWorkgroupSize = [
t.device.limits.maxComputeWorkgroupSizeX,
t.device.limits.maxComputeWorkgroupSizeY,
t.device.limits.maxComputeWorkgroupSizeZ,
];
const wgSize =
t.params.workgroupSize === 'maximum'
? kMaxComputeWorkgroupSize[t.params.largeDimension]
: t.params.workgroupSize;
const dispatchSize =
t.params.dispatchSize === 'maximum'
? t.device.limits.maxComputeWorkgroupsPerDimension
: t.params.dispatchSize;
const bufferLength = dispatchSize * wgSize;
const bufferByteSize = Uint32Array.BYTES_PER_ELEMENT * bufferLength;
const dst = t.device.createBuffer({
size: bufferByteSize,
@ -104,9 +102,9 @@ g.test('large_dispatch')
// Only use one large dimension and workgroup size in the dispatch
// call to keep the size of the test reasonable.
const dims = [1, 1, 1];
dims[t.params.largeDimension] = t.params.dispatchSize;
dims[t.params.largeDimension] = dispatchSize;
const wgSizes = [1, 1, 1];
wgSizes[t.params.largeDimension] = t.params.workgroupSize;
wgSizes[t.params.largeDimension] = wgSize;
const pipeline = t.device.createComputePipeline({
layout: 'auto',
compute: {

View file

@ -151,6 +151,9 @@ g.test('render_pass_store_op,color_attachment_only')
.combine('mipLevel', kMipLevel)
.combine('arrayLayer', kArrayLayers)
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.colorFormat);
})
.fn(t => {
const colorAttachment = t.device.createTexture({
format: t.params.colorFormat,

View file

@ -5,8 +5,11 @@
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { kLimitInfo } from '../../../capability_info.js';
import { kRenderableColorTextureFormats, kTextureFormatInfo } from '../../../format_info.js';
import {
computeBytesPerSampleFromFormats,
kRenderableColorTextureFormats,
kTextureFormatInfo,
} from '../../../format_info.js';
import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
import { getFragmentShaderCodeWithOutput, getPlainTypeInfo } from '../../../util/shader.js';
import { kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
@ -28,11 +31,14 @@ export const g = makeTestGroup(TextureTestMixin(GPUTest));
// Values to write into each attachment
// We make values different for each attachment index and each channel
// to make sure they didn't get mixed up
// Clamp alpha to 3 to avoid comparing a large expected value with a max 3 value for rgb10a2uint
// MAINTENANCE_TODO: Make TexelRepresentation.numericRange per-component and use that.
const attachmentsIntWriteValues = [
{ R: 1, G: 2, B: 3, A: 4 },
{ R: 5, G: 6, B: 7, A: 8 },
{ R: 9, G: 10, B: 11, A: 12 },
{ R: 13, G: 14, B: 15, A: 16 },
{ R: 1, G: 2, B: 3, A: 1 },
{ R: 5, G: 6, B: 7, A: 2 },
{ R: 9, G: 10, B: 11, A: 3 },
{ R: 13, G: 14, B: 15, A: 0 },
];
const attachmentsFloatWriteValues = [
@ -49,18 +55,11 @@ g.test('color,attachments')
.combine('format', kRenderableColorTextureFormats)
.beginSubcases()
.combine('attachmentCount', [2, 3, 4])
.filter(t => {
// We only need to test formats that have a valid color attachment bytes per sample.
const pixelByteCost = kTextureFormatInfo[t.format].colorRender?.byteCost;
return (
pixelByteCost !== undefined &&
pixelByteCost * t.attachmentCount <= kLimitInfo.maxColorAttachmentBytesPerSample.default
);
})
.expand('emptyAttachmentId', p => range(p.attachmentCount, i => i))
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -68,6 +67,14 @@ g.test('color,attachments')
const componentCount = kTexelRepresentationInfo[format].componentOrder.length;
const info = kTextureFormatInfo[format];
// We only need to test formats that have a valid color attachment bytes per sample.
const pixelByteCost = kTextureFormatInfo[format].colorRender?.byteCost;
t.skipIf(
pixelByteCost === undefined ||
computeBytesPerSampleFromFormats(range(attachmentCount, () => format)) >
t.device.limits.maxColorAttachmentBytesPerSample
);
const writeValues =
info.color.type === 'sint' || info.color.type === 'uint'
? attachmentsIntWriteValues
@ -156,6 +163,7 @@ g.test('color,component_count')
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -262,6 +262,9 @@ struct FragmentOutput2 {
class F extends TextureTestMixin(GPUTest) {
async init() {
await super.init();
if (this.isCompatibility) {
this.skip('WGSL sample_mask is not supported in compatibility mode');
}
// Create a 2x2 color texture to sample from
// texel 0 - Red
// texel 1 - Green

View file

@ -322,6 +322,9 @@ g.test('blending,formats')
u //
.combine('format', kBlendableFormats)
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(t => {
const { format } = t.params;

View file

@ -423,9 +423,10 @@ g.test('vertex_attributes,basic')
const vertexCount = 4;
const instanceCount = 4;
const attributesPerVertexBuffer =
t.params.vertex_attribute_count / t.params.vertex_buffer_count;
assert(Math.round(attributesPerVertexBuffer) === attributesPerVertexBuffer);
// In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute.
const maxAttributes = t.device.limits.maxVertexAttributes - (t.isCompatibility ? 2 : 0);
const numAttributes = Math.min(maxAttributes, t.params.vertex_attribute_count);
const maxAttributesPerVertexBuffer = Math.ceil(numAttributes / t.params.vertex_buffer_count);
let shaderLocation = 0;
let attributeValue = 0;
@ -470,7 +471,12 @@ g.test('vertex_attributes,basic')
}
const attributes = [];
for (let a = 0; a < attributesPerVertexBuffer; ++a) {
const numAttributesForBuffer = Math.min(
maxAttributesPerVertexBuffer,
maxAttributes - b * maxAttributesPerVertexBuffer
);
for (let a = 0; a < numAttributesForBuffer; ++a) {
const attribute = {
format: t.params.vertex_format,
shaderLocation,
@ -483,7 +489,7 @@ g.test('vertex_attributes,basic')
}
for (let v = 0; v < vertexOrInstanceCount; ++v) {
for (let a = 0; a < attributesPerVertexBuffer; ++a) {
for (let a = 0; a < numAttributesForBuffer; ++a) {
vertexBufferValues.push(attributeValue);
attributeValue += 1.234; // Values will get rounded later if we make a Uint32Array.
}
@ -564,7 +570,7 @@ g.test('vertex_attributes,basic')
let accumulateVariableDeclarationsInFragmentShader = '';
let accumulateVariableAssignmentsInFragmentShader = '';
// The remaining 3 vertex attributes
if (t.params.vertex_attribute_count === 16) {
if (numAttributes === 16) {
accumulateVariableDeclarationsInVertexShader = `
@location(13) @interpolate(flat) outAttrib13 : vec4<${wgslFormat}>,
`;
@ -581,6 +587,21 @@ g.test('vertex_attributes,basic')
outBuffer.primitives[input.primitiveId].attrib14 = input.attrib13.z;
outBuffer.primitives[input.primitiveId].attrib15 = input.attrib13.w;
`;
} else if (numAttributes === 14) {
accumulateVariableDeclarationsInVertexShader = `
@location(13) @interpolate(flat) outAttrib13 : vec4<${wgslFormat}>,
`;
accumulateVariableAssignmentsInVertexShader = `
output.outAttrib13 =
vec4<${wgslFormat}>(input.attrib12, input.attrib13, 0, 0);
`;
accumulateVariableDeclarationsInFragmentShader = `
@location(13) @interpolate(flat) attrib13 : vec4<${wgslFormat}>,
`;
accumulateVariableAssignmentsInFragmentShader = `
outBuffer.primitives[input.primitiveId].attrib12 = input.attrib13.x;
outBuffer.primitives[input.primitiveId].attrib13 = input.attrib13.y;
`;
}
const pipeline = t.device.createRenderPipeline({

View file

@ -543,6 +543,7 @@ export const g = makeTestGroup(TextureZeroInitTest);
g.test('uninitialized_texture_is_zero')
.params(kTestParams)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(kTextureFormatInfo[t.params.format].feature);
})
.fn(t => {

View file

@ -99,6 +99,10 @@ g.test('texture_binding')
({ format, viewFormat }) => format !== viewFormat && viewCompatible(format, viewFormat)
)
)
.beforeAllSubcases(t => {
const { format, viewFormat } = t.params;
t.skipIfTextureFormatNotSupported(format, viewFormat);
})
.fn(t => {
const { format, viewFormat } = t.params;
@ -197,6 +201,10 @@ in view format and match in base format.`
)
.combine('sampleCount', [1, 4])
)
.beforeAllSubcases(t => {
const { format, viewFormat } = t.params;
t.skipIfTextureFormatNotSupported(format, viewFormat);
})
.fn(t => {
const { format, viewFormat, sampleCount } = t.params;

View file

@ -1,15 +1,18 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
TODO: Test more corner case values for Float16 / Float32 (INF, NaN, +-0, ...) and reduce the
TODO: Test more corner case values for Float16 / Float32 (INF, NaN, ...) and reduce the
float tolerance.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert, memcpy, unreachable } from '../../../../common/util/util.js';
import {
kMaxVertexAttributes,
kMaxVertexBufferArrayStride,
kMaxVertexBuffers,
assert,
filterUniqueValueTestVariants,
makeValueTestVariant,
memcpy,
unreachable,
} from '../../../../common/util/util.js';
import {
kPerStageBindingLimits,
kVertexFormatInfo,
kVertexFormats,
@ -59,8 +62,11 @@ class VertexStateTest extends GPUTest {
// than maxVertexAttributes = 16.
// However this might not work in the future for implementations that allow even more vertex
// attributes so there will need to be larger changes when that happens.
const maxUniformBuffers = kPerStageBindingLimits['uniformBuf'].max;
assert(maxUniformBuffers + kPerStageBindingLimits['storageBuf'].max >= kMaxVertexAttributes);
const maxUniformBuffers = this.getDefaultLimit(kPerStageBindingLimits['uniformBuf'].maxLimit);
assert(
maxUniformBuffers + this.getDefaultLimit(kPerStageBindingLimits['storageBuf'].maxLimit) >=
this.device.limits.maxVertexAttributes
);
let vsInputs = '';
let vsChecks = '';
@ -138,7 +144,7 @@ fn check(success : bool) {
}
fn floatsSimilar(a : f32, b : f32, tolerance : f32) -> bool {
// TODO do we check for + and - 0?
// Note: -0.0 and 0.0 have different bit patterns, but compare as equal.
return abs(a - b) < tolerance;
}
@ -257,7 +263,8 @@ struct VSOutputs {
switch (formatInfo.type) {
case 'float': {
const data = [42.42, 0.0, 1.0, -1.0, 1000, -18.7, 25.17];
// -0.0 and +0.0 have different bit patterns, but compare as equal.
const data = [42.42, 0.0, -0.0, 1.0, -1.0, 1000, -18.7, 25.17];
const expectedData = new Float32Array(data).buffer;
const vertexData =
bitSize === 32
@ -564,11 +571,21 @@ g.test('vertex_format_to_shader_format_conversion')
.combine('format', kVertexFormats)
.combine('shaderComponentCount', [1, 2, 3, 4])
.beginSubcases()
.combine('slot', [0, 1, kMaxVertexBuffers - 1])
.combine('shaderLocation', [0, 1, kMaxVertexAttributes - 1])
.combine('slotVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('shaderLocationVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
)
.fn(t => {
const { format, shaderComponentCount, slot, shaderLocation } = t.params;
const { format, shaderComponentCount, slotVariant, shaderLocationVariant } = t.params;
const slot = t.makeLimitVariant('maxVertexBuffers', slotVariant);
const shaderLocation = t.makeLimitVariant('maxVertexAttributes', shaderLocationVariant);
t.runTest([
{
slot,
@ -647,30 +664,40 @@ g.test('non_zero_array_stride_and_attribute_offset')
u //
.combine('format', kVertexFormats)
.beginSubcases()
.expand('arrayStride', p => {
.expand('arrayStrideVariant', p => {
const formatInfo = kVertexFormatInfo[p.format];
const formatSize = formatInfo.bytesPerComponent * formatInfo.componentCount;
return [align(formatSize, 4), align(formatSize, 4) + 4, kMaxVertexBufferArrayStride];
return [
{ mult: 0, add: align(formatSize, 4) },
{ mult: 0, add: align(formatSize, 4) + 4 },
{ mult: 1, add: 0 },
];
})
.expand('offset', p => {
.expand('offsetVariant', p => {
const formatInfo = kVertexFormatInfo[p.format];
const formatSize = formatInfo.bytesPerComponent * formatInfo.componentCount;
return new Set(
[
0,
formatSize,
4,
p.arrayStride / 2,
p.arrayStride - formatSize * 2,
p.arrayStride - formatSize - 4,
p.arrayStride - formatSize,
].map(offset => clamp(offset, { min: 0, max: p.arrayStride - formatSize }))
);
return [
{ mult: 0, add: 0 },
{ mult: 0, add: formatSize },
{ mult: 0, add: 4 },
{ mult: 0.5, add: 0 },
{ mult: 1, add: -formatSize * 2 },
{ mult: 1, add: -formatSize - 4 },
{ mult: 1, add: -formatSize },
];
})
)
.fn(t => {
const { format, arrayStride, offset } = t.params;
const { format, arrayStrideVariant, offsetVariant } = t.params;
const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
const formatInfo = kVertexFormatInfo[format];
const formatSize = formatInfo.bytesPerComponent * formatInfo.componentCount;
const offset = clamp(makeValueTestVariant(arrayStride, offsetVariant), {
min: 0,
max: arrayStride - formatSize,
});
t.runTest([
{
slot: 0,
@ -728,11 +755,16 @@ g.test('vertex_buffer_used_multiple_times_overlapped')
u //
.combine('format', kVertexFormats)
.beginSubcases()
.combine('vbCount', [2, 3, kMaxVertexBuffers])
.combine('vbCountVariant', [
{ mult: 0, add: 2 },
{ mult: 0, add: 3 },
{ mult: 1, add: 0 },
])
.combine('additionalVBOffset', [0, 4, 120])
)
.fn(t => {
const { format, vbCount, additionalVBOffset } = t.params;
const { format, vbCountVariant, additionalVBOffset } = t.params;
const vbCount = t.makeLimitVariant('maxVertexBuffers', vbCountVariant);
const kVertexCount = 20;
const kInstanceCount = 1;
const formatInfo = kVertexFormatInfo[format];
@ -830,11 +862,16 @@ g.test('vertex_buffer_used_multiple_times_interleaved')
u //
.combine('format', kVertexFormats)
.beginSubcases()
.combine('vbCount', [2, 3, kMaxVertexBuffers])
.combine('vbCountVariant', [
{ mult: 0, add: 2 },
{ mult: 0, add: 3 },
{ mult: 1, add: 0 },
])
.combine('additionalVBOffset', [0, 4, 120])
)
.fn(t => {
const { format, vbCount, additionalVBOffset } = t.params;
const { format, vbCountVariant, additionalVBOffset } = t.params;
const vbCount = t.makeLimitVariant('maxVertexBuffers', vbCountVariant);
const kVertexCount = 20;
const kInstanceCount = 1;
const formatInfo = kVertexFormatInfo[format];
@ -910,13 +947,17 @@ g.test('max_buffers_and_attribs')
.params(u => u.combine('format', kVertexFormats))
.fn(t => {
const { format } = t.params;
const attributesPerBuffer = Math.ceil(kMaxVertexAttributes / kMaxVertexBuffers);
// In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute
const maxVertexBuffers = t.device.limits.maxVertexBuffers;
const deviceMaxVertexAttributes = t.device.limits.maxVertexAttributes;
const maxVertexAttributes = deviceMaxVertexAttributes - (t.isCompatibility ? 2 : 0);
const attributesPerBuffer = Math.ceil(maxVertexAttributes / maxVertexBuffers);
let attributesEmitted = 0;
const state = [];
for (let i = 0; i < kMaxVertexBuffers; i++) {
for (let i = 0; i < maxVertexBuffers; i++) {
const attributes = [];
for (let j = 0; j < attributesPerBuffer && attributesEmitted < kMaxVertexAttributes; j++) {
for (let j = 0; j < attributesPerBuffer && attributesEmitted < maxVertexAttributes; j++) {
attributes.push({ format, offset: 0, shaderLocation: attributesEmitted });
attributesEmitted++;
}
@ -941,25 +982,26 @@ g.test('array_stride_zero')
.combine('format', kVertexFormats)
.beginSubcases()
.combine('stepMode', ['vertex', 'instance'])
.expand('offset', p => {
.expand('offsetVariant', p => {
const formatInfo = kVertexFormatInfo[p.format];
const formatSize = formatInfo.bytesPerComponent * formatInfo.componentCount;
return new Set([
0,
4,
8,
formatSize,
formatSize * 2,
kMaxVertexBufferArrayStride / 2,
kMaxVertexBufferArrayStride - formatSize - 4,
kMaxVertexBufferArrayStride - formatSize - 8,
kMaxVertexBufferArrayStride - formatSize,
kMaxVertexBufferArrayStride - formatSize * 2,
return filterUniqueValueTestVariants([
{ mult: 0, add: 0 },
{ mult: 0, add: 4 },
{ mult: 0, add: 8 },
{ mult: 0, add: formatSize },
{ mult: 0, add: formatSize * 2 },
{ mult: 0.5, add: 0 },
{ mult: 1, add: -formatSize - 4 },
{ mult: 1, add: -formatSize - 8 },
{ mult: 1, add: -formatSize },
{ mult: 1, add: -formatSize * 2 },
]);
})
)
.fn(t => {
const { format, stepMode, offset } = t.params;
const { format, stepMode, offsetVariant } = t.params;
const offset = t.makeLimitVariant('maxVertexBufferArrayStride', offsetVariant);
const kCount = 10;
// Create the stride 0 part of the test, first by faking a single vertex being drawn and
@ -1024,7 +1066,7 @@ g.test('discontiguous_location_and_attribs')
.fn(t => {
t.runTest([
{
slot: kMaxVertexBuffers - 1,
slot: t.device.limits.maxVertexBuffers - 1,
arrayStride: 4,
stepMode: 'vertex',
attributes: [
@ -1037,7 +1079,13 @@ g.test('discontiguous_location_and_attribs')
arrayStride: 16,
stepMode: 'instance',
vbOffset: 1000,
attributes: [{ format: 'uint32x4', offset: 0, shaderLocation: kMaxVertexAttributes - 1 }],
attributes: [
{
format: 'uint32x4',
offset: 0,
shaderLocation: t.device.limits.maxVertexAttributes - 1,
},
],
},
]);
});
@ -1051,8 +1099,10 @@ g.test('overlapping_attributes')
.fn(t => {
const { format } = t.params;
// In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute
const maxVertexAttributes = t.device.limits.maxVertexAttributes - (t.isCompatibility ? 2 : 0);
const attributes = [];
for (let i = 0; i < kMaxVertexAttributes; i++) {
for (let i = 0; i < maxVertexAttributes; i++) {
attributes.push({ format, offset: 0, shaderLocation: i });
}

View file

@ -9,7 +9,6 @@ import {
kAllBufferUsageBits,
kBufferSizeAlignment,
kBufferUsages,
kLimitInfo,
} from '../../../capability_info.js';
import { GPUConst } from '../../../constants.js';
import { kMaxSafeMultipleOf8 } from '../../../util/math.js';
@ -47,18 +46,11 @@ g.test('size')
g.test('limit')
.desc('Test buffer size is validated against maxBufferSize.')
.params(u =>
u
.beginSubcases()
.combine('size', [
kLimitInfo.maxBufferSize.default - 1,
kLimitInfo.maxBufferSize.default,
kLimitInfo.maxBufferSize.default + 1,
])
)
.params(u => u.beginSubcases().combine('sizeAddition', [-1, 0, +1]))
.fn(t => {
const { size } = t.params;
const isValid = size <= kLimitInfo.maxBufferSize.default;
const { sizeAddition } = t.params;
const size = t.makeLimitVariant('maxBufferSize', { mult: 1, add: sizeAddition });
const isValid = size <= t.device.limits.maxBufferSize;
const usage = BufferUsage.COPY_SRC;
t.expectGPUError('validation', () => t.device.createBuffer({ size, usage }), !isValid);
});

View file

@ -4,10 +4,8 @@
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { getGPU } from '../../../../../common/util/navigator_gpu.js';
import { assert, range, reorder } from '../../../../../common/util/util.js';
import { kLimitInfo } from '../../../../capability_info.js';
import { kTextureFormatInfo } from '../../../../format_info.js';
import { getDefaultLimitsForAdapter } from '../../../../capability_info.js';
import { GPUTestBase } from '../../../../gpu_test.js';
import { align } from '../../../../util/math.js';
export const kCreatePipelineTypes = [
'createRenderPipeline',
@ -65,19 +63,6 @@ function getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, n
).join('\n ');
}
/**
 * Given an array of GPUColorTargetState return the number of bytes per sample
 */
export function computeBytesPerSample(targets) {
  // Accumulate the per-sample byte cost of each color target. Each target's
  // storage starts at the next multiple of its render alignment, then its
  // byte cost is added — identical to the spec's running-total computation.
  return targets.reduce((runningTotal, { format }) => {
    const { alignment, byteCost } = kTextureFormatInfo[format].colorRender;
    return align(runningTotal, alignment) + byteCost;
  }, 0);
}
export function getPerStageWGSLForBindingCombinationImpl(
bindingCombination,
order,
@ -251,8 +236,9 @@ export const kMinimumLimitValueTests = [
'underMinimum',
];
export function getDefaultLimit(limit) {
return kLimitInfo[limit].default;
export function getDefaultLimitForAdapter(adapter, limit) {
const limitInfo = getDefaultLimitsForAdapter(adapter);
return limitInfo[limit].default;
}
const kMinimumLimits = new Set([
@ -283,7 +269,7 @@ export class LimitTestsImpl extends GPUTestBase {
const gpu = getGPU(this.rec);
this._adapter = await gpu.requestAdapter();
const limit = this.limit;
this.defaultLimit = getDefaultLimit(limit);
this.defaultLimit = getDefaultLimitForAdapter(this.adapter, limit);
this.adapterLimit = this.adapter.limits[limit];
assert(!Number.isNaN(this.defaultLimit));
assert(!Number.isNaN(this.adapterLimit));
@ -311,7 +297,7 @@ export class LimitTestsImpl extends GPUTestBase {
getDefaultOrAdapterLimit(limit, limitMode) {
switch (limitMode) {
case 'defaultLimit':
return getDefaultLimit(limit);
return getDefaultLimitForAdapter(this.adapter, limit);
case 'adapterLimit':
return this.adapter.limits[limit];
}
@ -332,7 +318,9 @@ export class LimitTestsImpl extends GPUTestBase {
for (const [extraLimitStr, limitMode] of Object.entries(extraLimits)) {
const extraLimit = extraLimitStr;
requiredLimits[extraLimit] =
limitMode === 'defaultLimit' ? getDefaultLimit(extraLimit) : adapter.limits[extraLimit];
limitMode === 'defaultLimit'
? getDefaultLimitForAdapter(adapter, extraLimit)
: adapter.limits[extraLimit];
}
}

View file

@ -1,7 +1,8 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { range } from '../../../../../common/util/util.js';
import { kMaximumLimitBaseParams, getDefaultLimit, makeLimitTestGroup } from './limit_utils.js';
import { kMaxColorAttachmentsToTest } from '../../../../capability_info.js';
import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
function getPipelineDescriptor(device, testValue) {
const code = `
@ -106,9 +107,19 @@ g.test('validate,maxColorAttachmentBytesPerSample')
.desc(`Test ${limit} against maxColorAttachmentBytesPerSample`)
.fn(t => {
const { adapter, defaultLimit, adapterLimit: maximumLimit } = t;
const minColorAttachmentBytesPerSample = getDefaultLimit('maxColorAttachmentBytesPerSample');
const minColorAttachmentBytesPerSample = t.getDefaultLimit('maxColorAttachmentBytesPerSample');
// The smallest attachment is 1 byte
// so make sure maxColorAttachments < maxColorAttachmentBytesPerSample
t.expect(defaultLimit <= minColorAttachmentBytesPerSample);
t.expect(maximumLimit <= adapter.limits.maxColorAttachmentBytesPerSample);
});
g.test('validate,kMaxColorAttachmentsToTest')
.desc(
`
Tests that kMaxColorAttachmentsToTest is large enough to test the limits of this device
`
)
.fn(t => {
t.expect(t.adapter.limits.maxColorAttachments <= kMaxColorAttachmentsToTest);
});

View file

@ -1,10 +1,6 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import {
kMaximumLimitBaseParams,
getDefaultLimit,
makeLimitTestGroup,
} from './limit_utils.js';
**/ import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
/**
* Given a 3 dimensional size, and a limit, compute
@ -71,11 +67,11 @@ function getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit) {
}
}
function getTestWorkgroupSize(testValueName, requestedLimit) {
function getTestWorkgroupSize(t, testValueName, requestedLimit) {
const maxDimensions = [
getDefaultLimit('maxComputeWorkgroupSizeX'),
getDefaultLimit('maxComputeWorkgroupSizeY'),
getDefaultLimit('maxComputeWorkgroupSizeZ'),
t.getDefaultLimit('maxComputeWorkgroupSizeX'),
t.getDefaultLimit('maxComputeWorkgroupSizeY'),
t.getDefaultLimit('maxComputeWorkgroupSizeZ'),
];
switch (testValueName) {
@ -87,13 +83,14 @@ function getTestWorkgroupSize(testValueName, requestedLimit) {
}
function getDeviceLimitToRequestAndValueToTest(
t,
limitValueTest,
testValueName,
defaultLimit,
maximumLimit
) {
const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit);
const workgroupSize = getTestWorkgroupSize(testValueName, requestedLimit);
const workgroupSize = getTestWorkgroupSize(t, testValueName, requestedLimit);
return {
requestedLimit,
workgroupSize,
@ -111,6 +108,7 @@ g.test('createComputePipeline,at_over')
const { defaultLimit, adapterLimit: maximumLimit } = t;
const { requestedLimit, workgroupSize } = getDeviceLimitToRequestAndValueToTest(
t,
limitTest,
testValueName,
defaultLimit,

View file

@ -22,6 +22,7 @@ g.test('createBindGroupLayout,at_over')
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
shouldError ||= testValue > t.device.limits.maxStorageBuffersPerShaderStage;
await t.expectValidationError(() => {
device.createBindGroupLayout({
entries: range(testValue, i => ({

View file

@ -26,6 +26,7 @@ g.test('createBindGroupLayout,at_over')
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
shouldError ||= testValue > t.device.limits.maxUniformBuffersPerShaderStage;
await t.expectValidationError(() => {
device.createBindGroupLayout({
entries: range(testValue, i => ({

View file

@ -112,6 +112,11 @@ g.test('createRenderPipeline,at_over')
.combine('sampleMaskIn', [false, true])
.combine('sampleMaskOut', [false, true])
)
.beforeAllSubcases(t => {
if (t.isCompatibility && (t.params.sampleMaskIn || t.params.sampleMaskOut)) {
t.skip('sample_mask not supported in compatibility mode');
}
})
.fn(async t => {
const {
limitTest,

View file

@ -1,7 +1,7 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { align, roundDown } from '../../../../util/math.js';
import { kMaximumLimitBaseParams, makeLimitTestGroup, getDefaultLimit } from './limit_utils.js';
import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
const kBufferParts = ['wholeBuffer', 'biggerBufferWithOffset'];
@ -139,6 +139,6 @@ g.test('validate,maxBufferSize')
.desc(`Test that ${limit} <= maxBufferSize`)
.fn(t => {
const { adapter, defaultLimit, adapterLimit } = t;
t.expect(defaultLimit <= getDefaultLimit('maxBufferSize'));
t.expect(defaultLimit <= t.getDefaultLimit('maxBufferSize'));
t.expect(adapterLimit <= adapter.limits.maxBufferSize);
});

View file

@ -1,11 +1,6 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import {
getDefaultLimit,
kMaximumLimitBaseParams,
makeLimitTestGroup,
} from './limit_utils.js';
**/ import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
const kBufferParts = ['wholeBuffer', 'biggerBufferWithOffset'];
function getSizeAndOffsetForBufferPart(device, bufferPart, size) {
@ -91,6 +86,6 @@ g.test('validate,maxBufferSize')
.desc(`Test that ${limit} <= maxBufferSize`)
.fn(t => {
const { adapter, defaultLimit, adapterLimit } = t;
t.expect(defaultLimit <= getDefaultLimit('maxBufferSize'));
t.expect(defaultLimit <= t.getDefaultLimit('maxBufferSize'));
t.expect(adapterLimit <= adapter.limits.maxBufferSize);
});

View file

@ -6,7 +6,7 @@
TODO: Ensure sure tests cover all createBindGroup validation rules.
`;
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { assert, unreachable } from '../../../common/util/util.js';
import { assert, makeValueTestVariant, unreachable } from '../../../common/util/util.js';
import {
allBindingEntries,
bindingTypeInfo,
@ -16,7 +16,6 @@ import {
kBufferBindingTypes,
kBufferUsages,
kCompareFunctions,
kLimitInfo,
kSamplerBindingTypes,
kTextureUsages,
kTextureViewDimensions,
@ -316,6 +315,8 @@ g.test('texture_must_have_correct_dimension')
dimension: getTextureDimensionFromView(dimension),
});
t.skipIfTextureViewDimensionNotSupported(viewDimension, dimension);
const shouldError = viewDimension !== dimension;
const textureView = texture.createView({ dimension });
@ -884,24 +885,21 @@ g.test('buffer,resource_offset')
u //
.combine('type', kBufferBindingTypes)
.beginSubcases()
.expand('offset', ({ type }) =>
type === 'uniform'
? [
kLimitInfo.minUniformBufferOffsetAlignment.default,
kLimitInfo.minUniformBufferOffsetAlignment.default * 0.5,
kLimitInfo.minUniformBufferOffsetAlignment.default * 1.5,
kLimitInfo.minUniformBufferOffsetAlignment.default + 2,
]
: [
kLimitInfo.minStorageBufferOffsetAlignment.default,
kLimitInfo.minStorageBufferOffsetAlignment.default * 0.5,
kLimitInfo.minStorageBufferOffsetAlignment.default * 1.5,
kLimitInfo.minStorageBufferOffsetAlignment.default + 2,
]
)
.combine('offsetAddMult', [
{ add: 0, mult: 0 },
{ add: 0, mult: 0.5 },
{ add: 0, mult: 1.5 },
{ add: 2, mult: 0 },
])
)
.fn(t => {
const { type, offset } = t.params;
const { type, offsetAddMult } = t.params;
const minAlignment =
t.device.limits[
type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
];
const offset = makeValueTestVariant(minAlignment, offsetAddMult);
const bindGroupLayout = t.device.createBindGroupLayout({
entries: [
@ -913,14 +911,8 @@ g.test('buffer,resource_offset')
],
});
let usage, isValid;
if (type === 'uniform') {
usage = GPUBufferUsage.UNIFORM;
isValid = offset % kLimitInfo.minUniformBufferOffsetAlignment.default === 0;
} else {
usage = GPUBufferUsage.STORAGE;
isValid = offset % kLimitInfo.minStorageBufferOffsetAlignment.default === 0;
}
const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
const isValid = offset % minAlignment === 0;
const buffer = t.device.createBuffer({
size: 1024,
@ -949,22 +941,24 @@ g.test('buffer,resource_binding_size')
.beginSubcases()
// Test a size of 1 (for uniform buffer) or 4 (for storage and read-only storage buffer)
// then values just within and just above the limit.
.expand('bindingSize', ({ type }) =>
type === 'uniform'
? [
1,
kLimitInfo.maxUniformBufferBindingSize.default,
kLimitInfo.maxUniformBufferBindingSize.default + 1,
]
: [
4,
kLimitInfo.maxStorageBufferBindingSize.default,
kLimitInfo.maxStorageBufferBindingSize.default + 4,
]
)
.combine('bindingSize', [
{ base: 1, limit: 0 },
{ base: 0, limit: 1 },
{ base: 1, limit: 1 },
])
)
.fn(t => {
const { type, bindingSize } = t.params;
const {
type,
bindingSize: { base, limit },
} = t.params;
const mult = type === 'uniform' ? 1 : 4;
const maxBindingSize =
t.device.limits[
type === 'uniform' ? 'maxUniformBufferBindingSize' : 'maxStorageBufferBindingSize'
];
const bindingSize = base * mult + maxBindingSize * limit;
const bindGroupLayout = t.device.createBindGroupLayout({
entries: [
@ -976,17 +970,12 @@ g.test('buffer,resource_binding_size')
],
});
let usage, isValid;
if (type === 'uniform') {
usage = GPUBufferUsage.UNIFORM;
isValid = bindingSize <= kLimitInfo.maxUniformBufferBindingSize.default;
} else {
usage = GPUBufferUsage.STORAGE;
isValid = bindingSize <= kLimitInfo.maxStorageBufferBindingSize.default;
}
const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
const isValid = bindingSize <= maxBindingSize;
// MAINTENANCE_TODO: Allocating the max size seems likely to fail. Refactor test.
const buffer = t.device.createBuffer({
size: kLimitInfo.maxStorageBufferBindingSize.default,
size: maxBindingSize,
usage,
});
@ -1009,26 +998,19 @@ g.test('buffer,effective_buffer_binding_size')
u
.combine('type', kBufferBindingTypes)
.beginSubcases()
.expand('offset', ({ type }) =>
type === 'uniform'
? [0, kLimitInfo.minUniformBufferOffsetAlignment.default]
: [0, kLimitInfo.minStorageBufferOffsetAlignment.default]
)
.expand('bufferSize', ({ type }) =>
type === 'uniform'
? [
kLimitInfo.minUniformBufferOffsetAlignment.default + 8,
kLimitInfo.minUniformBufferOffsetAlignment.default + 10,
]
: [
kLimitInfo.minStorageBufferOffsetAlignment.default + 8,
kLimitInfo.minStorageBufferOffsetAlignment.default + 10,
]
)
.combine('offsetMult', [0, 1])
.combine('bufferSizeAddition', [8, 10])
.combine('bindingSize', [undefined, 2, 4, 6])
)
.fn(t => {
const { type, offset, bufferSize, bindingSize } = t.params;
const { type, offsetMult, bufferSizeAddition, bindingSize } = t.params;
const minAlignment =
t.device.limits[
type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
];
const offset = minAlignment * offsetMult;
const bufferSize = minAlignment + bufferSizeAddition;
const bindGroupLayout = t.device.createBindGroupLayout({
entries: [

View file

@ -8,7 +8,6 @@ TODO: make sure tests are complete.
import { kUnitCaseParamsBuilder } from '../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../common/framework/test_group.js';
import {
kLimitInfo,
kShaderStages,
kShaderStageCombinations,
kStorageTextureAccessValues,
@ -63,27 +62,26 @@ g.test('maximum_binding_limit')
`
)
.paramsSubcasesOnly(u =>
u //
.combine('binding', [
1,
4,
8,
256,
kLimitInfo.maxBindingsPerBindGroup.default - 1,
kLimitInfo.maxBindingsPerBindGroup.default,
])
u.combine('bindingVariant', [1, 4, 8, 256, 'default', 'default-minus-one'])
)
.fn(t => {
const { binding } = t.params;
const { bindingVariant } = t.params;
const entries = [];
const binding =
bindingVariant === 'default'
? t.device.limits.maxBindingsPerBindGroup
: bindingVariant === 'default-minus-one'
? t.device.limits.maxBindingsPerBindGroup - 1
: bindingVariant;
entries.push({
binding,
visibility: GPUShaderStage.COMPUTE,
buffer: { type: 'storage' },
});
const success = binding < kLimitInfo.maxBindingsPerBindGroup.default;
const success = binding < t.device.limits.maxBindingsPerBindGroup;
t.expectValidationError(() => {
t.device.createBindGroupLayout({
@ -234,7 +232,10 @@ g.test('max_dynamic_buffers')
const { type, extraDynamicBuffers, staticBuffers } = t.params;
const info = bufferBindingTypeInfo({ type });
const dynamicBufferCount = info.perPipelineLimitClass.maxDynamic + extraDynamicBuffers;
const limitName = info.perPipelineLimitClass.maxDynamicLimit;
const bufferCount = limitName ? t.getDefaultLimit(limitName) : 0;
const dynamicBufferCount = bufferCount + extraDynamicBuffers;
const perStageLimit = t.getDefaultLimit(info.perStageLimitClass.maxLimit);
const entries = [];
for (let i = 0; i < dynamicBufferCount; i++) {
@ -259,7 +260,7 @@ g.test('max_dynamic_buffers')
t.expectValidationError(() => {
t.device.createBindGroupLayout(descriptor);
}, extraDynamicBuffers > 0);
}, extraDynamicBuffers > 0 || entries.length > perStageLimit);
});
/**
@ -297,7 +298,7 @@ const kMaxResourcesCases = kUnitCaseParamsBuilder
.combine('extraVisibility', kShaderStages)
.filter(p => (bindingTypeInfo(p.extraEntry).validStages & p.extraVisibility) !== 0);
// Should never fail unless kLimitInfo.maxBindingsPerBindGroup.default is exceeded, because the validation for
// Should never fail unless limitInfo.maxBindingsPerBindGroup.default is exceeded, because the validation for
// resources-of-type-per-stage is in pipeline layout creation.
g.test('max_resources_per_stage,in_bind_group_layout')
.desc(
@ -313,7 +314,7 @@ g.test('max_resources_per_stage,in_bind_group_layout')
.fn(t => {
const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
const maxedTypeInfo = bindingTypeInfo(maxedEntry);
const maxedCount = maxedTypeInfo.perStageLimitClass.max;
const maxedCount = t.getDefaultLimit(maxedTypeInfo.perStageLimitClass.maxLimit);
const extraTypeInfo = bindingTypeInfo(extraEntry);
const maxResourceBindings = [];
@ -364,7 +365,7 @@ g.test('max_resources_per_stage,in_pipeline_layout')
.fn(t => {
const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
const maxedTypeInfo = bindingTypeInfo(maxedEntry);
const maxedCount = maxedTypeInfo.perStageLimitClass.max;
const maxedCount = t.getDefaultLimit(maxedTypeInfo.perStageLimitClass.maxLimit);
const extraTypeInfo = bindingTypeInfo(extraEntry);
const maxResourceBindings = [];

View file

@ -34,7 +34,13 @@ g.test('number_of_dynamic_buffers_exceeds_the_maximum_value')
)
.fn(t => {
const { type, visibility } = t.params;
const { maxDynamic } = bufferBindingTypeInfo({ type }).perPipelineLimitClass;
const info = bufferBindingTypeInfo({ type });
const { maxDynamicLimit } = info.perPipelineLimitClass;
const perStageLimit = t.getDefaultLimit(info.perStageLimitClass.maxLimit);
const maxDynamic = Math.min(
maxDynamicLimit ? t.getDefaultLimit(maxDynamicLimit) : 0,
perStageLimit
);
const maxDynamicBufferBindings = [];
for (let binding = 0; binding < maxDynamic; binding++) {
@ -53,15 +59,17 @@ g.test('number_of_dynamic_buffers_exceeds_the_maximum_value')
entries: [{ binding: 0, visibility, buffer: { type, hasDynamicOffset: false } }],
};
const goodPipelineLayoutDescriptor = {
bindGroupLayouts: [
maxDynamicBufferBindGroupLayout,
t.device.createBindGroupLayout(goodDescriptor),
],
};
if (perStageLimit > maxDynamic) {
const goodPipelineLayoutDescriptor = {
bindGroupLayouts: [
maxDynamicBufferBindGroupLayout,
t.device.createBindGroupLayout(goodDescriptor),
],
};
// Control case
t.device.createPipelineLayout(goodPipelineLayoutDescriptor);
// Control case
t.device.createPipelineLayout(goodPipelineLayoutDescriptor);
}
// Check dynamic buffers exceed maximum in pipeline layout.
const badDescriptor = clone(goodDescriptor);

View file

@ -3,8 +3,8 @@
**/ export const description = `createTexture validation tests.`;
import { SkipTestCase } from '../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { assert } from '../../../common/util/util.js';
import { kTextureDimensions, kTextureUsages, kLimitInfo } from '../../capability_info.js';
import { assert, makeValueTestVariant } from '../../../common/util/util.js';
import { kTextureDimensions, kTextureUsages } from '../../capability_info.js';
import { GPUConst } from '../../constants.js';
import {
kTextureFormats,
@ -105,6 +105,7 @@ g.test('dimension_type_and_format_compatibility')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -142,6 +143,7 @@ g.test('mipLevelCount,format')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -273,6 +275,7 @@ g.test('sampleCount,various_sampleCount_with_all_formats')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -345,6 +348,7 @@ g.test('sampleCount,valid_sampleCount_with_other_parameter_varies')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -423,6 +427,7 @@ g.test('texture_size,default_value_and_smallest_size,uncompressed_format')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -488,10 +493,10 @@ g.test('texture_size,1d_texture')
// Compressed and depth-stencil textures are invalid for 1D.
.combine('format', kRegularTextureFormats)
.beginSubcases()
.combine('width', [
kLimitInfo.maxTextureDimension1D.default - 1,
kLimitInfo.maxTextureDimension1D.default,
kLimitInfo.maxTextureDimension1D.default + 1,
.combine('widthVariant', [
{ mult: 1, add: -1 },
{ mult: 1, add: 0 },
{ mult: 1, add: 1 },
])
.combine('height', [1, 2])
.combine('depthOrArrayLayers', [1, 2])
@ -499,10 +504,12 @@ g.test('texture_size,1d_texture')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
const { format, width, height, depthOrArrayLayers } = t.params;
const { format, widthVariant, height, depthOrArrayLayers } = t.params;
const width = t.makeLimitVariant('maxTextureDimension1D', widthVariant);
const descriptor = {
size: [width, height, depthOrArrayLayers],
@ -512,7 +519,7 @@ g.test('texture_size,1d_texture')
};
const success =
width <= kLimitInfo.maxTextureDimension1D.default && height === 1 && depthOrArrayLayers === 1;
width <= t.device.limits.maxTextureDimension1D && height === 1 && depthOrArrayLayers === 1;
t.expectValidationError(() => {
t.device.createTexture(descriptor);
@ -525,28 +532,70 @@ g.test('texture_size,2d_texture,uncompressed_format')
u
.combine('dimension', [undefined, '2d'])
.combine('format', kUncompressedTextureFormats)
.combine('size', [
.combine('sizeVariant', [
// Test the bound of width
[kLimitInfo.maxTextureDimension2D.default - 1, 1, 1],
[kLimitInfo.maxTextureDimension2D.default, 1, 1],
[kLimitInfo.maxTextureDimension2D.default + 1, 1, 1],
[
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
// Test the bound of height
[1, kLimitInfo.maxTextureDimension2D.default - 1, 1],
[1, kLimitInfo.maxTextureDimension2D.default, 1],
[1, kLimitInfo.maxTextureDimension2D.default + 1, 1],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: 1 },
{ mult: 0, add: 1 },
],
// Test the bound of array layers
[1, 1, kLimitInfo.maxTextureArrayLayers.default - 1],
[1, 1, kLimitInfo.maxTextureArrayLayers.default],
[1, 1, kLimitInfo.maxTextureArrayLayers.default + 1],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: 1 },
],
])
)
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
const { dimension, format, size } = t.params;
const { dimension, format, sizeVariant } = t.params;
const size = [
t.device.limits.maxTextureDimension2D,
t.device.limits.maxTextureDimension2D,
t.device.limits.maxTextureArrayLayers,
].map((limit, ndx) => makeValueTestVariant(limit, sizeVariant[ndx]));
const descriptor = {
size,
@ -556,9 +605,9 @@ g.test('texture_size,2d_texture,uncompressed_format')
};
const success =
size[0] <= kLimitInfo.maxTextureDimension2D.default &&
size[1] <= kLimitInfo.maxTextureDimension2D.default &&
size[2] <= kLimitInfo.maxTextureArrayLayers.default;
size[0] <= t.device.limits.maxTextureDimension2D &&
size[1] <= t.device.limits.maxTextureDimension2D &&
size[2] <= t.device.limits.maxTextureArrayLayers;
t.expectValidationError(() => {
t.device.createTexture(descriptor);
@ -571,40 +620,179 @@ g.test('texture_size,2d_texture,compressed_format')
u
.combine('dimension', [undefined, '2d'])
.combine('format', kCompressedTextureFormats)
.expand('size', p => {
.expand('sizeVariant', p => {
const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
return [
// Test the bound of width
[kLimitInfo.maxTextureDimension2D.default - 1, 1, 1],
[kLimitInfo.maxTextureDimension2D.default - blockWidth, 1, 1],
[kLimitInfo.maxTextureDimension2D.default - blockWidth, blockHeight, 1],
[kLimitInfo.maxTextureDimension2D.default, 1, 1],
[kLimitInfo.maxTextureDimension2D.default, blockHeight, 1],
[kLimitInfo.maxTextureDimension2D.default + 1, 1, 1],
[kLimitInfo.maxTextureDimension2D.default + blockWidth, 1, 1],
[kLimitInfo.maxTextureDimension2D.default + blockWidth, blockHeight, 1],
[
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: -blockWidth },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: -blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
// Test the bound of height
[1, kLimitInfo.maxTextureDimension2D.default - 1, 1],
[1, kLimitInfo.maxTextureDimension2D.default - blockHeight, 1],
[blockWidth, kLimitInfo.maxTextureDimension2D.default - blockHeight, 1],
[1, kLimitInfo.maxTextureDimension2D.default, 1],
[blockWidth, kLimitInfo.maxTextureDimension2D.default, 1],
[1, kLimitInfo.maxTextureDimension2D.default + 1, 1],
[1, kLimitInfo.maxTextureDimension2D.default + blockWidth, 1],
[blockWidth, kLimitInfo.maxTextureDimension2D.default + blockHeight, 1],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: -blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: +blockWidth },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: +blockHeight },
{ mult: 0, add: 1 },
],
// Test the bound of array layers
[1, 1, kLimitInfo.maxTextureArrayLayers.default - 1],
[blockWidth, 1, kLimitInfo.maxTextureArrayLayers.default - 1],
[1, blockHeight, kLimitInfo.maxTextureArrayLayers.default - 1],
[blockWidth, blockHeight, kLimitInfo.maxTextureArrayLayers.default - 1],
[1, 1, kLimitInfo.maxTextureArrayLayers.default],
[blockWidth, 1, kLimitInfo.maxTextureArrayLayers.default],
[1, blockHeight, kLimitInfo.maxTextureArrayLayers.default],
[blockWidth, blockHeight, kLimitInfo.maxTextureArrayLayers.default],
[1, 1, kLimitInfo.maxTextureArrayLayers.default + 1],
[blockWidth, 1, kLimitInfo.maxTextureArrayLayers.default + 1],
[1, blockHeight, kLimitInfo.maxTextureArrayLayers.default + 1],
[blockWidth, blockHeight, kLimitInfo.maxTextureArrayLayers.default + 1],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: +1 },
],
];
})
)
@ -614,8 +802,13 @@ g.test('texture_size,2d_texture,compressed_format')
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
const { dimension, format, size } = t.params;
const { dimension, format, sizeVariant } = t.params;
const info = kTextureFormatInfo[format];
const size = [
t.device.limits.maxTextureDimension2D,
t.device.limits.maxTextureDimension2D,
t.device.limits.maxTextureArrayLayers,
].map((limit, ndx) => makeValueTestVariant(limit, sizeVariant[ndx]));
const descriptor = {
size,
@ -627,9 +820,9 @@ g.test('texture_size,2d_texture,compressed_format')
const success =
size[0] % info.blockWidth === 0 &&
size[1] % info.blockHeight === 0 &&
size[0] <= kLimitInfo.maxTextureDimension2D.default &&
size[1] <= kLimitInfo.maxTextureDimension2D.default &&
size[2] <= kLimitInfo.maxTextureArrayLayers.default;
size[0] <= t.device.limits.maxTextureDimension2D &&
size[1] <= t.device.limits.maxTextureDimension2D &&
size[2] <= t.device.limits.maxTextureArrayLayers;
t.expectValidationError(() => {
t.device.createTexture(descriptor);
@ -644,28 +837,67 @@ g.test('texture_size,3d_texture,uncompressed_format')
u //
.combine('format', kRegularTextureFormats)
.beginSubcases()
.combine('size', [
.combine('sizeVariant', [
// Test the bound of width
[kLimitInfo.maxTextureDimension3D.default - 1, 1, 1],
[kLimitInfo.maxTextureDimension3D.default, 1, 1],
[kLimitInfo.maxTextureDimension3D.default + 1, 1, 1],
[
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: +1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
// Test the bound of height
[1, kLimitInfo.maxTextureDimension3D.default - 1, 1],
[1, kLimitInfo.maxTextureDimension3D.default, 1],
[1, kLimitInfo.maxTextureDimension3D.default + 1, 1],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
{ mult: 0, add: 1 },
],
// Test the bound of depth
[1, 1, kLimitInfo.maxTextureDimension3D.default - 1],
[1, 1, kLimitInfo.maxTextureDimension3D.default],
[1, 1, kLimitInfo.maxTextureDimension3D.default + 1],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
],
])
)
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
const { format, size } = t.params;
const { format, sizeVariant } = t.params;
const maxTextureDimension3D = t.device.limits.maxTextureDimension3D;
const size = sizeVariant.map(variant => t.makeLimitVariant('maxTextureDimension3D', variant));
const descriptor = {
size,
@ -675,9 +907,9 @@ g.test('texture_size,3d_texture,uncompressed_format')
};
const success =
size[0] <= kLimitInfo.maxTextureDimension3D.default &&
size[1] <= kLimitInfo.maxTextureDimension3D.default &&
size[2] <= kLimitInfo.maxTextureDimension3D.default;
size[0] <= maxTextureDimension3D &&
size[1] <= maxTextureDimension3D &&
size[2] <= maxTextureDimension3D;
t.expectValidationError(() => {
t.device.createTexture(descriptor);
@ -690,40 +922,179 @@ g.test('texture_size,3d_texture,compressed_format')
u //
.combine('format', kCompressedTextureFormats)
.beginSubcases()
.expand('size', p => {
.expand('sizeVariant', p => {
const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
return [
// Test the bound of width
[kLimitInfo.maxTextureDimension3D.default - 1, 1, 1],
[kLimitInfo.maxTextureDimension3D.default - blockWidth, 1, 1],
[kLimitInfo.maxTextureDimension3D.default - blockWidth, blockHeight, 1],
[kLimitInfo.maxTextureDimension3D.default, 1, 1],
[kLimitInfo.maxTextureDimension3D.default, blockHeight, 1],
[kLimitInfo.maxTextureDimension3D.default + 1, 1, 1],
[kLimitInfo.maxTextureDimension3D.default + blockWidth, 1, 1],
[kLimitInfo.maxTextureDimension3D.default + blockWidth, blockHeight, 1],
[
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: -blockWidth },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: -blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: 0 },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: +1 },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: +blockWidth },
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
],
[
{ mult: 1, add: +blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 0, add: 1 },
],
// Test the bound of height
[1, kLimitInfo.maxTextureDimension3D.default - 1, 1],
[1, kLimitInfo.maxTextureDimension3D.default - blockHeight, 1],
[blockWidth, kLimitInfo.maxTextureDimension3D.default - blockHeight, 1],
[1, kLimitInfo.maxTextureDimension3D.default, 1],
[blockWidth, kLimitInfo.maxTextureDimension3D.default, 1],
[1, kLimitInfo.maxTextureDimension3D.default + 1, 1],
[1, kLimitInfo.maxTextureDimension3D.default + blockWidth, 1],
[blockWidth, kLimitInfo.maxTextureDimension3D.default + blockHeight, 1],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: -blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: -blockHeight },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: 0 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: 1 },
{ mult: 1, add: +blockWidth },
{ mult: 0, add: 1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 1, add: +blockHeight },
{ mult: 0, add: 1 },
],
// Test the bound of depth
[1, 1, kLimitInfo.maxTextureDimension3D.default - 1],
[blockWidth, 1, kLimitInfo.maxTextureDimension3D.default - 1],
[1, blockHeight, kLimitInfo.maxTextureDimension3D.default - 1],
[blockWidth, blockHeight, kLimitInfo.maxTextureDimension3D.default - 1],
[1, 1, kLimitInfo.maxTextureDimension3D.default],
[blockWidth, 1, kLimitInfo.maxTextureDimension3D.default],
[1, blockHeight, kLimitInfo.maxTextureDimension3D.default],
[blockWidth, blockHeight, kLimitInfo.maxTextureDimension3D.default],
[1, 1, kLimitInfo.maxTextureDimension3D.default + 1],
[blockWidth, 1, kLimitInfo.maxTextureDimension3D.default + 1],
[1, blockHeight, kLimitInfo.maxTextureDimension3D.default + 1],
[blockWidth, blockHeight, kLimitInfo.maxTextureDimension3D.default + 1],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: -1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: 0 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: 1 },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: 1 },
{ mult: 0, add: blockHeight },
{ mult: 1, add: +1 },
],
[
{ mult: 0, add: blockWidth },
{ mult: 0, add: blockHeight },
{ mult: 1, add: +1 },
],
];
})
)
@ -736,12 +1107,15 @@ g.test('texture_size,3d_texture,compressed_format')
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
const { format, size } = t.params;
const { format, sizeVariant } = t.params;
const info = kTextureFormatInfo[format];
const maxTextureDimension3D = t.device.limits.maxTextureDimension3D;
const size = sizeVariant.map(variant => t.makeLimitVariant('maxTextureDimension3D', variant));
assert(
kLimitInfo.maxTextureDimension3D.default % info.blockWidth === 0 &&
kLimitInfo.maxTextureDimension3D.default % info.blockHeight === 0
maxTextureDimension3D % info.blockWidth === 0 &&
maxTextureDimension3D % info.blockHeight === 0
);
const descriptor = {
@ -754,9 +1128,9 @@ g.test('texture_size,3d_texture,compressed_format')
const success =
size[0] % info.blockWidth === 0 &&
size[1] % info.blockHeight === 0 &&
size[0] <= kLimitInfo.maxTextureDimension3D.default &&
size[1] <= kLimitInfo.maxTextureDimension3D.default &&
size[2] <= kLimitInfo.maxTextureDimension3D.default;
size[0] <= maxTextureDimension3D &&
size[1] <= maxTextureDimension3D &&
size[2] <= maxTextureDimension3D;
t.expectValidationError(() => {
t.device.createTexture(descriptor);
@ -781,6 +1155,7 @@ g.test('texture_usage')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -837,6 +1212,8 @@ g.test('viewFormats')
const { format, viewFormat } = t.params;
const { blockWidth, blockHeight } = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format, viewFormat);
const compatible = viewCompatible(format, viewFormat);
// Test the viewFormat in the list.

View file

@ -55,6 +55,8 @@ g.test('format')
const { textureFormat, viewFormat, useViewFormatList } = t.params;
const { blockWidth, blockHeight } = kTextureFormatInfo[textureFormat];
t.skipIfTextureFormatNotSupported(textureFormat, viewFormat);
const compatible = viewFormat === undefined || viewCompatible(textureFormat, viewFormat);
const texture = t.device.createTexture({
@ -89,6 +91,9 @@ g.test('dimension')
.combine('textureDimension', kTextureDimensions)
.combine('viewDimension', [...kTextureViewDimensions, undefined])
)
.beforeAllSubcases(t => {
t.skipIfTextureViewDimensionNotSupported(t.params.viewDimension);
})
.fn(t => {
const { textureDimension, viewDimension } = t.params;
@ -213,6 +218,8 @@ g.test('array_layers')
arrayLayerCount,
} = t.params;
t.skipIfTextureViewDimensionNotSupported(viewDimension);
const kWidth = 1 << (kLevels - 1); // 32
const textureDescriptor = {
format: 'rgba8unorm',
@ -272,6 +279,8 @@ g.test('mip_levels')
mipLevelCount,
} = t.params;
t.skipIfTextureViewDimensionNotSupported(viewDimension);
const textureDescriptor = {
format: 'rgba8unorm',
dimension: textureDimension,
@ -310,6 +319,8 @@ g.test('cube_faces_square')
.fn(t => {
const { dimension, size } = t.params;
t.skipIfTextureViewDimensionNotSupported(dimension);
const texture = t.device.createTexture({
format: 'rgba8unorm',
size,

View file

@ -6,7 +6,8 @@ API validation test for compute pass
Does **not** test usage scopes (resource_usages/) or programmable pass stuff (programmable_pass).
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { kBufferUsages, kLimitInfo } from '../../../../capability_info.js';
import { makeValueTestVariant } from '../../../../../common/util/util.js';
import { kBufferUsages } from '../../../../capability_info.js';
import { GPUConst } from '../../../../constants.js';
import { kResourceStates } from '../../../../gpu_test.js';
import { ValidationTest } from '../../validation_test.js';
@ -89,7 +90,6 @@ g.test('pipeline,device_mismatch')
validateFinish(!mismatched);
});
const kMaxDispatch = kLimitInfo.maxComputeWorkgroupsPerDimension.default;
g.test('dispatch_sizes')
.desc(
`Test 'direct' and 'indirect' dispatch with various sizes.
@ -105,13 +105,22 @@ g.test('dispatch_sizes')
.params(u =>
u
.combine('dispatchType', ['direct', 'indirect'])
.combine('largeDimValue', [0, 1, kMaxDispatch, kMaxDispatch + 1, 0x7fff_ffff, 0xffff_ffff])
.combine('largeDimValueVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 1, add: 1 },
{ mult: 0, add: 0x7fff_ffff },
{ mult: 0, add: 0xffff_ffff },
])
.beginSubcases()
.combine('largeDimIndex', [0, 1, 2])
.combine('smallDimValue', [0, 1])
)
.fn(t => {
const { dispatchType, largeDimIndex, smallDimValue, largeDimValue } = t.params;
const { dispatchType, largeDimIndex, smallDimValue, largeDimValueVariant } = t.params;
const maxDispatch = t.device.limits.maxComputeWorkgroupsPerDimension;
const largeDimValue = makeValueTestVariant(maxDispatch, largeDimValueVariant);
const pipeline = t.createNoOpComputePipeline();
@ -132,7 +141,7 @@ g.test('dispatch_sizes')
const shouldError =
dispatchType === 'direct' &&
(workSizes[0] > kMaxDispatch || workSizes[1] > kMaxDispatch || workSizes[2] > kMaxDispatch);
(workSizes[0] > maxDispatch || workSizes[1] > maxDispatch || workSizes[2] > maxDispatch);
validateFinishAndSubmit(!shouldError, true);
});

View file

@ -356,6 +356,8 @@ Test the formats of textures in copyTextureToTexture must be copy-compatible.
const srcFormatInfo = kTextureFormatInfo[srcFormat];
const dstFormatInfo = kTextureFormatInfo[dstFormat];
t.skipIfTextureFormatNotSupported(srcFormat, dstFormat);
const textureSize = {
width: lcm(srcFormatInfo.blockWidth, dstFormatInfo.blockWidth),
height: lcm(srcFormatInfo.blockHeight, dstFormatInfo.blockHeight),
@ -771,6 +773,7 @@ TODO: Express the offsets in "block size" so as to be able to test non-4x4 compr
.beforeAllSubcases(t => {
const { format } = t.params;
t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
t.skipIfCopyTextureToTextureNotSupportedForFormat(format);
})
.fn(t => {
const { format, dimension, copyBoxOffsets, srcCopyLevel, dstCopyLevel } = t.params;

View file

@ -4,7 +4,7 @@
Validation tests for setVertexBuffer on render pass and render bundle.
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { kLimitInfo } from '../../../../../capability_info.js';
import { makeValueTestVariant } from '../../../../../../common/util/util.js';
import { GPUConst } from '../../../../../constants.js';
import { kResourceStates } from '../../../../../gpu_test.js';
import { ValidationTest } from '../../../validation_test.js';
@ -20,14 +20,17 @@ Tests slot must be less than the maxVertexBuffers in device limits.
`
)
.paramsSubcasesOnly(
kRenderEncodeTypeParams.combine('slot', [
0,
kLimitInfo.maxVertexBuffers.default - 1,
kLimitInfo.maxVertexBuffers.default,
kRenderEncodeTypeParams.combine('slotVariant', [
{ mult: 0, add: 0 },
{ mult: 1, add: -1 },
{ mult: 1, add: 0 },
])
)
.fn(t => {
const { encoderType, slot } = t.params;
const { encoderType, slotVariant } = t.params;
const maxVertexBuffers = t.device.limits.maxVertexBuffers;
const slot = makeValueTestVariant(maxVertexBuffers, slotVariant);
const vertexBuffer = t.createBufferWithState('valid', {
size: 16,
usage: GPUBufferUsage.VERTEX,
@ -35,7 +38,7 @@ Tests slot must be less than the maxVertexBuffers in device limits.
const { encoder, validateFinish } = t.createEncoder(encoderType);
encoder.setVertexBuffer(slot, vertexBuffer);
validateFinish(slot < kLimitInfo.maxVertexBuffers.default);
validateFinish(slot < maxVertexBuffers);
});
g.test('vertex_buffer_state')

View file

@ -12,11 +12,10 @@ TODO: merge these notes and implement.
> - setBindGroup in different orders (e.g. 0,1,2 vs 2,0,1)
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { range, unreachable } from '../../../../../common/util/util.js';
import { makeValueTestVariant, range, unreachable } from '../../../../../common/util/util.js';
import {
kBufferBindingTypes,
kMinDynamicBufferOffsetAlignment,
kLimitInfo,
} from '../../../../capability_info.js';
import { kResourceStates } from '../../../../gpu_test.js';
import { kProgrammableEncoderTypes } from '../../../../util/command_buffer_maker.js';
@ -379,28 +378,25 @@ g.test('buffer_dynamic_offsets')
.combine('type', kBufferBindingTypes)
.combine('encoderType', kProgrammableEncoderTypes)
.beginSubcases()
.expand('dynamicOffset', ({ type }) =>
type === 'uniform'
? [
kLimitInfo.minUniformBufferOffsetAlignment.default,
kLimitInfo.minUniformBufferOffsetAlignment.default * 0.5,
kLimitInfo.minUniformBufferOffsetAlignment.default * 1.5,
kLimitInfo.minUniformBufferOffsetAlignment.default * 2,
kLimitInfo.minUniformBufferOffsetAlignment.default + 2,
]
: [
kLimitInfo.minStorageBufferOffsetAlignment.default,
kLimitInfo.minStorageBufferOffsetAlignment.default * 0.5,
kLimitInfo.minStorageBufferOffsetAlignment.default * 1.5,
kLimitInfo.minStorageBufferOffsetAlignment.default * 2,
kLimitInfo.minStorageBufferOffsetAlignment.default + 2,
]
)
.combine('dynamicOffsetVariant', [
{ mult: 1, add: 0 },
{ mult: 0.5, add: 0 },
{ mult: 1.5, add: 0 },
{ mult: 2, add: 0 },
{ mult: 1, add: 2 },
])
)
.fn(t => {
const { type, dynamicOffset, encoderType } = t.params;
const { type, dynamicOffsetVariant, encoderType } = t.params;
const kBindingSize = 12;
const minAlignment =
t.device.limits[
type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
];
const dynamicOffset = makeValueTestVariant(minAlignment, dynamicOffsetVariant);
const bindGroupLayout = t.device.createBindGroupLayout({
entries: [
{
@ -411,14 +407,8 @@ g.test('buffer_dynamic_offsets')
],
});
let usage, isValid;
if (type === 'uniform') {
usage = GPUBufferUsage.UNIFORM;
isValid = dynamicOffset % kLimitInfo.minUniformBufferOffsetAlignment.default === 0;
} else {
usage = GPUBufferUsage.STORAGE;
isValid = dynamicOffset % kLimitInfo.minStorageBufferOffsetAlignment.default === 0;
}
const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
const isValid = dynamicOffset % minAlignment === 0;
const buffer = t.device.createBuffer({
size: 3 * kMinDynamicBufferOffsetAlignment,

View file

@ -5,8 +5,9 @@ createRenderBundleEncoder validation tests.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { kMaxColorAttachments } from '../../../capability_info.js';
import { kMaxColorAttachmentsToTest } from '../../../capability_info.js';
import {
computeBytesPerSampleFromFormats,
kAllTextureFormats,
kDepthStencilFormats,
kTextureFormatInfo,
@ -21,11 +22,17 @@ g.test('attachment_state,limits,maxColorAttachments')
.params(u =>
u.beginSubcases().combine(
'colorFormatCount',
range(kMaxColorAttachments + 1, i => i + 1) // 1-9
range(kMaxColorAttachmentsToTest, i => i + 1)
)
)
.fn(t => {
const { colorFormatCount } = t.params;
const maxColorAttachments = t.device.limits.maxColorAttachments;
t.skipIf(
colorFormatCount > maxColorAttachments,
`${colorFormatCount} > maxColorAttachments: ${maxColorAttachments}`
);
t.expectValidationError(() => {
t.device.createRenderBundleEncoder({
colorFormats: Array(colorFormatCount).fill('r8unorm'),
@ -47,11 +54,20 @@ g.test('attachment_state,limits,maxColorAttachmentBytesPerSample,aligned')
.beginSubcases()
.combine(
'colorFormatCount',
range(kMaxColorAttachments, i => i + 1)
range(kMaxColorAttachmentsToTest, i => i + 1)
)
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(t => {
const { format, colorFormatCount } = t.params;
const maxColorAttachments = t.device.limits.maxColorAttachments;
t.skipIf(
colorFormatCount > maxColorAttachments,
`${colorFormatCount} > maxColorAttachments: ${maxColorAttachments}`
);
const info = kTextureFormatInfo[format];
const shouldError =
!info.colorRender ||
@ -81,24 +97,28 @@ g.test('attachment_state,limits,maxColorAttachmentBytesPerSample,unaligned')
// is allowed: 4+8+16+1+1 < 32.
{
formats: ['r8unorm', 'r32float', 'rgba8unorm', 'rgba32float', 'r8unorm'],
_shouldError: true,
},
{
formats: ['r32float', 'rgba8unorm', 'rgba32float', 'r8unorm', 'r8unorm'],
_shouldError: false,
},
])
)
.fn(t => {
const { formats, _shouldError } = t.params;
const { formats } = t.params;
t.skipIf(
formats.length > t.device.limits.maxColorAttachments,
`numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
);
const shouldError =
computeBytesPerSampleFromFormats(formats) > t.device.limits.maxColorAttachmentBytesPerSample;
t.expectValidationError(() => {
t.device.createRenderBundleEncoder({
colorFormats: formats,
});
}, _shouldError);
}, shouldError);
});
g.test('attachment_state,empty_color_formats')

View file

@ -68,7 +68,7 @@ g.test('import_multiple_times_in_same_task_scope')
.desc(
`
Tests that GPUExternalTexture is valid after been imported in the task.
Tests that in the same task scope, import twice on the same video source should return
Tests that in the same task scope, import twice on the same video source may return
the same GPUExternalTexture and bindGroup doesn't need to be updated.
`
)
@ -99,12 +99,20 @@ g.test('import_multiple_times_in_same_task_scope')
t.submitCommandBuffer(bindGroup, true);
// Import again in the same task scope should return same object.
const shouldBeTheSameExternalTexture = t.device.importExternalTexture({
const mayBeTheSameExternalTexture = t.device.importExternalTexture({
source: source,
});
assert(externalTexture === shouldBeTheSameExternalTexture);
t.submitCommandBuffer(bindGroup, true);
if (externalTexture === mayBeTheSameExternalTexture) {
t.submitCommandBuffer(bindGroup, true);
} else {
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),
entries: [{ binding: 0, resource: externalTexture }],
});
t.submitCommandBuffer(bindGroup, true);
}
});
});
@ -153,7 +161,8 @@ g.test('import_and_use_in_different_task')
.desc(
`
Tests that in the different task scope, previous imported GPUExternalTexture
should be expired.
should be expired if it is imported from HTMLVideoElment. GPUExternalTexture
imported from WebCodec VideoFrame is not expired.
`
)
.params(u =>
@ -184,8 +193,10 @@ g.test('import_and_use_in_different_task')
});
await waitForNextTask(() => {
// Enter in another task scope, previous GPUExternalTexture should be expired.
t.submitCommandBuffer(bindGroup, false);
// Enter in another task scope. For GPUExternalTexture imported from WebCodec,
// it shouldn't be expired because VideoFrame is not 'closed'.
// For GPUExternalTexutre imported from HTMLVideoElement, it should be expired.
t.submitCommandBuffer(bindGroup, sourceType === 'VideoFrame' ? true : false);
});
});
@ -193,26 +204,17 @@ g.test('use_import_to_refresh')
.desc(
`
Tests that in the different task scope, imported GPUExternalTexture
again on the same source frame should return the same GPUExternalTexture
object and refresh it.
again on the same HTMLVideoElement should return active GPUExternalTexture.
`
)
.params(u =>
u //
.combine('sourceType', ['VideoElement', 'VideoFrame'])
)
.fn(async t => {
const sourceType = t.params.sourceType;
const videoElement = t.getDefaultVideoElementAndCheck();
let bindGroup;
let externalTexture;
let source;
await startPlayingAndWaitForVideo(videoElement, async () => {
source =
sourceType === 'VideoFrame'
? await getVideoFrameFromVideoElement(t, videoElement)
: videoElement;
await startPlayingAndWaitForVideo(videoElement, () => {
source = videoElement;
externalTexture = t.device.importExternalTexture({
source: source,
});
@ -226,14 +228,20 @@ g.test('use_import_to_refresh')
});
await waitForNextTask(() => {
// Video frame is not updated, import should return the same GPUExternalTexture object.
const shouldBeTheSameExternalTexture = t.device.importExternalTexture({
const mayBeTheSameExternalTexture = t.device.importExternalTexture({
source: source,
});
assert(externalTexture === shouldBeTheSameExternalTexture);
// ImportExternalTexture should refresh expired GPUExternalTexture.
t.submitCommandBuffer(bindGroup, true);
if (externalTexture === mayBeTheSameExternalTexture) {
// ImportExternalTexture should refresh expired GPUExternalTexture.
t.submitCommandBuffer(bindGroup, true);
} else {
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),
entries: [{ binding: 0, resource: externalTexture }],
});
t.submitCommandBuffer(bindGroup, true);
}
});
});

View file

@ -185,6 +185,7 @@ Test that bytesPerRow must be a multiple of 256 for CopyB2T and CopyT2B if it is
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -3,6 +3,7 @@
**/ import {
depthStencilFormatCopyableAspects,
kTextureFormatInfo,
isCompressedTextureFormat,
} from '../../../format_info.js';
import { align } from '../../../util/math.js';
@ -44,6 +45,11 @@ export class ImageCopyTest extends ValidationTest {
break;
}
case 'CopyT2B': {
if (this.isCompatibility && isCompressedTextureFormat(textureCopyView.texture.format)) {
this.skip(
'copyTextureToBuffer is not supported for compressed texture formats in compatibility mode.'
);
}
const buffer = this.device.createBuffer({
size: dataSize,
usage: GPUBufferUsage.COPY_DST,
@ -130,6 +136,11 @@ export class ImageCopyTest extends ValidationTest {
break;
}
case 'CopyT2B': {
if (this.isCompatibility && isCompressedTextureFormat(texture.format)) {
this.skip(
'copyTextureToBuffer is not supported for compressed texture formats in compatibility mode.'
);
}
const { encoder, validateFinish, validateFinishAndSubmit } = this.createEncoder('non-pass');
encoder.copyTextureToBuffer({ texture }, { buffer, ...textureDataLayout }, size);

View file

@ -180,6 +180,7 @@ Test the computation of requiredBytesInCopy by computing the minimum data size f
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -252,6 +253,7 @@ Test that rowsPerImage has no alignment constraints.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -294,6 +296,7 @@ Test the alignment requirement on the linear data offset (block size, or 4 for d
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -398,6 +401,7 @@ Test that bytesPerRow, if specified must be big enough for a full copy row.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -267,6 +267,7 @@ Test the copy must be a full subresource if the texture's format is depth/stenci
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -358,6 +359,7 @@ Test that the texture copy origin must be aligned to the format's block size.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -421,6 +423,7 @@ Test that the copy size must be aligned to the texture's format's block size.
)
.beforeAllSubcases(t => {
const info = kTextureFormatInfo[t.params.format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -660,6 +660,7 @@ g.test('destination_texture,format')
)
.beforeAllSubcases(t => {
const { format } = t.params;
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
})
.fn(async t => {

View file

@ -5,7 +5,7 @@ Validation for attachment compatibility between render passes, bundles, and pipe
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { kTextureSampleCounts, kMaxColorAttachments } from '../../../capability_info.js';
import { kMaxColorAttachmentsToTest, kTextureSampleCounts } from '../../../capability_info.js';
import {
kRegularTextureFormats,
kSizedDepthStencilFormats,
@ -16,7 +16,7 @@ import {
} from '../../../format_info.js';
import { ValidationTest } from '../validation_test.js';
const kColorAttachmentCounts = range(kMaxColorAttachments, i => i + 1);
const kColorAttachmentCounts = range(kMaxColorAttachmentsToTest, i => i + 1);
const kColorAttachments = kColorAttachmentCounts
.map(count => {
// generate cases with 0..1 null attachments at different location
@ -166,6 +166,9 @@ g.test('render_pass_and_bundle,color_format')
)
.fn(t => {
const { passFormat, bundleFormat } = t.params;
t.skipIfTextureFormatNotSupported(passFormat, bundleFormat);
const bundleEncoder = t.device.createRenderBundleEncoder({
colorFormats: [bundleFormat],
});
@ -228,6 +231,18 @@ g.test('render_pass_and_bundle,color_sparse')
)
.fn(t => {
const { passAttachments, bundleAttachments } = t.params;
const maxColorAttachments = t.device.limits.maxColorAttachments;
t.skipIf(
passAttachments.length > maxColorAttachments,
`num passAttachments: ${passAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
);
t.skipIf(
bundleAttachments.length > maxColorAttachments,
`num bundleAttachments: ${bundleAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
);
const colorFormats = bundleAttachments.map(i => (i ? 'rgba8uint' : null));
const bundleEncoder = t.device.createRenderBundleEncoder({
colorFormats,
@ -351,6 +366,9 @@ Test that color attachment formats in render passes or bundles match the pipelin
)
.fn(t => {
const { encoderType, encoderFormat, pipelineFormat } = t.params;
t.skipIfTextureFormatNotSupported(encoderFormat, pipelineFormat);
const pipeline = t.createRenderPipeline([{ format: pipelineFormat, writeMask: 0 }]);
const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
@ -409,6 +427,16 @@ Test that each of color attachments in render passes or bundles match that of th
)
.fn(t => {
const { encoderType, encoderAttachments, pipelineAttachments } = t.params;
const maxColorAttachments = t.device.limits.maxColorAttachments;
t.skipIf(
encoderAttachments.length > maxColorAttachments,
`num encoderAttachments: ${encoderAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
);
t.skipIf(
pipelineAttachments.length > maxColorAttachments,
`num pipelineAttachments: ${pipelineAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
);
const colorTargets = pipelineAttachments.map(i =>
i ? { format: 'rgba8uint', writeMask: 0 } : null

View file

@ -7,9 +7,10 @@ TODO: review for completeness
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { kMaxColorAttachments, kQueryTypes } from '../../../capability_info.js';
import { kMaxColorAttachmentsToTest, kQueryTypes } from '../../../capability_info.js';
import { GPUConst } from '../../../constants.js';
import {
computeBytesPerSampleFromFormats,
kDepthStencilFormats,
kRenderableColorTextureFormats,
kTextureFormatInfo,
@ -107,7 +108,7 @@ g.test('color_attachments,empty')
)
.paramsSubcasesOnly(u =>
u
.combine('colorAttachments', [
.combine('unclampedColorAttachments', [
[],
[undefined],
[undefined, undefined],
@ -117,7 +118,11 @@ g.test('color_attachments,empty')
.combine('hasDepthStencilAttachment', [false, true])
)
.fn(t => {
const { colorAttachments, hasDepthStencilAttachment } = t.params;
const { unclampedColorAttachments, hasDepthStencilAttachment } = t.params;
const colorAttachments = unclampedColorAttachments.slice(
0,
t.device.limits.maxColorAttachments
);
let isEmptyColorTargets = true;
for (let i = 0; i < colorAttachments.length; i++) {
@ -145,11 +150,15 @@ g.test('color_attachments,limits,maxColorAttachments')
`
)
.paramsSimple([
{ colorAttachmentsCount: 8, _success: true }, // Control case
{ colorAttachmentsCount: 9, _success: false }, // Out of bounds
{ colorAttachmentsCountVariant: { mult: 1, add: 0 }, _success: true }, // Control case
{ colorAttachmentsCountVariant: { mult: 1, add: 1 }, _success: false }, // Out of bounds
])
.fn(t => {
const { colorAttachmentsCount, _success } = t.params;
const { colorAttachmentsCountVariant, _success } = t.params;
const colorAttachmentsCount = t.makeLimitVariant(
'maxColorAttachments',
colorAttachmentsCountVariant
);
const colorAttachments = [];
for (let i = 0; i < colorAttachmentsCount; i++) {
@ -173,13 +182,21 @@ g.test('color_attachments,limits,maxColorAttachmentBytesPerSample,aligned')
.beginSubcases()
.combine(
'attachmentCount',
range(kMaxColorAttachments, i => i + 1)
range(kMaxColorAttachmentsToTest, i => i + 1)
)
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(t => {
const { format, attachmentCount } = t.params;
const info = kTextureFormatInfo[format];
t.skipIf(
attachmentCount > t.device.limits.maxColorAttachments,
`attachmentCount: ${attachmentCount} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
);
const colorAttachments = [];
for (let i = 0; i < attachmentCount; i++) {
const colorTexture = t.createTexture({ format });
@ -187,7 +204,7 @@ g.test('color_attachments,limits,maxColorAttachmentBytesPerSample,aligned')
}
const shouldError =
info.colorRender === undefined ||
info.colorRender.byteCost * attachmentCount >
computeBytesPerSampleFromFormats(range(attachmentCount, () => format)) >
t.device.limits.maxColorAttachmentBytesPerSample;
t.tryRenderPass(!shouldError, { colorAttachments });
@ -208,25 +225,30 @@ g.test('color_attachments,limits,maxColorAttachmentBytesPerSample,unaligned')
// is allowed: 4+8+16+1+1 < 32.
{
formats: ['r8unorm', 'r32float', 'rgba8unorm', 'rgba32float', 'r8unorm'],
_success: false,
},
{
formats: ['r32float', 'rgba8unorm', 'rgba32float', 'r8unorm', 'r8unorm'],
_success: true,
},
])
)
.fn(t => {
const { formats, _success } = t.params;
const { formats } = t.params;
t.skipIf(
formats.length > t.device.limits.maxColorAttachments,
`numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
);
const colorAttachments = [];
for (const format of formats) {
const colorTexture = t.createTexture({ format });
colorAttachments.push(t.getColorAttachment(colorTexture));
}
t.tryRenderPass(_success, { colorAttachments });
const success =
computeBytesPerSampleFromFormats(formats) <= t.device.limits.maxColorAttachmentBytesPerSample;
t.tryRenderPass(success, { colorAttachments });
});
g.test('attachments,same_size')
@ -934,6 +956,9 @@ g.test('resolveTarget,format_supports_resolve')
.combine('format', kRenderableColorTextureFormats)
.filter(t => kTextureFormatInfo[t.format].multisample)
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];

View file

@ -19,6 +19,7 @@ g.test('format')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -5,11 +5,16 @@ This test dedicatedly tests validation of GPUFragmentState of createRenderPipeli
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { range } from '../../../../common/util/util.js';
import { kBlendFactors, kBlendOperations, kMaxColorAttachments } from '../../../capability_info.js';
import {
kBlendFactors,
kBlendOperations,
kMaxColorAttachmentsToTest,
} from '../../../capability_info.js';
import {
kTextureFormats,
kRenderableColorTextureFormats,
kTextureFormatInfo,
computeBytesPerSampleFromFormats,
} from '../../../format_info.js';
import {
getFragmentShaderCodeWithOutput,
@ -51,6 +56,7 @@ g.test('targets_format_renderable')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(t.params.format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {
@ -66,16 +72,27 @@ g.test('limits,maxColorAttachments')
.desc(
`Tests that color state targets length must not be larger than device.limits.maxColorAttachments.`
)
.params(u => u.combine('isAsync', [false, true]).combine('targetsLength', [8, 9]))
.params(u =>
u.combine('isAsync', [false, true]).combine('targetsLengthVariant', [
{ mult: 1, add: 0 },
{ mult: 1, add: 1 },
])
)
.fn(t => {
const { isAsync, targetsLength } = t.params;
const { isAsync, targetsLengthVariant } = t.params;
const targetsLength = t.makeLimitVariant('maxColorAttachments', targetsLengthVariant);
const descriptor = t.getDescriptor({
targets: range(targetsLength, i => {
// Set writeMask to 0 for attachments without fragment output
return { format: 'rg8unorm', writeMask: i === 0 ? 0xf : 0 };
return { format: 'rg8unorm', writeMask: 0 };
}),
fragmentShaderCode: kDefaultFragmentShaderCode,
// add a depth stencil so that we can set writeMask to 0 for all color attachments
depthStencil: {
format: 'depth24plus',
depthWriteEnabled: true,
depthCompare: 'always',
},
});
t.doCreateRenderPipelineTest(
@ -98,14 +115,22 @@ g.test('limits,maxColorAttachmentBytesPerSample,aligned')
.beginSubcases()
.combine(
'attachmentCount',
range(kMaxColorAttachments, i => i + 1)
range(kMaxColorAttachmentsToTest, i => i + 1)
)
.combine('isAsync', [false, true])
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(t => {
const { format, attachmentCount, isAsync } = t.params;
const info = kTextureFormatInfo[format];
t.skipIf(
attachmentCount > t.device.limits.maxColorAttachments,
`attachmentCount: ${attachmentCount} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
);
const descriptor = t.getDescriptor({
targets: range(attachmentCount, () => {
return { format, writeMask: 0 };
@ -135,20 +160,24 @@ g.test('limits,maxColorAttachmentBytesPerSample,unaligned')
// is allowed: 4+8+16+1+1 < 32.
{
formats: ['r8unorm', 'r32float', 'rgba8unorm', 'rgba32float', 'r8unorm'],
_success: false,
},
{
formats: ['r32float', 'rgba8unorm', 'rgba32float', 'r8unorm', 'r8unorm'],
_success: true,
},
])
.beginSubcases()
.combine('isAsync', [false, true])
)
.fn(t => {
const { formats, _success, isAsync } = t.params;
const { formats, isAsync } = t.params;
t.skipIf(
formats.length > t.device.limits.maxColorAttachments,
`numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
);
const success =
computeBytesPerSampleFromFormats(formats) <= t.device.limits.maxColorAttachmentBytesPerSample;
const descriptor = t.getDescriptor({
targets: formats.map(f => {
@ -156,7 +185,7 @@ g.test('limits,maxColorAttachmentBytesPerSample,unaligned')
}),
});
t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
t.doCreateRenderPipelineTest(isAsync, success, descriptor);
});
g.test('targets_format_filterable')
@ -176,6 +205,7 @@ g.test('targets_format_filterable')
.beforeAllSubcases(t => {
const { format } = t.params;
const info = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
t.selectDeviceOrSkipTestCase(info.feature);
})
.fn(t => {

View file

@ -61,6 +61,10 @@ g.test('alpha_to_coverage,sample_mask')
.fn(t => {
const { isAsync, alphaToCoverageEnabled, hasSampleMaskOutput } = t.params;
if (t.isCompatibility && hasSampleMaskOutput) {
t.skip('WGSL sample_mask is not supported in compatibility mode');
}
const descriptor = t.getDescriptor({
multisample: { alphaToCoverageEnabled, count: 4 },
fragmentShaderCode: hasSampleMaskOutput

View file

@ -5,12 +5,10 @@ This test dedicatedly tests validation of GPUVertexState of createRenderPipeline
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import {
kMaxVertexAttributes,
kMaxVertexBufferArrayStride,
kMaxVertexBuffers,
kVertexFormats,
kVertexFormatInfo,
} from '../../../capability_info.js';
filterUniqueValueTestVariants,
makeValueTestVariant,
} from '../../../../common/util/util.js';
import { kVertexFormats, kVertexFormatInfo } from '../../../capability_info.js';
import { ValidationTest } from '../validation_test.js';
const VERTEX_SHADER_CODE_WITH_NO_INPUT = `
@ -138,12 +136,17 @@ g.test('max_vertex_buffer_limit')
)
.paramsSubcasesOnly(u =>
u //
.combine('count', [0, 1, kMaxVertexBuffers, kMaxVertexBuffers + 1])
.combine('countVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 1, add: 1 },
])
.combine('lastEmpty', [false, true])
)
.fn(t => {
const { count, lastEmpty } = t.params;
const { countVariant, lastEmpty } = t.params;
const count = t.makeLimitVariant('maxVertexBuffers', countVariant);
const vertexBuffers = [];
for (let i = 0; i < count; i++) {
if (lastEmpty || i !== count - 1) {
@ -156,7 +159,7 @@ g.test('max_vertex_buffer_limit')
}
}
const success = count <= kMaxVertexBuffers;
const success = count <= t.device.limits.maxVertexBuffers;
t.testVertexState(success, vertexBuffers);
});
@ -168,11 +171,17 @@ g.test('max_vertex_attribute_limit')
)
.paramsSubcasesOnly(u =>
u //
.combine('attribCount', [0, 1, kMaxVertexAttributes, kMaxVertexAttributes + 1])
.combine('attribCountVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: 0 },
{ mult: 1, add: 1 },
])
.combine('attribsPerBuffer', [0, 1, 4])
)
.fn(t => {
const { attribCount, attribsPerBuffer } = t.params;
const { attribCountVariant, attribsPerBuffer } = t.params;
const attribCount = t.makeLimitVariant('maxVertexAttributes', attribCountVariant);
const vertexBuffers = [];
@ -180,7 +189,7 @@ g.test('max_vertex_attribute_limit')
while (attribsAdded !== attribCount) {
// Choose how many attributes to add for this buffer. The last buffer gets all remaining attributes.
let targetCount = Math.min(attribCount, attribsAdded + attribsPerBuffer);
if (vertexBuffers.length === kMaxVertexBuffers - 1) {
if (vertexBuffers.length === t.device.limits.maxVertexBuffers - 1) {
targetCount = attribCount;
}
@ -193,7 +202,7 @@ g.test('max_vertex_attribute_limit')
vertexBuffers.push({ arrayStride: 0, attributes });
}
const success = attribCount <= kMaxVertexAttributes;
const success = attribCount <= t.device.limits.maxVertexAttributes;
t.testVertexState(success, vertexBuffers);
});
@ -205,23 +214,28 @@ g.test('max_vertex_buffer_array_stride_limit')
)
.paramsSubcasesOnly(u =>
u //
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('arrayStride', [
0,
4,
256,
kMaxVertexBufferArrayStride - 4,
kMaxVertexBufferArrayStride,
kMaxVertexBufferArrayStride + 4,
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('arrayStrideVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 4 },
{ mult: 0, add: 256 },
{ mult: 1, add: -4 },
{ mult: 1, add: 0 },
{ mult: 1, add: +4 },
])
)
.fn(t => {
const { vertexBufferIndex, arrayStride } = t.params;
const { vertexBufferIndexVariant, arrayStrideVariant } = t.params;
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
const vertexBuffers = [];
vertexBuffers[vertexBufferIndex] = { arrayStride, attributes: [] };
const success = arrayStride <= kMaxVertexBufferArrayStride;
const success = arrayStride <= t.device.limits.maxVertexBufferArrayStride;
t.testVertexState(success, vertexBuffers);
});
@ -233,19 +247,25 @@ g.test('vertex_buffer_array_stride_limit_alignment')
)
.paramsSubcasesOnly(u =>
u //
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('arrayStride', [
0,
1,
2,
4,
kMaxVertexBufferArrayStride - 4,
kMaxVertexBufferArrayStride - 2,
kMaxVertexBufferArrayStride,
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('arrayStrideVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 2 },
{ mult: 0, add: 4 },
{ mult: 1, add: -4 },
{ mult: 1, add: -2 },
{ mult: 1, add: 0 },
])
)
.fn(t => {
const { vertexBufferIndex, arrayStride } = t.params;
const { vertexBufferIndexVariant, arrayStrideVariant } = t.params;
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
const vertexBuffers = [];
vertexBuffers[vertexBufferIndex] = { arrayStride, attributes: [] };
@ -263,18 +283,38 @@ g.test('vertex_attribute_shaderLocation_limit')
)
.paramsSubcasesOnly(u =>
u //
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('extraAttributeCount', [0, 1, kMaxVertexAttributes - 1])
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('extraAttributeCountVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('testAttributeAtStart', [false, true])
.combine('testShaderLocation', [0, 1, kMaxVertexAttributes - 1, kMaxVertexAttributes])
.combine('testShaderLocationVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 1, add: 0 },
])
)
.fn(t => {
const {
vertexBufferIndex,
extraAttributeCount,
testShaderLocation,
vertexBufferIndexVariant,
extraAttributeCountVariant,
testShaderLocationVariant,
testAttributeAtStart,
} = t.params;
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const extraAttributeCount = t.makeLimitVariant(
'maxVertexAttributes',
extraAttributeCountVariant
);
const testShaderLocation = t.makeLimitVariant('maxVertexAttributes', testShaderLocationVariant);
const attributes = [];
addTestAttributes(attributes, {
@ -287,7 +327,7 @@ g.test('vertex_attribute_shaderLocation_limit')
const vertexBuffers = [];
vertexBuffers[vertexBufferIndex] = { arrayStride: 256, attributes };
const success = testShaderLocation < kMaxVertexAttributes;
const success = testShaderLocation < t.device.limits.maxVertexAttributes;
t.testVertexState(success, vertexBuffers);
});
@ -300,24 +340,46 @@ g.test('vertex_attribute_shaderLocation_unique')
)
.paramsSubcasesOnly(u =>
u //
.combine('vertexBufferIndexA', [0, 1, kMaxVertexBuffers - 1])
.combine('vertexBufferIndexB', [0, 1, kMaxVertexBuffers - 1])
.combine('vertexBufferIndexAVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('vertexBufferIndexBVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('testAttributeAtStartA', [false, true])
.combine('testAttributeAtStartB', [false, true])
.combine('shaderLocationA', [0, 1, 7, kMaxVertexAttributes - 1])
.combine('shaderLocationB', [0, 1, 7, kMaxVertexAttributes - 1])
.combine('shaderLocationAVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 7 },
{ mult: 1, add: -1 },
])
.combine('shaderLocationBVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 7 },
{ mult: 1, add: -1 },
])
.combine('extraAttributeCount', [0, 4])
)
.fn(t => {
const {
vertexBufferIndexA,
vertexBufferIndexB,
vertexBufferIndexAVariant,
vertexBufferIndexBVariant,
testAttributeAtStartA,
testAttributeAtStartB,
shaderLocationA,
shaderLocationB,
shaderLocationAVariant,
shaderLocationBVariant,
extraAttributeCount,
} = t.params;
const vertexBufferIndexA = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexAVariant);
const vertexBufferIndexB = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexBVariant);
const shaderLocationA = t.makeLimitVariant('maxVertexAttributes', shaderLocationAVariant);
const shaderLocationB = t.makeLimitVariant('maxVertexAttributes', shaderLocationBVariant);
// Depending on the params, the vertexBuffer for A and B can be the same or different. To support
// both cases without code changes we treat `vertexBufferAttributes` as a map from indices to
@ -363,10 +425,17 @@ g.test('vertex_shader_input_location_limit')
)
.paramsSubcasesOnly(u =>
u //
.combine('testLocation', [0, 1, kMaxVertexAttributes - 1, kMaxVertexAttributes, 2 ** 31 - 1])
.combine('testLocationVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
{ mult: 1, add: 0 },
{ mult: 0, add: 2 ** 31 - 1 },
])
)
.fn(t => {
const { testLocation } = t.params;
const { testLocationVariant } = t.params;
const testLocation = t.makeLimitVariant('maxVertexAttributes', testLocationVariant);
const shader = t.generateTestVertexShader([
{
@ -388,7 +457,7 @@ g.test('vertex_shader_input_location_limit')
},
];
const success = testLocation < kMaxVertexAttributes;
const success = testLocation < t.device.limits.maxVertexAttributes;
t.testVertexState(success, vertexBuffers, shader);
});
@ -400,18 +469,39 @@ g.test('vertex_shader_input_location_in_vertex_state')
)
.paramsSubcasesOnly(u =>
u //
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('extraAttributeCount', [0, 1, kMaxVertexAttributes - 1])
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('extraAttributeCountVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('testAttributeAtStart', [false, true])
.combine('testShaderLocation', [0, 1, 4, 7, kMaxVertexAttributes - 1])
.combine('testShaderLocationVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 0, add: 4 },
{ mult: 0, add: 5 },
{ mult: 1, add: -1 },
])
)
.fn(t => {
const {
vertexBufferIndex,
extraAttributeCount,
vertexBufferIndexVariant,
extraAttributeCountVariant,
testAttributeAtStart,
testShaderLocation,
testShaderLocationVariant,
} = t.params;
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const extraAttributeCount = t.makeLimitVariant(
'maxVertexAttributes',
extraAttributeCountVariant
);
const testShaderLocation = t.makeLimitVariant('maxVertexAttributes', testShaderLocationVariant);
// We have a shader using `testShaderLocation`.
const shader = t.generateTestVertexShader([
{
@ -502,37 +592,55 @@ g.test('vertex_attribute_offset_alignment')
.params(u =>
u
.combine('format', kVertexFormats)
.combine('arrayStride', [256, kMaxVertexBufferArrayStride])
.expand('offset', p => {
.combine('arrayStrideVariant', [
{ mult: 0, add: 256 },
{ mult: 1, add: 0 },
])
.expand('offsetVariant', p => {
const { bytesPerComponent, componentCount } = kVertexFormatInfo[p.format];
const formatSize = bytesPerComponent * componentCount;
return new Set([
0,
Math.floor(formatSize / 2),
formatSize,
2,
4,
p.arrayStride - formatSize,
p.arrayStride - formatSize - Math.floor(formatSize / 2),
p.arrayStride - formatSize - 4,
p.arrayStride - formatSize - 2,
return filterUniqueValueTestVariants([
{ mult: 0, add: 0 },
{ mult: 0, add: Math.floor(formatSize / 2) },
{ mult: 0, add: formatSize },
{ mult: 0, add: 2 },
{ mult: 0, add: 4 },
{ mult: 1, add: -formatSize },
{ mult: 1, add: -formatSize - Math.floor(formatSize / 2) },
{ mult: 1, add: -formatSize - 4 },
{ mult: 1, add: -formatSize - 2 },
]);
})
.beginSubcases()
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('extraAttributeCount', [0, 1, kMaxVertexAttributes - 1])
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('extraAttributeCountVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('testAttributeAtStart', [false, true])
)
.fn(t => {
const {
format,
arrayStride,
offset,
vertexBufferIndex,
extraAttributeCount,
arrayStrideVariant,
offsetVariant,
vertexBufferIndexVariant,
extraAttributeCountVariant,
testAttributeAtStart,
} = t.params;
const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const extraAttributeCount = t.makeLimitVariant(
'maxVertexAttributes',
extraAttributeCountVariant
);
const offset = makeValueTestVariant(arrayStride, offsetVariant);
const attributes = [];
addTestAttributes(attributes, {
@ -565,46 +673,61 @@ g.test('vertex_attribute_contained_in_stride')
u
.combine('format', kVertexFormats)
.beginSubcases()
.combine('arrayStride', [
0,
256,
kMaxVertexBufferArrayStride - 4,
kMaxVertexBufferArrayStride,
.combine('arrayStrideVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 256 },
{ mult: 1, add: -4 },
{ mult: 1, add: 0 },
])
.expand('offset', function* (p) {
.expand('offsetVariant', function* (p) {
// Compute a bunch of test offsets to test.
const { bytesPerComponent, componentCount } = kVertexFormatInfo[p.format];
const formatSize = bytesPerComponent * componentCount;
yield 0;
yield 4;
// arrayStride = 0 is a special case because for the offset validation it acts the same
// as arrayStride = kMaxVertexBufferArrayStride. We special case here so as to avoid adding
// negative offsets that would cause an IDL exception to be thrown instead of a validation
// error.
const stride = p.arrayStride !== 0 ? p.arrayStride : kMaxVertexBufferArrayStride;
yield stride - formatSize;
yield stride - formatSize + 4;
yield { mult: 0, add: 0 };
yield { mult: 0, add: 4 };
yield { mult: 1, add: -formatSize };
yield { mult: 1, add: -formatSize + 4 };
// Avoid adding duplicate cases when formatSize == 4 (it is already tested above)
if (formatSize !== 4) {
yield formatSize;
yield stride;
yield { mult: 0, add: formatSize };
yield { mult: 1, add: 0 };
}
})
.combine('vertexBufferIndex', [0, 1, kMaxVertexBuffers - 1])
.combine('extraAttributeCount', [0, 1, kMaxVertexAttributes - 1])
.combine('vertexBufferIndexVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('extraAttributeCountVariant', [
{ mult: 0, add: 0 },
{ mult: 0, add: 1 },
{ mult: 1, add: -1 },
])
.combine('testAttributeAtStart', [false, true])
)
.fn(t => {
const {
format,
arrayStride,
offset,
vertexBufferIndex,
extraAttributeCount,
arrayStrideVariant,
offsetVariant,
vertexBufferIndexVariant,
extraAttributeCountVariant,
testAttributeAtStart,
} = t.params;
const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
const extraAttributeCount = t.makeLimitVariant(
'maxVertexAttributes',
extraAttributeCountVariant
);
// arrayStride = 0 is a special case because for the offset validation it acts the same
// as arrayStride = device.limits.maxVertexBufferArrayStride. We special case here so as to avoid adding
// negative offsets that would cause an IDL exception to be thrown instead of a validation
// error.
const stride = arrayStride !== 0 ? arrayStride : t.device.limits.maxVertexBufferArrayStride;
const offset = makeValueTestVariant(stride, offsetVariant);
const attributes = [];
addTestAttributes(attributes, {
@ -619,7 +742,7 @@ g.test('vertex_attribute_contained_in_stride')
const formatInfo = kVertexFormatInfo[format];
const formatSize = formatInfo.bytesPerComponent * formatInfo.componentCount;
const limit = arrayStride === 0 ? kMaxVertexBufferArrayStride : arrayStride;
const limit = arrayStride === 0 ? t.device.limits.maxVertexBufferArrayStride : arrayStride;
const success = offset + formatSize <= limit;
t.testVertexState(success, vertexBuffers);
@ -631,7 +754,7 @@ g.test('many_attributes_overlapping')
// Create many attributes, each of them intersects with at least 3 others.
const attributes = [];
const formats = ['float32x4', 'uint32x4', 'sint32x4'];
for (let i = 0; i < kMaxVertexAttributes; i++) {
for (let i = 0; i < t.device.limits.maxVertexAttributes; i++) {
attributes.push({ format: formats[i % 3], offset: i * 4, shaderLocation: i });
}

View file

@ -412,6 +412,17 @@ g.test('subresources_and_binding_types_combination_for_color')
arrayLayerCount: layerCount1,
});
const viewsAreSame =
dimension0 === dimension1 &&
layerCount0 === layerCount1 &&
BASE_LEVEL === baseLevel1 &&
levelCount0 === levelCount1 &&
BASE_LAYER === baseLayer1 &&
layerCount0 === layerCount1;
if (!viewsAreSame && t.isCompatibility) {
t.skip('different views of same texture are not supported in compatibility mode');
}
const encoder = t.device.createCommandEncoder();
if (type0 === 'render-target') {
// Note that type1 is 'render-target' too. So we don't need to create bindings.

View file

@ -6,6 +6,7 @@ Tests for device lost induced via destroy.
- After device destruction, runs the same APIs. No expected observable results, so test crash or future failures are the only current failure indicators.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { assert } from '../../../../../common/util/util.js';
import {
allBindingEntries,
bindingTypeInfo,
@ -109,7 +110,6 @@ Tests creating buffers on destroyed device. Tests valid combinations of:
.params(u =>
u
.combine('usageType', kBufferUsageKeys)
.beginSubcases()
.combine('usageCopy', kBufferUsageCopy)
.combine('awaitLost', [true, false])
.filter(({ usageType, usageCopy }) => {
@ -148,7 +148,6 @@ Tests creating 2d uncompressed textures on destroyed device. Tests valid combina
.params(u =>
u
.combine('format', kRegularTextureFormats)
.beginSubcases()
.combine('usageType', kTextureUsageType)
.combine('usageCopy', kTextureUsageCopy)
.combine('awaitLost', [true, false])
@ -160,6 +159,9 @@ Tests creating 2d uncompressed textures on destroyed device. Tests valid combina
);
})
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(async t => {
const { awaitLost, format, usageType, usageCopy } = t.params;
const { blockWidth, blockHeight } = kTextureFormatInfo[format];
@ -183,7 +185,6 @@ Tests creating 2d compressed textures on destroyed device. Tests valid combinati
.params(u =>
u
.combine('format', kCompressedTextureFormats)
.beginSubcases()
.combine('usageType', kTextureUsageType)
.combine('usageCopy', kTextureUsageCopy)
.combine('awaitLost', [true, false])
@ -222,7 +223,6 @@ Tests creating texture views on 2d uncompressed textures from destroyed device.
.params(u =>
u
.combine('format', kRegularTextureFormats)
.beginSubcases()
.combine('usageType', kTextureUsageType)
.combine('usageCopy', kTextureUsageCopy)
.combine('awaitLost', [true, false])
@ -234,6 +234,9 @@ Tests creating texture views on 2d uncompressed textures from destroyed device.
);
})
)
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(async t => {
const { awaitLost, format, usageType, usageCopy } = t.params;
const { blockWidth, blockHeight } = kTextureFormatInfo[format];
@ -258,7 +261,6 @@ Tests creating texture views on 2d compressed textures from destroyed device. Te
.params(u =>
u
.combine('format', kCompressedTextureFormats)
.beginSubcases()
.combine('usageType', kTextureUsageType)
.combine('usageCopy', kTextureUsageCopy)
.combine('awaitLost', [true, false])
@ -293,7 +295,7 @@ g.test('createSampler')
Tests creating samplers on destroyed device.
`
)
.params(u => u.beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost } = t.params;
await t.executeAfterDestroy(() => {
@ -309,9 +311,7 @@ Tests creating bind group layouts on destroyed device. Tests valid combinations
- Maximum set of visibility for each binding entry
`
)
.params(u =>
u.combine('entry', allBindingEntries(false)).beginSubcases().combine('awaitLost', [true, false])
)
.params(u => u.combine('entry', allBindingEntries(false)).combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost, entry } = t.params;
const visibility = bindingTypeInfo(entry).validStages;
@ -349,7 +349,6 @@ Tests creating bind group on destroyed device. Tests valid combinations of:
return info.resource === resourceType;
}
})
.beginSubcases()
.combine('awaitLost', [true, false])
)
.fn(async t => {
@ -372,9 +371,7 @@ Tests creating pipeline layouts on destroyed device. Tests valid combinations of
- Maximum set of visibility for each binding entry
`
)
.params(u =>
u.combine('entry', allBindingEntries(false)).beginSubcases().combine('awaitLost', [true, false])
)
.params(u => u.combine('entry', allBindingEntries(false)).combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost, entry } = t.params;
const visibility = bindingTypeInfo(entry).validStages;
@ -395,9 +392,7 @@ Tests creating shader modules on destroyed device.
- Tests all shader stages: vertex, fragment, compute
`
)
.params(u =>
u.combine('stage', kShaderStageKeys).beginSubcases().combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kShaderStageKeys).combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost, stage } = t.params;
await t.executeAfterDestroy(() => {
@ -412,7 +407,7 @@ Tests creating compute pipeline on destroyed device.
- Tests with a valid no-op compute shader
`
)
.params(u => u.beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost } = t.params;
const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
@ -431,7 +426,7 @@ Tests creating render pipeline on destroyed device.
- Tests with valid no-op vertex and fragment shaders
`
)
.params(u => u.beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost } = t.params;
const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
@ -449,13 +444,177 @@ Tests creating render pipeline on destroyed device.
}, awaitLost);
});
// Verifies GPUDevice.createComputePipelineAsync() behavior across device destruction:
// a creation kicked off before destroy() must settle correctly even if it races the
// loss, and creation requested after destroy must always resolve (the code below
// asserts that invalid async creation "succeeds" once the device is already lost).
g.test('createComputePipelineAsync')
  .desc(
    `
Tests creating a pipeline asynchronously while destroying the device and on a destroyed device
- valid={true, false}, use an invalid or valid pipeline descriptor
- awaitLost={true, false}, check results before/after waiting for the device lost promise
`
  )
  .params(u => u.combine('valid', [true, false]).combine('awaitLost', [true, false]))
  .fn(async t => {
    const { valid, awaitLost } = t.params;
    const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
    // The descriptor is made valid or invalid solely by whether the entry point exists.
    const fn = () =>
      t.device.createComputePipelineAsync({
        layout: 'auto',
        compute: { module: cShader, entryPoint: valid ? 'main' : 'does_not_exist' },
      });

    // Kick off async creation BEFORE the device is destroyed below.
    const p = fn();

    // Track whether or not the device is lost.
    let isLost = false;
    void t.device.lost.then(() => {
      isLost = true;
    });

    if (valid) {
      // The async creation should resolve successfully.
      t.shouldResolve(
        (async () => {
          const pipeline = await p;
          assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
        })()
      );
    } else {
      // The async creation should resolve successfully if the device is lost.
      // If the device is not lost, it should see a validation error.
      // Note: this could be a race!
      t.shouldResolve(
        p.then(
          pipeline => {
            assert(
              isLost,
              'Invalid async creation should "succeed" if the device is already lost.'
            );
            assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
          },
          err => {
            assert(
              !isLost,
              'Invalid async creation should only fail if the device is not yet lost.'
            );
            assert(err instanceof GPUPipelineError, 'Error was not a GPUPipelineError');
            assert(err.reason === 'validation', 'Expected validation error');
          }
        )
      );
    }

    // Destroy the device, and expect it to be lost.
    t.expectDeviceLost('destroyed');
    t.device.destroy();
    if (awaitLost) {
      // Optionally wait for the lost promise before the final creation attempt.
      const lostInfo = await t.device.lost;
      t.expect(lostInfo.reason === 'destroyed');
    }

    // After device destroy, creation should still resolve successfully.
    t.shouldResolve(
      (async () => {
        const pipeline = await fn();
        assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
      })()
    );
  });
// Verifies GPUDevice.createRenderPipelineAsync() behavior across device destruction:
// a creation kicked off before destroy() must settle correctly even if it races the
// loss, and creation requested after destroy must always resolve (the code below
// asserts that invalid async creation "succeeds" once the device is already lost).
g.test('createRenderPipelineAsync')
  .desc(
    `
Tests creating a pipeline asynchronously while destroying the device and on a destroyed device
- valid={true, false}, use an invalid or valid pipeline descriptor
- awaitLost={true, false}, check results before/after waiting for the device lost promise
`
  )
  .params(u => u.combine('valid', [true, false]).combine('awaitLost', [true, false]))
  .fn(async t => {
    const { valid, awaitLost } = t.params;
    const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
    const fShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('FRAGMENT') });
    // The descriptor is made valid or invalid solely by whether the fragment
    // entry point exists.
    const fn = () =>
      t.device.createRenderPipelineAsync({
        layout: 'auto',
        vertex: { module: vShader, entryPoint: 'main' },
        fragment: {
          module: fShader,
          entryPoint: valid ? 'main' : 'does_not_exist',
          targets: [{ format: 'rgba8unorm', writeMask: 0 }],
        },
      });

    // Kick off async creation BEFORE the device is destroyed below.
    const p = fn();

    // Track whether or not the device is lost.
    let isLost = false;
    void t.device.lost.then(() => {
      isLost = true;
    });

    if (valid) {
      // The async creation should resolve successfully.
      t.shouldResolve(
        (async () => {
          const pipeline = await p;
          assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
        })()
      );
    } else {
      // The async creation should resolve successfully if the device is lost.
      // If the device is not lost, it should see a validation error.
      // Note: this could be a race!
      t.shouldResolve(
        p.then(
          pipeline => {
            assert(
              isLost,
              'Invalid async creation should "succeed" if the device is already lost.'
            );
            assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
          },
          err => {
            assert(
              !isLost,
              'Invalid async creation should only fail if the device is not yet lost.'
            );
            assert(err instanceof GPUPipelineError, 'Error was not a GPUPipelineError');
            assert(err.reason === 'validation', 'Expected validation error');
          }
        )
      );
    }

    // Destroy the device, and expect it to be lost.
    t.expectDeviceLost('destroyed');
    t.device.destroy();
    if (awaitLost) {
      // Optionally wait for the lost promise before the final creation attempt.
      const lostInfo = await t.device.lost;
      t.expect(lostInfo.reason === 'destroyed');
    }

    // After device destroy, creation should still resolve successfully.
    t.shouldResolve(
      (async () => {
        const pipeline = await fn();
        assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
      })()
    );
  });
g.test('createCommandEncoder')
.desc(
`
Tests creating command encoders on destroyed device.
`
)
.params(u => u.beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost } = t.params;
await t.executeAfterDestroy(() => {
@ -471,10 +630,7 @@ Tests creating render bundle encoders on destroyed device.
`
)
.params(u =>
u
.combine('format', kRenderableColorTextureFormats)
.beginSubcases()
.combine('awaitLost', [true, false])
u.combine('format', kRenderableColorTextureFormats).combine('awaitLost', [true, false])
)
.fn(async t => {
const { awaitLost, format } = t.params;
@ -490,7 +646,7 @@ Tests creating query sets on destroyed device.
- Tests various query set types
`
)
.params(u => u.combine('type', kQueryTypes).beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('type', kQueryTypes).combine('awaitLost', [true, false]))
.beforeAllSubcases(t => {
const { type } = t.params;
t.selectDeviceForQueryTypeOrSkipTestCase(type);
@ -510,10 +666,7 @@ Tests import external texture on destroyed device. Tests valid combinations of:
`
)
.params(u =>
u
.combine('sourceType', ['VideoElement', 'VideoFrame'])
.beginSubcases()
.combine('awaitLost', [true, false])
u.combine('sourceType', ['VideoElement', 'VideoFrame']).combine('awaitLost', [true, false])
)
.fn(async t => {
const { awaitLost, sourceType } = t.params;
@ -554,9 +707,7 @@ g.test('command,copyBufferToBuffer')
Tests copyBufferToBuffer command with various uncompressed formats on destroyed device.
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const kBufferSize = 16;
@ -582,9 +733,7 @@ Tests copyBufferToTexture command on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const format = 'rgba32uint';
@ -621,9 +770,7 @@ Tests copyTextureToBuffer command on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const format = 'rgba32uint';
@ -660,9 +807,7 @@ Tests copyTextureToTexture command on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const format = 'rgba32uint';
@ -696,9 +841,7 @@ Tests encoding and finishing a clearBuffer command on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const kBufferSize = 16;
@ -723,7 +866,6 @@ Tests encoding and finishing a writeTimestamp command on destroyed device.
.params(u =>
u
.combine('type', kQueryTypes)
.beginSubcases()
.combine('stage', kCommandValidationStages)
.combine('awaitLost', [true, false])
)
@ -755,9 +897,7 @@ Tests encoding and finishing a resolveQuerySet command on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const kQueryCount = 2;
@ -781,9 +921,7 @@ Tests encoding and dispatching a simple valid compute pass on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
@ -807,9 +945,7 @@ Tests encoding and finishing a simple valid render pass on destroyed device.
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
@ -839,9 +975,7 @@ Tests encoding and drawing a render pass including a render bundle on destroyed
- Tests submitting command on destroyed device
`
)
.params(u =>
u.beginSubcases().combine('stage', kCommandValidationStages).combine('awaitLost', [true, false])
)
.params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
.fn(async t => {
const { stage, awaitLost } = t.params;
const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
@ -868,9 +1002,7 @@ g.test('queue,writeBuffer')
Tests writeBuffer on queue on destroyed device.
`
)
.params(u =>
u.combine('numElements', [4, 8, 16]).beginSubcases().combine('awaitLost', [true, false])
)
.params(u => u.combine('numElements', [4, 8, 16]).combine('awaitLost', [true, false]))
.fn(async t => {
const { numElements, awaitLost } = t.params;
const buffer = t.device.createBuffer({
@ -889,9 +1021,10 @@ g.test('queue,writeTexture,2d,uncompressed_format')
Tests writeTexture on queue on destroyed device with uncompressed formats.
`
)
.params(u =>
u.combine('format', kRegularTextureFormats).beginSubcases().combine('awaitLost', [true, false])
)
.params(u => u.combine('format', kRegularTextureFormats).combine('awaitLost', [true, false]))
.beforeAllSubcases(t => {
t.skipIfTextureFormatNotSupported(t.params.format);
})
.fn(async t => {
const { format, awaitLost } = t.params;
const {
@ -921,12 +1054,7 @@ g.test('queue,writeTexture,2d,compressed_format')
Tests writeTexture on queue on destroyed device with compressed formats.
`
)
.params(u =>
u
.combine('format', kCompressedTextureFormats)
.beginSubcases()
.combine('awaitLost', [true, false])
)
.params(u => u.combine('format', kCompressedTextureFormats).combine('awaitLost', [true, false]))
.beforeAllSubcases(t => {
const { format } = t.params;
t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
@ -964,7 +1092,6 @@ Tests copyExternalImageToTexture from canvas on queue on destroyed device.
u
.combine('canvasType', kAllCanvasTypes)
.combine('contextType', kValidCanvasContextIds)
.beginSubcases()
.combine('awaitLost', [true, false])
)
.fn(async t => {
@ -998,7 +1125,7 @@ g.test('queue,copyExternalImageToTexture,imageBitmap')
Tests copyExternalImageToTexture from canvas on queue on destroyed device.
`
)
.params(u => u.beginSubcases().combine('awaitLost', [true, false]))
.params(u => u.combine('awaitLost', [true, false]))
.fn(async t => {
const { awaitLost } = t.params;
if (typeof createImageBitmap === 'undefined') {

View file

@ -3,7 +3,12 @@
**/ // MAINTENANCE_TODO: The generated Typedoc for this file is hard to navigate because it's
// alphabetized. Consider using namespaces or renames to fix this?
import { keysOf, makeTable, numericKeysOf } from '../common/util/data_tables.js';
import {
keysOf,
makeTable,
makeTableRenameAndFilter,
numericKeysOf,
} from '../common/util/data_tables.js';
import { assertTypeTrue } from '../common/util/types.js';
import { unreachable } from '../common/util/util.js';
@ -263,22 +268,22 @@ export const kMinDynamicBufferOffsetAlignment = 256;
/** Default `PerShaderStage` binding limits, by spec. */
export const kPerStageBindingLimits = {
uniformBuf: { class: 'uniformBuf', max: 12 },
storageBuf: { class: 'storageBuf', max: 8 },
sampler: { class: 'sampler', max: 16 },
sampledTex: { class: 'sampledTex', max: 16 },
storageTex: { class: 'storageTex', max: 4 },
uniformBuf: { class: 'uniformBuf', maxLimit: 'maxUniformBuffersPerShaderStage' },
storageBuf: { class: 'storageBuf', maxLimit: 'maxStorageBuffersPerShaderStage' },
sampler: { class: 'sampler', maxLimit: 'maxSamplersPerShaderStage' },
sampledTex: { class: 'sampledTex', maxLimit: 'maxSampledTexturesPerShaderStage' },
storageTex: { class: 'storageTex', maxLimit: 'maxStorageTexturesPerShaderStage' },
};
/**
* Default `PerPipelineLayout` binding limits, by spec.
*/
export const kPerPipelineBindingLimits = {
uniformBuf: { class: 'uniformBuf', maxDynamic: 8 },
storageBuf: { class: 'storageBuf', maxDynamic: 4 },
sampler: { class: 'sampler', maxDynamic: 0 },
sampledTex: { class: 'sampledTex', maxDynamic: 0 },
storageTex: { class: 'storageTex', maxDynamic: 0 },
uniformBuf: { class: 'uniformBuf', maxDynamicLimit: 'maxDynamicUniformBuffersPerPipelineLayout' },
storageBuf: { class: 'storageBuf', maxDynamicLimit: 'maxDynamicStorageBuffersPerPipelineLayout' },
sampler: { class: 'sampler', maxDynamicLimit: '' },
sampledTex: { class: 'sampledTex', maxDynamicLimit: '' },
storageTex: { class: 'storageTex', maxDynamicLimit: '' },
};
const kBindingKind = {
@ -515,6 +520,17 @@ export const kShaderStageCombinationsWithStage = [1, 2, 3, 4, 5, 6, 7];
*/
export const kTextureSampleCounts = [1, 4];
// Sampler info
/** List of all mipmap filter modes. */
export const kMipmapFilterModes = ['nearest', 'linear'];
assertTypeTrue();
/** List of address modes. */
export const kAddressModes = ['clamp-to-edge', 'repeat', 'mirror-repeat'];
assertTypeTrue();
// Blend factors and Blend components
/** List of all GPUBlendFactor values. */
@ -558,62 +574,107 @@ export const kIndexFormat = ['uint16', 'uint32'];
assertTypeTrue();
/** Info for each entry of GPUSupportedLimits */
export const kLimitInfo = makeTable(
['class', 'default', 'maximumValue'],
['maximum', , kMaxUnsignedLongValue],
const [kLimitInfoKeys, kLimitInfoDefaults, kLimitInfoData] = [
['class', 'core', 'compatibility', 'maximumValue'],
['maximum', , , kMaxUnsignedLongValue],
{
maxTextureDimension1D: [, 8192],
maxTextureDimension2D: [, 8192],
maxTextureDimension3D: [, 2048],
maxTextureArrayLayers: [, 256],
maxTextureDimension1D: [, 8192, 4096],
maxTextureDimension2D: [, 8192, 4096],
maxTextureDimension3D: [, 2048, 1024],
maxTextureArrayLayers: [, 256, 256],
maxBindGroups: [, 4],
maxBindingsPerBindGroup: [, 1000],
maxDynamicUniformBuffersPerPipelineLayout: [, 8],
maxDynamicStorageBuffersPerPipelineLayout: [, 4],
maxSampledTexturesPerShaderStage: [, 16],
maxSamplersPerShaderStage: [, 16],
maxStorageBuffersPerShaderStage: [, 8],
maxStorageTexturesPerShaderStage: [, 4],
maxUniformBuffersPerShaderStage: [, 12],
maxBindGroups: [, 4, 4],
maxBindingsPerBindGroup: [, 1000, 1000],
maxDynamicUniformBuffersPerPipelineLayout: [, 8, 8],
maxDynamicStorageBuffersPerPipelineLayout: [, 4, 4],
maxSampledTexturesPerShaderStage: [, 16, 16],
maxSamplersPerShaderStage: [, 16, 16],
maxStorageBuffersPerShaderStage: [, 8, 4],
maxStorageTexturesPerShaderStage: [, 4, 4],
maxUniformBuffersPerShaderStage: [, 12, 12],
maxUniformBufferBindingSize: [, 65536, kMaxUnsignedLongLongValue],
maxStorageBufferBindingSize: [, 134217728, kMaxUnsignedLongLongValue],
minUniformBufferOffsetAlignment: ['alignment', 256],
minStorageBufferOffsetAlignment: ['alignment', 256],
maxUniformBufferBindingSize: [, 65536, 16384, kMaxUnsignedLongLongValue],
maxStorageBufferBindingSize: [, 134217728, 134217728, kMaxUnsignedLongLongValue],
minUniformBufferOffsetAlignment: ['alignment', 256, 256],
minStorageBufferOffsetAlignment: ['alignment', 256, 256],
maxVertexBuffers: [, 8],
maxBufferSize: [, 268435456, kMaxUnsignedLongLongValue],
maxVertexAttributes: [, 16],
maxVertexBufferArrayStride: [, 2048],
maxInterStageShaderComponents: [, 60],
maxInterStageShaderVariables: [, 16],
maxVertexBuffers: [, 8, 8],
maxBufferSize: [, 268435456, 268435456, kMaxUnsignedLongLongValue],
maxVertexAttributes: [, 16, 16],
maxVertexBufferArrayStride: [, 2048, 2048],
maxInterStageShaderComponents: [, 60, 60],
maxInterStageShaderVariables: [, 16, 16],
maxColorAttachments: [, 8],
maxColorAttachmentBytesPerSample: [, 32],
maxColorAttachments: [, 8, 4],
maxColorAttachmentBytesPerSample: [, 32, 32],
maxComputeWorkgroupStorageSize: [, 16384],
maxComputeInvocationsPerWorkgroup: [, 256],
maxComputeWorkgroupSizeX: [, 256],
maxComputeWorkgroupSizeY: [, 256],
maxComputeWorkgroupSizeZ: [, 64],
maxComputeWorkgroupsPerDimension: [, 65535],
}
maxComputeWorkgroupStorageSize: [, 16384, 16384],
maxComputeInvocationsPerWorkgroup: [, 256, 128],
maxComputeWorkgroupSizeX: [, 256, 128],
maxComputeWorkgroupSizeY: [, 256, 128],
maxComputeWorkgroupSizeZ: [, 64, 64],
maxComputeWorkgroupsPerDimension: [, 65535, 65535],
},
];
/**
* Feature levels corresponding to core WebGPU and WebGPU
* in compatibility mode. They can be passed to
* getDefaultLimits though if you have access to an adapter
* it's preferred to use getDefaultLimitsForAdapter.
*/
export const kFeatureLevels = ['core', 'compatibility'];
const kLimitKeys = ['class', 'default', 'maximumValue'];
const kLimitInfoCore = makeTableRenameAndFilter(
{ default: 'core' },
kLimitKeys,
kLimitInfoKeys,
kLimitInfoDefaults,
kLimitInfoData
);
const kLimitInfoCompatibility = makeTableRenameAndFilter(
{ default: 'compatibility' },
kLimitKeys,
kLimitInfoKeys,
kLimitInfoDefaults,
kLimitInfoData
);
const kLimitInfos = {
core: kLimitInfoCore,
compatibility: kLimitInfoCompatibility,
};
export const kLimitClasses = Object.fromEntries(
Object.entries(kLimitInfoCore).map(([k, { class: c }]) => [k, c])
);
export function getDefaultLimits(featureLevel) {
return kLimitInfos[featureLevel];
}
export function getDefaultLimitsForAdapter(adapter) {
// MAINTENANCE_TODO: Remove casts when GPUAdapter IDL has isCompatibilityMode.
return getDefaultLimits(adapter.isCompatibilityMode ? 'compatibility' : 'core');
}
/** List of all entries of GPUSupportedLimits. */
export const kLimits = keysOf(kLimitInfo);
export const kLimits = keysOf(kLimitInfoCore);
// Pipeline limits
/** Maximum number of color attachments to a render pass, by spec. */
export const kMaxColorAttachments = kLimitInfo.maxColorAttachments.default;
/** `maxVertexBuffers` per GPURenderPipeline, by spec. */
export const kMaxVertexBuffers = kLimitInfo.maxVertexBuffers.default;
/** `maxVertexAttributes` per GPURenderPipeline, by spec. */
export const kMaxVertexAttributes = kLimitInfo.maxVertexAttributes.default;
/** `maxVertexBufferArrayStride` in a vertex buffer in a GPURenderPipeline, by spec. */
export const kMaxVertexBufferArrayStride = kLimitInfo.maxVertexBufferArrayStride.default;
/**
* The number of color attachments to test.
* The CTS needs to generate a consistent list of tests.
* We can't use any default limits since they different from core to compat mode
* So, tests should use this value and filter out any values that are out of
* range for the current device.
*
* The test in maxColorAttachments.spec.ts tests that kMaxColorAttachmentsToTest
* is large enough to cover all devices tested.
*/
export const kMaxColorAttachmentsToTest = 32;
/** The size of indirect draw parameters in the indirectBuffer of drawIndirect */
export const kDrawIndirectParametersSize = 4;

View file

@ -0,0 +1,45 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests limitations of copyTextureToBuffer in compat mode.
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { kCompressedTextureFormats, kTextureFormatInfo } from '../../../../../format_info.js';
import { align } from '../../../../../util/math.js';
import { CompatibilityTest } from '../../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
// Registers a compat-mode validation test: copyTextureToBuffer with a compressed
// source texture is disallowed, and the error must surface when the encoder finishes.
g.test('compressed')
  .desc(`Tests that you can not call copyTextureToBuffer with compressed textures in compat mode.`)
  .params(u => u.combine('format', kCompressedTextureFormats))
  .beforeAllSubcases(t => {
    // Compressed formats are gated behind optional features; skip if unsupported.
    t.selectDeviceOrSkipTestCase([kTextureFormatInfo[t.params.format].feature]);
  })
  .fn(t => {
    const { format } = t.params;
    const info = kTextureFormatInfo[format];
    const copySize = [info.blockWidth, info.blockHeight, 1];

    // Source: a single-block compressed texture usable as a copy source.
    const srcTexture = t.device.createTexture({
      size: copySize,
      format,
      usage: GPUTextureUsage.COPY_SRC,
    });
    t.trackForCleanup(srcTexture);

    // Destination: one row's worth of bytes, padded to the required 256-byte alignment.
    const rowPitch = align(info.bytesPerBlock, 256);
    const dstBuffer = t.device.createBuffer({
      size: rowPitch,
      usage: GPUBufferUsage.COPY_DST,
    });
    t.trackForCleanup(dstBuffer);

    const commandEncoder = t.device.createCommandEncoder();
    commandEncoder.copyTextureToBuffer(
      { texture: srcTexture },
      { buffer: dstBuffer, bytesPerRow: rowPitch },
      copySize
    );

    // The invalid copy is reported as a validation error at finish() time.
    t.expectGPUError('validation', () => {
      commandEncoder.finish();
    });
  });

View file

@ -0,0 +1,391 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests limitations of bind group usage in a pipeline in compat mode.
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { keysOf } from '../../../../../../common/util/data_tables.js';
import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
import { CompatibilityTest } from '../../../../compatibility_test.js';
const kTextureTypes = ['regular', 'storage'];
/**
 * Maps a test texture type ('regular' | 'storage') to the WGSL type used to
 * declare a binding of that kind in the generated shaders.
 */
function getTextureTypeWGSL(textureType) {
  if (textureType === 'storage') {
    return 'texture_storage_2d<rgba8unorm, write>';
  }
  return 'texture_2d<f32>';
}
/**
 * Gets the WGSL needed for testing a render pipeline using texture_2d or texture_storage_2d
 * and either 2 bindgroups or 1.
 *
 * With 'one bindgroup', both textures are declared in @group(0) at bindings 0
 * and 1; otherwise the second texture is at @group(1) @binding(0). The
 * fragment shader statically "uses" both bindings via phony assignments.
 */
function getRenderShaderModule(device, textureType, bindConfig) {
  const textureTypeWGSL = getTextureTypeWGSL(textureType);
  // 'one bindgroup' => group 0 / binding 1; otherwise group 1 / binding 0.
  const secondGroup = bindConfig === 'one bindgroup' ? 0 : 1;
  const secondBinding = secondGroup === 0 ? 1 : 0;
  return device.createShaderModule({
    code: `
@vertex
fn vs(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
var pos = array(
vec4f(-1, 3, 0, 1),
vec4f( 3, -1, 0, 1),
vec4f(-1, -1, 0, 1));
return pos[VertexIndex];
}
@group(0) @binding(0) var tex0 : ${textureTypeWGSL};
@group(${secondGroup}) @binding(${secondBinding}) var tex1 : ${textureTypeWGSL};
@fragment
fn fs(@builtin(position) pos: vec4f) -> @location(0) vec4f {
_ = tex0;
_ = tex1;
return vec4f(0);
}
`,
  });
}
/**
 * Gets the WGSL needed for testing a compute pipeline using texture_2d or texture_storage_2d
 * and either 2 bindgroups or 1.
 *
 * Binding layout mirrors getRenderShaderModule: 'one bindgroup' puts both
 * textures in @group(0); otherwise the second texture is at @group(1).
 */
function getComputeShaderModule(device, textureType, bindConfig) {
  const textureTypeWGSL = getTextureTypeWGSL(textureType);
  // 'one bindgroup' => group 0 / binding 1; otherwise group 1 / binding 0.
  const secondGroup = bindConfig === 'one bindgroup' ? 0 : 1;
  const secondBinding = secondGroup === 0 ? 1 : 0;
  return device.createShaderModule({
    code: `
@group(0) @binding(0) var tex0 : ${textureTypeWGSL};
@group(${secondGroup}) @binding(${secondBinding}) var tex1 : ${textureTypeWGSL};
@compute @workgroup_size(1)
fn cs() {
_ = tex0;
_ = tex1;
}
`,
  });
}
/**
 * Bind-group arrangements under test. Each case binds views of `texture` to
 * `pipeline` on `encoder` and returns whether draw/dispatch should validate.
 * `bindConfig` tells the shader generators whether the pipeline expects both
 * textures in one bind group or split across two.
 */
const kBindCases = {
  'incompatible views in the same bindGroup': {
    bindConfig: 'one bindgroup',
    fn(device, pipeline, encoder, texture) {
      // Two single-mip views of *different* mip levels in one bind group.
      const bindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
          { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      encoder.setBindGroup(0, bindGroup);
      return { shouldSucceed: false };
    },
  },
  'incompatible views in different bindGroups': {
    bindConfig: 'two bindgroups',
    fn(device, pipeline, encoder, texture) {
      // Different mip levels, one per bind group — still incompatible.
      const bindGroup0 = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
        ],
      });
      const bindGroup1 = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(1),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      encoder.setBindGroup(0, bindGroup0);
      encoder.setBindGroup(1, bindGroup1);
      return { shouldSucceed: false };
    },
  },
  'can bind same view in different bindGroups': {
    bindConfig: 'two bindgroups',
    fn(device, pipeline, encoder, texture) {
      // Both groups view the same mip level; this should be allowed.
      const bindGroup0 = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      const bindGroup1 = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(1),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      encoder.setBindGroup(0, bindGroup0);
      encoder.setBindGroup(1, bindGroup1);
      return { shouldSucceed: true };
    },
  },
  'binding incompatible bindGroups then fix': {
    bindConfig: 'one bindgroup',
    fn(device, pipeline, encoder, texture) {
      // Bind a bad group first, then replace it with a good one before
      // draw/dispatch: validation must only consider the final binding.
      const badBindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
          { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      const goodBindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
          { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
        ],
      });
      encoder.setBindGroup(0, badBindGroup);
      encoder.setBindGroup(0, goodBindGroup);
      return { shouldSucceed: true };
    },
  },
};
/**
 * Binds two bind groups to `encoder`, where bind group N holds a single-mip
 * view of mip level N of `texture` — i.e. two different views of the same
 * texture, one per group.
 */
function createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(
  device,
  pipeline,
  encoder,
  texture
) {
  for (let groupNdx = 0; groupNdx < 2; ++groupNdx) {
    const bindGroup = device.createBindGroup({
      layout: pipeline.getBindGroupLayout(groupNdx),
      entries: [
        {
          binding: 0,
          resource: texture.createView({ baseMipLevel: groupNdx, mipLevelCount: 1 }),
        },
      ],
    });
    encoder.setBindGroup(groupNdx, bindGroup);
  }
}
const kBindCaseNames = keysOf(kBindCases);
/** Draw calls used to trigger validation in the render-pass tests. */
const kDrawUseCases = {
  draw: (t, encoder) => {
    encoder.draw(3);
  },
  drawIndexed: (t, encoder) => {
    const indexBuffer = t.makeBufferWithContents(new Uint16Array([0, 1, 2]), GPUBufferUsage.INDEX);
    encoder.setIndexBuffer(indexBuffer, 'uint16');
    encoder.drawIndexed(3);
  },
  drawIndirect(t, encoder) {
    // Indirect params: vertexCount, instanceCount, firstVertex, firstInstance.
    const indirectBuffer = t.makeBufferWithContents(
      new Uint32Array([3, 1, 0, 0]),
      GPUBufferUsage.INDIRECT
    );
    encoder.drawIndirect(indirectBuffer, 0);
  },
  drawIndexedIndirect(t, encoder) {
    const indexBuffer = t.makeBufferWithContents(new Uint16Array([0, 1, 2]), GPUBufferUsage.INDEX);
    encoder.setIndexBuffer(indexBuffer, 'uint16');
    // Indirect params: indexCount, instanceCount, firstIndex, baseVertex, firstInstance.
    const indirectBuffer = t.makeBufferWithContents(
      new Uint32Array([3, 1, 0, 0, 0]),
      GPUBufferUsage.INDIRECT
    );
    encoder.drawIndexedIndirect(indirectBuffer, 0);
  },
};
const kDrawCaseNames = keysOf(kDrawUseCases);
/** Dispatch calls used to trigger validation in the compute-pass tests. */
const kDispatchUseCases = {
  dispatchWorkgroups(t, encoder) {
    encoder.dispatchWorkgroups(1);
  },
  dispatchWorkgroupsIndirect(t, encoder) {
    const indirectBuffer = t.makeBufferWithContents(
      new Uint32Array([1, 1, 1]),
      GPUBufferUsage.INDIRECT
    );
    encoder.dispatchWorkgroupsIndirect(indirectBuffer, 0);
  },
};
const kDispatchCaseNames = keysOf(kDispatchUseCases);
/**
 * Creates the resources used by the render-pass tests: a 2-mip rgba8unorm
 * texture plus a render pipeline built from the render shader for the given
 * textureType/bindConfig. The texture is tracked for cleanup.
 */
function createResourcesForRenderPassTest(t, textureType, bindConfig) {
  const usage =
    textureType === 'storage' ? GPUTextureUsage.STORAGE_BINDING : GPUTextureUsage.TEXTURE_BINDING;
  const texture = t.device.createTexture({
    size: [2, 1, 1],
    mipLevelCount: 2,
    format: 'rgba8unorm',
    usage,
  });
  t.trackForCleanup(texture);
  const shaderModule = getRenderShaderModule(t.device, textureType, bindConfig);
  const pipeline = t.device.createRenderPipeline({
    layout: 'auto',
    vertex: { module: shaderModule, entryPoint: 'vs' },
    fragment: {
      module: shaderModule,
      entryPoint: 'fs',
      targets: [{ format: 'rgba8unorm' }],
    },
  });
  return { texture, pipeline };
}
/**
 * Creates the resources used by the compute-pass tests: a 2-mip rgba8unorm
 * texture plus a compute pipeline built from the compute shader for the given
 * textureType/bindConfig. The texture is tracked for cleanup.
 */
function createResourcesForComputePassTest(t, textureType, bindConfig) {
  const usage =
    textureType === 'storage' ? GPUTextureUsage.STORAGE_BINDING : GPUTextureUsage.TEXTURE_BINDING;
  const texture = t.device.createTexture({
    size: [2, 1, 1],
    mipLevelCount: 2,
    format: 'rgba8unorm',
    usage,
  });
  t.trackForCleanup(texture);
  const shaderModule = getComputeShaderModule(t.device, textureType, bindConfig);
  const pipeline = t.device.createComputePipeline({
    layout: 'auto',
    compute: { module: shaderModule, entryPoint: 'cs' },
  });
  return { texture, pipeline };
}
export const g = makeTestGroup(CompatibilityTest);
g.test('twoDifferentTextureViews,render_pass,used')
.desc(
`
Tests that you can not use 2 different views of the same texture in a render pass in compat mode.
- Test you can not use incompatible views in the same bindGroup
- Test you can not use incompatible views in different bindGroups
- Test you can bind the same view in different bindGroups
- Test binding incompatible bindGroups is ok as long as they are fixed before draw/dispatch
The last test is to check validation happens at the correct time (draw/dispatch) and not
at setBindGroup.
`
)
.params(u =>
u
.combine('encoderType', kRenderEncodeTypes)
.combine('bindCase', kBindCaseNames)
.combine('useCase', kDrawCaseNames)
.combine('textureType', kTextureTypes)
.filter(
// storage textures can't have 2 bind groups point to the same
// view even in non-compat. They can have different views in
// non-compat but not compat.
p =>
!(
p.textureType === 'storage' &&
(p.bindCase === 'can bind same view in different bindGroups' ||
p.bindCase === 'binding incompatible bindGroups then fix')
)
)
)
.fn(t => {
const { encoderType, bindCase, useCase, textureType } = t.params;
const { bindConfig, fn } = kBindCases[bindCase];
const { texture, pipeline } = createResourcesForRenderPassTest(t, textureType, bindConfig);
const { encoder, validateFinish } = t.createEncoder(encoderType);
encoder.setPipeline(pipeline);
const { shouldSucceed } = fn(t.device, pipeline, encoder, texture);
kDrawUseCases[useCase](t, encoder);
validateFinish(shouldSucceed);
});
g.test('twoDifferentTextureViews,render_pass,unused')
.desc(
`
Tests that binding 2 different views of the same texture but not using them does not generate a validation error.
`
)
.params(u => u.combine('encoderType', kRenderEncodeTypes).combine('textureType', kTextureTypes))
.fn(t => {
const { encoderType, textureType } = t.params;
const { texture, pipeline } = createResourcesForRenderPassTest(
t,
textureType,
'two bindgroups'
);
const { encoder, validateFinish } = t.createEncoder(encoderType);
encoder.setPipeline(pipeline);
createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(t.device, pipeline, encoder, texture);
validateFinish(true);
});
g.test('twoDifferentTextureViews,compute_pass,used')
.desc(
`
Tests that you can not use 2 different views of the same texture in a compute pass in compat mode.
- Test you can not use incompatible views in the same bindGroup
- Test you can not use incompatible views in different bindGroups
- Test can bind the same view in different bindGroups
- Test that binding incompatible bindGroups is ok as long as they are fixed before draw/dispatch
The last test is to check validation happens at the correct time (draw/dispatch) and not
at setBindGroup.
`
)
.params(u =>
u
.combine('bindCase', kBindCaseNames)
.combine('useCase', kDispatchCaseNames)
.combine('textureType', kTextureTypes)
.filter(
// storage textures can't have 2 bind groups point to the same
// view even in non-compat. They can have different views in
// non-compat but not compat.
p =>
!(
p.textureType === 'storage' &&
(p.bindCase === 'can bind same view in different bindGroups' ||
p.bindCase === 'binding incompatible bindGroups then fix')
)
)
)
.fn(t => {
const { bindCase, useCase, textureType } = t.params;
const { bindConfig, fn } = kBindCases[bindCase];
const { texture, pipeline } = createResourcesForComputePassTest(t, textureType, bindConfig);
const { encoder, validateFinish } = t.createEncoder('compute pass');
encoder.setPipeline(pipeline);
const { shouldSucceed } = fn(t.device, pipeline, encoder, texture);
kDispatchUseCases[useCase](t, encoder);
validateFinish(shouldSucceed);
});
g.test('twoDifferentTextureViews,compute_pass,unused')
.desc(
`
Tests that binding 2 different views of the same texture but not using them does not generate a validation error.
`
)
.params(u => u.combine('textureType', kTextureTypes))
.fn(t => {
const { textureType } = t.params;
const { texture, pipeline } = createResourcesForComputePassTest(
t,
textureType,
'two bindgroups'
);
const { encoder, validateFinish } = t.createEncoder('compute pass');
encoder.setPipeline(pipeline);
createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(t.device, pipeline, encoder, texture);
validateFinish(true);
});

View file

@ -0,0 +1,127 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests that you can not create a render pipeline with different per target blend state or write mask in compat mode.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { keysOf } from '../../../../../common/util/data_tables.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
/**
 * Mutators for the color-target array used by the 'colorState' test. Each
 * entry optionally perturbs one target's blend state or write mask and
 * returns whether the resulting pipeline should be valid in compat mode
 * (all targets must have identical blend state and write mask).
 */
const cases = {
  default: ts => true,
  noBlendTarget0: ts => {
    delete ts[0].blend;
    return false;
  },
  noBlendTarget1: ts => {
    delete ts[2].blend;
    return false;
  },
  colorOperation: ts => {
    ts[2].blend.color.operation = 'subtract';
    return false;
  },
  colorSrcFactor: ts => {
    ts[2].blend.color.srcFactor = 'src-alpha';
    return false;
  },
  colorDstFactor: ts => {
    ts[2].blend.color.dstFactor = 'dst-alpha';
    return false;
  },
  alphaOperation: ts => {
    ts[2].blend.alpha.operation = 'subtract';
    return false;
  },
  alphaSrcFactor: ts => {
    ts[2].blend.alpha.srcFactor = 'src-alpha';
    return false;
  },
  alphaDstFactor: ts => {
    ts[2].blend.alpha.dstFactor = 'dst-alpha';
    return false;
  },
  writeMask: ts => {
    ts[2].writeMask = GPUColorWrite.GREEN;
    return false;
  },
};
const caseNames = keysOf(cases);
g.test('colorState')
.desc(
`
Tests that you can not create a render pipeline with different per target blend state or write mask in compat mode.
- Test no blend state vs some blend state
- Test different operation, srcFactor, dstFactor for color and alpha
- Test different writeMask
`
)
.params(u => u.combine('caseName', caseNames))
.fn(t => {
const { caseName } = t.params;
const module = t.device.createShaderModule({
code: `
@vertex fn vs() -> @builtin(position) vec4f {
return vec4f(0);
}
struct FragmentOut {
@location(0) fragColor0 : vec4f,
@location(1) fragColor1 : vec4f,
@location(2) fragColor2 : vec4f,
}
@fragment fn fs() -> FragmentOut {
var output : FragmentOut;
output.fragColor0 = vec4f(0);
output.fragColor1 = vec4f(0);
output.fragColor2 = vec4f(0);
return output;
}
`,
});
const targets = [
{
format: 'rgba8unorm',
blend: {
color: {},
alpha: {},
},
},
null,
{
format: 'rgba8unorm',
blend: {
color: {},
alpha: {},
},
},
];
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
},
fragment: {
module,
entryPoint: 'fs',
targets,
},
};
const isValid = cases[caseName](targets);
t.expectGPUError(
'validation',
() => t.device.createRenderPipeline(pipelineDescriptor),
!isValid
);
});

View file

@ -0,0 +1,73 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests limitations of createRenderPipeline related to shader modules in compat mode.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('sample_mask')
.desc(
`
Tests that you can not create a render pipeline with a shader module that uses sample_mask in compat mode.
- Test that a pipeline with a shader that uses sample_mask fails.
- Test that a pipeline that references a module that has a shader that uses sample_mask
but the pipeline does not reference that shader succeeds.
`
)
.params(u => u.combine('entryPoint', ['fsWithoutSampleMaskUsage', 'fsWithSampleMaskUsage']))
.fn(t => {
const { entryPoint } = t.params;
const module = t.device.createShaderModule({
code: `
@vertex fn vs() -> @builtin(position) vec4f {
return vec4f(1);
}
struct Output {
@builtin(sample_mask) mask_out: u32,
@location(0) color : vec4f,
}
@fragment fn fsWithoutSampleMaskUsage() -> @location(0) vec4f {
return vec4f(1.0, 1.0, 1.0, 1.0);
}
@fragment fn fsWithSampleMaskUsage() -> Output {
var o: Output;
// We need to make sure this sample_mask isn't optimized out even if its value equals "no op".
o.mask_out = 0xFFFFFFFFu;
o.color = vec4f(1.0, 1.0, 1.0, 1.0);
return o;
}
`,
});
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
},
fragment: {
module,
entryPoint,
targets: [
{
format: 'rgba8unorm',
},
],
},
multisample: {
count: 4,
},
};
const isValid = entryPoint === 'fsWithoutSampleMaskUsage';
t.expectGPUError(
'validation',
() => t.device.createRenderPipeline(pipelineDescriptor),
!isValid
);
});

View file

@ -0,0 +1,92 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests limitations of createRenderPipeline related to vertex state in compat mode.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { range } from '../../../../../common/util/util.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('maxVertexAttributesVertexIndexInstanceIndex')
.desc(
`
Tests @builtin(vertex_index) and @builtin(instance_index) each count as an attribute.
- Test that you can use maxVertexAttributes
- Test that you can not use maxVertexAttributes and @builtin(vertex_index)
- Test that you can not use maxVertexAttributes and @builtin(instance_index)
- Test that you can use maxVertexAttributes - 1 and @builtin(vertex_index)
- Test that you can use maxVertexAttributes - 1 and @builtin(instance_index)
- Test that you can not use maxVertexAttributes - 1 and both @builtin(vertex_index) and @builtin(instance_index)
- Test that you can use maxVertexAttributes - 2 and both @builtin(vertex_index) and @builtin(instance_index)
`
)
.params(u =>
u
.combine('useVertexIndex', [false, true])
.combine('useInstanceIndex', [false, true])
.combine('numAttribsToReserve', [0, 1, 2])
.combine('isAsync', [false, true])
)
.fn(t => {
const { useVertexIndex, useInstanceIndex, numAttribsToReserve, isAsync } = t.params;
const numAttribs = t.device.limits.maxVertexAttributes - numAttribsToReserve;
const numBuiltinsUsed = (useVertexIndex ? 1 : 0) + (useInstanceIndex ? 1 : 0);
const isValid = numAttribs + numBuiltinsUsed <= t.device.limits.maxVertexAttributes;
const inputs = range(numAttribs, i => `@location(${i}) v${i}: vec4f`);
const outputs = range(numAttribs, i => `v${i}`);
if (useVertexIndex) {
inputs.push('@builtin(vertex_index) vNdx: u32');
outputs.push('vec4f(f32(vNdx))');
}
if (useInstanceIndex) {
inputs.push('@builtin(instance_index) iNdx: u32');
outputs.push('vec4f(f32(iNdx))');
}
const module = t.device.createShaderModule({
code: `
@fragment fn fs() -> @location(0) vec4f {
return vec4f(1);
}
@vertex fn vs(${inputs.join(', ')}) -> @builtin(position) vec4f {
return ${outputs.join(' + ')};
}
`,
});
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
{
arrayStride: 16,
attributes: range(numAttribs, i => ({
shaderLocation: i,
format: 'float32x4',
offset: 0,
})),
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [
{
format: 'rgba8unorm',
},
],
},
};
t.doCreateRenderPipelineTest(isAsync, isValid, pipelineDescriptor);
});

View file

@ -0,0 +1,42 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests that you can not use bgra8unorm-srgb in compat mode.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('unsupportedTextureFormats')
  .desc(`Tests that you can not create a bgra8unorm-srgb texture in compat mode.`)
  .fn(t => {
    // bgra8unorm-srgb is not a valid texture format in compat mode, so
    // createTexture must produce a validation error.
    const descriptor = {
      size: [1, 1, 1],
      format: 'bgra8unorm-srgb',
      usage: GPUTextureUsage.TEXTURE_BINDING,
    };
    t.expectGPUError('validation', () => t.device.createTexture(descriptor), true);
  });
g.test('unsupportedTextureViewFormats')
  .desc(
    `Tests that you can not create a bgra8unorm texture with a bgra8unorm-srgb viewFormat in compat mode.`
  )
  .fn(t => {
    // The base format is allowed, but listing the -srgb variant in
    // viewFormats must fail validation in compat mode.
    const descriptor = {
      size: [1, 1, 1],
      format: 'bgra8unorm',
      viewFormats: ['bgra8unorm-srgb'],
      usage: GPUTextureUsage.TEXTURE_BINDING,
    };
    t.expectGPUError('validation', () => t.device.createTexture(descriptor), true);
  });

View file

@ -0,0 +1,27 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Tests that you can not create cube array views in compat mode.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('cube_array')
  .desc('Test you cannot create a cube array texture view.')
  .params(u => u.combine('dimension', ['cube', 'cube-array']))
  .fn(t => {
    const { dimension } = t.params;
    // A 6-layer texture can be viewed as a cube, but never as a cube-array
    // in compat mode.
    const texture = t.device.createTexture({
      size: [1, 1, 6],
      format: 'rgba8unorm',
      usage: GPUTextureUsage.TEXTURE_BINDING,
    });
    const expectError = dimension !== 'cube';
    t.expectGPUError(
      'validation',
      () => texture.createView({ dimension, format: 'rgba8unorm' }),
      expectError
    );
  });

View file

@ -0,0 +1,11 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { ValidationTest } from '../api/validation/validation_test.js';
/**
 * Base fixture for WebGPU compatibility-mode tests. Skips every test when the
 * harness is not running in compatibility mode.
 */
export class CompatibilityTest extends ValidationTest {
  async init() {
    await super.init();
    if (!this.isCompatibility) {
      this.skip('compatibility tests do not work on non-compatibility mode');
    }
  }
}

View file

@ -2,6 +2,7 @@
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { keysOf } from '../common/util/data_tables.js';
import { assert } from '../common/util/util.js';
import { align } from './util/math.js';
//
// Texture format tables
@ -644,6 +645,24 @@ const kRegularTextureFormatInfo = formatTableWithDefaults({
// plain, mixed component width, 32 bits per texel
rgb10a2uint: {
color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 4 },
colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 4 },
renderable: true,
get renderTargetComponentAlignment() {
return this.colorRender.alignment;
},
get renderTargetPixelByteCost() {
return this.colorRender.byteCost;
},
multisample: true,
get sampleType() {
return this.color.type;
},
get bytesPerBlock() {
return this.color.bytes;
},
},
rgb10a2unorm: {
color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 4 },
@ -1471,4 +1490,28 @@ export function filterFormatsByFeature(feature, formats) {
return formats.filter(f => f === undefined || kTextureFormatInfo[f].feature === feature);
}
/** Returns true when `format` is one of the compressed texture formats. */
export function isCompressedTextureFormat(format) {
  // Reflect.has is equivalent to the `in` operator.
  return Reflect.has(kCompressedTextureFormatInfo, format);
}
export const kFeaturesForFormats = getFeaturesForFormats(kTextureFormats);
/**
 * Given an array of texture formats return the number of bytes per sample.
 * Each format's colorRender byteCost is added after aligning the running
 * total to that format's colorRender alignment.
 */
export function computeBytesPerSampleFromFormats(formats) {
  return formats.reduce((total, format) => {
    const { alignment, byteCost } = kTextureFormatInfo[format].colorRender;
    return align(total, alignment) + byteCost;
  }, 0);
}
/**
 * Given an array of GPUColorTargetState return the number of bytes per sample.
 */
export function computeBytesPerSample(targets) {
  const formats = targets.map(target => target.format);
  return computeBytesPerSampleFromFormats(formats);
}

View file

@ -1,13 +1,15 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ import { Fixture, SubcaseBatchState } from '../common/framework/fixture.js';
import { assert, range, unreachable } from '../common/util/util.js';
**/ import { Fixture, SkipTestCase, SubcaseBatchState } from '../common/framework/fixture.js';
import { globalTestConfig } from '../common/framework/test_config.js';
import { assert, makeValueTestVariant, memcpy, range, unreachable } from '../common/util/util.js';
import { kQueryTypeInfo } from './capability_info.js';
import { getDefaultLimits, kQueryTypeInfo } from './capability_info.js';
import {
kTextureFormatInfo,
kEncodableTextureFormats,
resolvePerAspectFormat,
isCompressedTextureFormat,
} from './format_info.js';
import { makeBufferWithContents } from './util/buffer.js';
import { checkElementsEqual, checkElementsBetween } from './util/check_contents.js';
@ -16,7 +18,12 @@ import { CommandBufferMaker } from './util/command_buffer_maker.js';
import { DevicePool } from './util/device_pool.js';
import { align, roundDown } from './util/math.js';
import { createTextureFromTexelView, createTextureFromTexelViews } from './util/texture.js';
import { getTextureCopyLayout, getTextureSubCopyLayout } from './util/texture/layout.js';
import { physicalMipSizeFromTexture, virtualMipSize } from './util/texture/base.js';
import {
bytesInACompleteRow,
getTextureCopyLayout,
getTextureSubCopyLayout,
} from './util/texture/layout.js';
import { kTexelRepresentationInfo } from './util/texture/texel_data.js';
import { TexelView } from './util/texture/texel_view.js';
import { textureContentIsOKByT2B } from './util/texture/texture_ok.js';
@ -75,6 +82,14 @@ export class GPUTestSubcaseBatchState extends SubcaseBatchState {
return this.provider;
}
get isCompatibility() {
return globalTestConfig.compatibility;
}
getDefaultLimits() {
return getDefaultLimits(this.isCompatibility ? 'compatibility' : 'core');
}
/**
* Some tests or cases need particular feature flags or limits to be enabled.
* Call this function with a descriptor or feature name (or `undefined`) to select a
@ -105,6 +120,7 @@ export class GPUTestSubcaseBatchState extends SubcaseBatchState {
const features = new Set();
for (const format of formats) {
if (format !== undefined) {
this.skipIfTextureFormatNotSupported(format);
features.add(kTextureFormatInfo[format].feature);
}
}
@ -151,6 +167,51 @@ export class GPUTestSubcaseBatchState extends SubcaseBatchState {
// Suppress uncaught promise rejection (we'll catch it later).
this.mismatchedProvider.catch(() => {});
}
/** Throws an exception marking the subcase as skipped. */
skip(msg) {
  throw new SkipTestCase(msg);
}
/**
 * Throws an exception marking the subcase as skipped if `cond` is true.
 * `msg` may be a string or a lazy `() => string` (only evaluated on skip).
 */
skipIf(cond, msg = '') {
  if (cond) {
    this.skip(typeof msg === 'function' ? msg() : msg);
  }
}
/**
* Skips test if any format is not supported.
*/
skipIfTextureFormatNotSupported(...formats) {
if (this.isCompatibility) {
for (const format of formats) {
if (format === 'bgra8unorm-srgb') {
this.skip(`texture format '${format} is not supported`);
}
}
}
}
skipIfCopyTextureToTextureNotSupportedForFormat(...formats) {
if (this.isCompatibility) {
for (const format of formats) {
if (format && isCompressedTextureFormat(format)) {
this.skip(`copyTextureToTexture with ${format} is not supported`);
}
}
}
}
skipIfTextureViewDimensionNotSupported(...dimensions) {
if (this.isCompatibility) {
for (const dimension of dimensions) {
if (dimension === 'cube-array') {
this.skip(`texture view dimension '${dimension}' is not supported`);
}
}
}
}
}
/**
@ -175,6 +236,26 @@ export class GPUTestBase extends Fixture {
return this.device.queue;
}
/** True if running in WebGPU compatibility mode. */
get isCompatibility() {
  return globalTestConfig.compatibility;
}
/** Returns the default limits table for the current mode (core vs compatibility). */
getDefaultLimits() {
  return getDefaultLimits(this.isCompatibility ? 'compatibility' : 'core');
}
/** Returns the default value of a single limit for the current mode. */
getDefaultLimit(limit) {
  return this.getDefaultLimits()[limit].default;
}
/** Resolves a limit test variant against the device's actual limit value. */
makeLimitVariant(limit, variant) {
  return makeValueTestVariant(this.device.limits[limit], variant);
}
/** copyTextureToBuffer with compressed formats is unsupported in compat mode. */
canCallCopyTextureToBufferWithTextureFormat(format) {
  return !this.isCompatibility || !isCompressedTextureFormat(format);
}
/** Snapshot a GPUBuffer's contents, returning a new GPUBuffer with the `MAP_READ` usage. */
createCopyForMapRead(src, srcOffset, size) {
assert(srcOffset % 4 === 0);
@ -253,6 +334,29 @@ export class GPUTestBase extends Fixture {
};
}
/**
* Skips test if any format is not supported.
*/
skipIfTextureFormatNotSupported(...formats) {
if (this.isCompatibility) {
for (const format of formats) {
if (format === 'bgra8unorm-srgb') {
this.skip(`texture format '${format} is not supported`);
}
}
}
}
skipIfTextureViewDimensionNotSupported(...dimensions) {
if (this.isCompatibility) {
for (const dimension of dimensions) {
if (dimension === 'cube-array') {
this.skip(`texture view dimension '${dimension}' is not supported`);
}
}
}
}
/**
* Expect a GPUBuffer's contents to pass the provided check.
*
@ -887,6 +991,65 @@ export class GPUTest extends GPUTestBase {
* related expectation helpers.
*/
const s_deviceToResourcesMap = new WeakMap();
/**
 * Gets a (cached) pipeline to render a texture to an rgba8unorm texture.
 * The pipeline is created once per device and memoized in
 * s_deviceToResourcesMap; it draws a full-screen triangle that samples the
 * bound texture.
 */
function getPipelineToRenderTextureToRGB8UnormTexture(device) {
  if (!s_deviceToResourcesMap.has(device)) {
    const module = device.createShaderModule({
      code: `
struct VSOutput {
@builtin(position) position: vec4f,
@location(0) texcoord: vec2f,
};
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> VSOutput {
let pos = array(
vec2f(-1, -1),
vec2f(-1, 3),
vec2f( 3, -1),
);
var vsOutput: VSOutput;
let xy = pos[vertexIndex];
vsOutput.position = vec4f(xy, 0.0, 1.0);
vsOutput.texcoord = xy * vec2f(0.5, -0.5) + vec2f(0.5);
return vsOutput;
}
@group(0) @binding(0) var ourSampler: sampler;
@group(0) @binding(1) var ourTexture: texture_2d<f32>;
@fragment fn fs(fsInput: VSOutput) -> @location(0) vec4f {
return textureSample(ourTexture, ourSampler, fsInput.texcoord);
}
`,
    });
    const pipeline = device.createRenderPipeline({
      layout: 'auto',
      vertex: {
        module,
        entryPoint: 'vs',
      },
      fragment: {
        module,
        entryPoint: 'fs',
        targets: [{ format: 'rgba8unorm' }],
      },
    });
    s_deviceToResourcesMap.set(device, { pipeline });
  }
  const { pipeline } = s_deviceToResourcesMap.get(device);
  return pipeline;
}
export function TextureTestMixin(Base) {
class TextureExpectations extends Base {
createTextureFromTexelView(texelView, desc) {
@ -999,6 +1162,228 @@ export function TextureTestMixin(Base) {
)
);
}
expectTexturesToMatchByRendering(actualTexture, expectedTexture, mipLevel, origin, size) {
// Render every layer of both textures at mipLevel to an rgba8unorm texture
// that matches the size of the mipLevel. After each render, copy the
// result to a buffer and expect the results from both textures to match.
const pipeline = getPipelineToRenderTextureToRGB8UnormTexture(this.device);
const readbackPromisesPerTexturePerLayer = [actualTexture, expectedTexture].map(
(texture, ndx) => {
const attachmentSize = virtualMipSize('2d', [texture.width, texture.height, 1], mipLevel);
const attachment = this.device.createTexture({
label: `readback${ndx}`,
size: attachmentSize,
format: 'rgba8unorm',
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
});
this.trackForCleanup(attachment);
const sampler = this.device.createSampler();
const numLayers = texture.depthOrArrayLayers;
const readbackPromisesPerLayer = [];
for (let layer = 0; layer < numLayers; ++layer) {
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: sampler },
{
binding: 1,
resource: texture.createView({
baseMipLevel: mipLevel,
mipLevelCount: 1,
baseArrayLayer: layer,
arrayLayerCount: 1,
dimension: '2d',
}),
},
],
});
const encoder = this.device.createCommandEncoder();
const pass = encoder.beginRenderPass({
colorAttachments: [
{
view: attachment.createView(),
clearValue: [0.5, 0.5, 0.5, 0.5],
loadOp: 'clear',
storeOp: 'store',
},
],
});
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.draw(3);
pass.end();
this.queue.submit([encoder.finish()]);
const buffer = this.copyWholeTextureToNewBufferSimple(attachment, 0);
readbackPromisesPerLayer.push(
this.readGPUBufferRangeTyped(buffer, {
type: Uint8Array,
typedLength: buffer.size,
})
);
}
return readbackPromisesPerLayer;
}
);
this.eventualAsyncExpectation(async niceStack => {
const readbacksPerTexturePerLayer = [];
// Wait for all buffers to be ready
for (const readbackPromises of readbackPromisesPerTexturePerLayer) {
readbacksPerTexturePerLayer.push(await Promise.all(readbackPromises));
}
function arrayNotAllTheSameValue(arr, msg) {
const first = arr[0];
return arr.length <= 1 || arr.findIndex(v => v !== first) >= 0
? undefined
: Error(`array is entirely ${first} so likely nothing was tested: ${msg || ''}`);
}
// Compare each layer of each texture as read from buffer.
const [actualReadbacksPerLayer, expectedReadbacksPerLayer] = readbacksPerTexturePerLayer;
for (let layer = 0; layer < actualReadbacksPerLayer.length; ++layer) {
const actualReadback = actualReadbacksPerLayer[layer];
const expectedReadback = expectedReadbacksPerLayer[layer];
const sameOk =
size.width === 0 ||
size.height === 0 ||
layer < origin.z ||
layer >= origin.z + size.depthOrArrayLayers;
this.expectOK(
sameOk ? undefined : arrayNotAllTheSameValue(actualReadback.data, 'actualTexture')
);
this.expectOK(
sameOk ? undefined : arrayNotAllTheSameValue(expectedReadback.data, 'expectedTexture')
);
this.expectOK(checkElementsEqual(actualReadback.data, expectedReadback.data), {
mode: 'fail',
niceStack,
});
actualReadback.cleanup();
expectedReadback.cleanup();
}
});
}
copyWholeTextureToNewBufferSimple(texture, mipLevel) {
const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[texture.format];
const mipSize = physicalMipSizeFromTexture(texture, mipLevel);
assert(bytesPerBlock !== undefined);
const blocksPerRow = mipSize[0] / blockWidth;
const blocksPerColumn = mipSize[1] / blockHeight;
assert(blocksPerRow % 1 === 0);
assert(blocksPerColumn % 1 === 0);
const bytesPerRow = align(blocksPerRow * bytesPerBlock, 256);
const byteLength = bytesPerRow * blocksPerColumn * mipSize[2];
return this.copyWholeTextureToNewBuffer(
{ texture, mipLevel },
{
bytesPerBlock,
bytesPerRow,
rowsPerImage: blocksPerColumn,
byteLength,
}
);
}
copyWholeTextureToNewBuffer({ texture, mipLevel }, resultDataLayout) {
const { byteLength, bytesPerRow, rowsPerImage } = resultDataLayout;
const buffer = this.device.createBuffer({
size: align(byteLength, 4), // this is necessary because we need to copy and map data from this buffer
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
});
this.trackForCleanup(buffer);
const mipSize = physicalMipSizeFromTexture(texture, mipLevel || 0);
const encoder = this.device.createCommandEncoder();
encoder.copyTextureToBuffer(
{ texture, mipLevel },
{ buffer, bytesPerRow, rowsPerImage },
mipSize
);
this.device.queue.submit([encoder.finish()]);
return buffer;
}
updateLinearTextureDataSubBox(format, copySize, copyParams) {
const { src, dest } = copyParams;
const rowLength = bytesInACompleteRow(copySize.width, format);
for (const texel of this.iterateBlockRows(copySize, format)) {
const srcOffsetElements = this.getTexelOffsetInBytes(
src.dataLayout,
format,
texel,
src.origin
);
const dstOffsetElements = this.getTexelOffsetInBytes(
dest.dataLayout,
format,
texel,
dest.origin
);
memcpy(
{ src: src.data, start: srcOffsetElements, length: rowLength },
{ dst: dest.data, start: dstOffsetElements }
);
}
}
/** Offset for a particular texel in the linear texture data */
getTexelOffsetInBytes(textureDataLayout, format, texel, origin = { x: 0, y: 0, z: 0 }) {
const { offset, bytesPerRow, rowsPerImage } = textureDataLayout;
const info = kTextureFormatInfo[format];
assert(texel.x % info.blockWidth === 0);
assert(texel.y % info.blockHeight === 0);
assert(origin.x % info.blockWidth === 0);
assert(origin.y % info.blockHeight === 0);
const bytesPerImage = rowsPerImage * bytesPerRow;
return (
offset +
(texel.z + origin.z) * bytesPerImage +
((texel.y + origin.y) / info.blockHeight) * bytesPerRow +
((texel.x + origin.x) / info.blockWidth) * info.color.bytes
);
}
*iterateBlockRows(size, format) {
if (size.width === 0 || size.height === 0 || size.depthOrArrayLayers === 0) {
// do not iterate anything for an empty region
return;
}
const info = kTextureFormatInfo[format];
assert(size.height % info.blockHeight === 0);
// Note: it's important that the order is in increasing memory address order.
for (let z = 0; z < size.depthOrArrayLayers; ++z) {
for (let y = 0; y < size.height; y += info.blockHeight) {
yield {
x: 0,
y,
z,
};
}
}
}
}
return TextureExpectations;
}

View file

@ -1586,6 +1586,71 @@ export const listing = [
"rg11b10ufloat_renderable"
]
},
{
"file": [
"compat",
"api",
"validation",
"encoding",
"cmds",
"copyTextureToBuffer"
]
},
{
"file": [
"compat",
"api",
"validation",
"encoding",
"programmable",
"pipeline_bind_group_compat"
]
},
{
"file": [
"compat",
"api",
"validation",
"render_pipeline",
"fragment_state"
]
},
{
"file": [
"compat",
"api",
"validation",
"render_pipeline",
"shader_module"
]
},
{
"file": [
"compat",
"api",
"validation",
"render_pipeline",
"vertex_state"
]
},
{
"file": [
"compat",
"api",
"validation",
"texture",
"createTexture"
]
},
{
"file": [
"compat",
"api",
"validation",
"texture",
"cubeArray"
]
},
{
"file": [
"examples"
@ -1617,6 +1682,60 @@ export const listing = [
],
"readme": "Tests that check the result of valid shader execution."
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_addition"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_comparison"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_matrix_addition"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_matrix_subtraction"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_multiplication"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"af_subtraction"
]
},
{
"file": [
"shader",
@ -1644,6 +1763,15 @@ export const listing = [
"bool_logical"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_addition"
]
},
{
"file": [
"shader",
@ -1653,6 +1781,87 @@ export const listing = [
"f16_comparison"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_division"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_matrix_addition"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_matrix_matrix_multiplication"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_matrix_scalar_multiplication"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_matrix_subtraction"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_matrix_vector_multiplication"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_multiplication"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_remainder"
]
},
{
"file": [
"shader",
"execution",
"expression",
"binary",
"f16_subtraction"
]
},
{
"file": [
"shader",
@ -2905,7 +3114,16 @@ export const listing = [
"execution",
"expression",
"unary",
"abstract_float_assignment"
"af_arithmetic"
]
},
{
"file": [
"shader",
"execution",
"expression",
"unary",
"af_assignment"
]
},
{
@ -2926,6 +3144,24 @@ export const listing = [
"bool_logical"
]
},
{
"file": [
"shader",
"execution",
"expression",
"unary",
"f16_arithmetic"
]
},
{
"file": [
"shader",
"execution",
"expression",
"unary",
"f16_conversion"
]
},
{
"file": [
"shader",
@ -3189,6 +3425,47 @@ export const listing = [
"const_assert"
]
},
{
"file": [
"shader",
"validation",
"decl",
"const"
]
},
{
"file": [
"shader",
"validation",
"decl",
"override"
]
},
{
"file": [
"shader",
"validation",
"decl",
"ptr_spelling"
]
},
{
"file": [
"shader",
"validation",
"decl",
"var_access_mode"
]
},
{
"file": [
"shader",
"validation",
"expression",
"access",
"vector"
]
},
{
"file": [
"shader",
@ -3198,6 +3475,86 @@ export const listing = [
"bitwise_shift"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"abs"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"acos"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"acosh"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"asin"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"asinh"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"atan"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"atan2"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"atanh"
]
},
{
"file": [
"shader",
@ -3208,6 +3565,232 @@ export const listing = [
"atomics"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"bitcast"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"ceil"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"clamp"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"cos"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"cosh"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"degrees"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"exp"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"exp2"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"inverseSqrt"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"length"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"log"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"log2"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"modf"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"radians"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"round"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"saturate"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"sign"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"sin"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"sinh"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"sqrt"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"tan"
]
},
{
"file": [
"shader",
"validation",
"functions",
"alias_analysis"
]
},
{
"file": [
"shader",
"validation",
"functions",
"restrictions"
]
},
{
"file": [
"shader",
@ -3280,6 +3863,14 @@ export const listing = [
"const_assert"
]
},
{
"file": [
"shader",
"validation",
"parse",
"diagnostic"
]
},
{
"file": [
"shader",
@ -3312,6 +3903,14 @@ export const listing = [
"literal"
]
},
{
"file": [
"shader",
"validation",
"parse",
"must_use"
]
},
{
"file": [
"shader",
@ -3356,8 +3955,8 @@ export const listing = [
"file": [
"shader",
"validation",
"resource_interface",
"bindings"
"shader_io",
"binding"
]
},
{
@ -3376,6 +3975,30 @@ export const listing = [
"entry_point"
]
},
{
"file": [
"shader",
"validation",
"shader_io",
"group"
]
},
{
"file": [
"shader",
"validation",
"shader_io",
"group_and_binding"
]
},
{
"file": [
"shader",
"validation",
"shader_io",
"id"
]
},
{
"file": [
"shader",
@ -3400,6 +4023,46 @@ export const listing = [
"locations"
]
},
{
"file": [
"shader",
"validation",
"shader_io",
"size"
]
},
{
"file": [
"shader",
"validation",
"shader_io",
"workgroup_size"
]
},
{
"file": [
"shader",
"validation",
"types",
"alias"
]
},
{
"file": [
"shader",
"validation",
"types",
"struct"
]
},
{
"file": [
"shader",
"validation",
"types",
"vector"
]
},
{
"file": [
"shader",
@ -3477,6 +4140,13 @@ export const listing = [
"ImageBitmap"
]
},
{
"file": [
"web_platform",
"copyToTexture",
"ImageData"
]
},
{
"file": [
"web_platform",
@ -3491,6 +4161,13 @@ export const listing = [
"canvas"
]
},
{
"file": [
"web_platform",
"copyToTexture",
"image"
]
},
{
"file": [
"web_platform",

View file

@ -0,0 +1,153 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for non-matrix AbstractFloat addition expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { onlyConstInputSource, run } from '../expression.js';
import { abstractBinary } from './binary.js';

// Accuracy interval for vector + scalar: per-element addition intervals.
const additionVectorScalarInterval = (v, s) =>
  FP.abstract.toVector(v.map(e => FP.abstract.additionInterval(e, s)));

// Accuracy interval for scalar + vector: per-element addition intervals.
const additionScalarVectorInterval = (s, v) =>
  FP.abstract.toVector(v.map(e => FP.abstract.additionInterval(s, e)));

export const g = makeTestGroup(GPUTest);

// Case generators, keyed 'scalar', 'vecN_scalar', and 'scalar_vecN'.
// AbstractFloat expressions are const-evaluated, so only finite inputs apply.
const scalar_cases = {
  scalar: () =>
    FP.abstract.generateScalarPairToIntervalCases(
      sparseF64Range(),
      sparseF64Range(),
      'finite',
      FP.abstract.additionInterval
    ),
};
const vector_scalar_cases = {};
const scalar_vector_cases = {};
for (const dim of [2, 3, 4]) {
  vector_scalar_cases[`vec${dim}_scalar`] = () =>
    FP.abstract.generateVectorScalarToVectorCases(
      sparseVectorF64Range(dim),
      sparseF64Range(),
      'finite',
      additionVectorScalarInterval
    );
  scalar_vector_cases[`scalar_vec${dim}`] = () =>
    FP.abstract.generateScalarVectorToVectorCases(
      sparseF64Range(),
      sparseVectorF64Range(dim),
      'finite',
      additionScalarVectorInterval
    );
}

export const d = makeCaseCache('binary/af_addition', {
  ...scalar_cases,
  ...vector_scalar_cases,
  ...scalar_vector_cases,
});

g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x and y are scalars
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource))
  .fn(async t => {
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('+'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x and y are vectors
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4]))
  .fn(async t => {
    // Vector cases are produced from the scalar cases via the 'vectorize' parameter.
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('+'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`vec${dim}_scalar`);
    await run(
      t,
      abstractBinary('+'),
      [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`scalar_vec${dim}`);
    await run(
      t,
      abstractBinary('+'),
      [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

View file

@ -0,0 +1,205 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for the AbstractFloat comparison operations
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { anyOf } from '../../../../util/compare.js';
import { abstractFloat, bool, TypeAbstractFloat, TypeBool } from '../../../../util/conversion.js';
import { flushSubnormalNumberF64, vectorF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary } from './binary.js';

export const g = makeTestGroup(GPUTest);

/**
 * @returns a test case for the provided left hand & right hand values and truth function.
 * Handles quantization and subnormals: since subnormal inputs may be flushed
 * to zero at any time, the expectation accepts the truth function's result for
 * every flushed/unflushed combination of the two operands.
 */
function makeCase(lhs, rhs, truthFunc) {
  // Subnormal float values may be flushed at any time.
  // https://www.w3.org/TR/WGSL/#floating-point-evaluation
  const af_lhs = abstractFloat(lhs);
  const af_rhs = abstractFloat(rhs);
  // A Set dedupes the flushed alternative when the value is not subnormal.
  const lhs_options = new Set([af_lhs, abstractFloat(flushSubnormalNumberF64(lhs))]);
  const rhs_options = new Set([af_rhs, abstractFloat(flushSubnormalNumberF64(rhs))]);
  const expected = [];
  for (const l of lhs_options) {
    for (const r of rhs_options) {
      const result = bool(truthFunc(l, r));
      if (!expected.includes(result)) {
        expected.push(result);
      }
    }
  }
  return { input: [af_lhs, af_rhs], expected: anyOf(...expected) };
}

/** @returns cache cases comparing every pair from vectorF64Range(2) with `truthFunc`. */
function comparisonCases(truthFunc) {
  return vectorF64Range(2).map(([lhs, rhs]) => makeCase(lhs, rhs, truthFunc));
}

export const d = makeCaseCache('binary/af_logical', {
  equals: () => comparisonCases((lhs, rhs) => lhs.value === rhs.value),
  not_equals: () => comparisonCases((lhs, rhs) => lhs.value !== rhs.value),
  less_than: () => comparisonCases((lhs, rhs) => lhs.value < rhs.value),
  less_equals: () => comparisonCases((lhs, rhs) => lhs.value <= rhs.value),
  greater_than: () => comparisonCases((lhs, rhs) => lhs.value > rhs.value),
  greater_equals: () => comparisonCases((lhs, rhs) => lhs.value >= rhs.value),
});

/** Shared parameterization: const input source only, with optional vectorization. */
const comparisonParams = u =>
  u
    .combine('inputSource', [allInputSources[0]] /* const */)
    .combine('vectorize', [undefined, 2, 3, 4]);

/** Registers the execution test for comparison operator `op`, reading cache entry `name`. */
function makeComparisonTest(name, op) {
  g.test(name)
    .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
    .desc(
      `
Expression: x ${op} y
Accuracy: Correct result
`
    )
    .params(comparisonParams)
    .fn(async t => {
      const cases = await d.get(name);
      await run(t, binary(op), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
    });
}

makeComparisonTest('equals', '==');
makeComparisonTest('not_equals', '!=');
makeComparisonTest('less_than', '<');
makeComparisonTest('less_equals', '<=');
makeComparisonTest('greater_than', '>');
makeComparisonTest('greater_equals', '>=');

View file

@ -0,0 +1,62 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for matrix AbstractFloat addition expressions
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractFloat, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { onlyConstInputSource, run } from '../expression.js';
import { abstractBinary } from './binary.js';

export const g = makeTestGroup(GPUTest);

// Cases: matCxR — one cache entry per matrix dimension pair.
const mat_cases = {};
for (const cols of [2, 3, 4]) {
  for (const rows of [2, 3, 4]) {
    mat_cases[`mat${cols}x${rows}`] = () =>
      FP.abstract.generateMatrixPairToMatrixCases(
        sparseMatrixF64Range(cols, rows),
        sparseMatrixF64Range(cols, rows),
        'finite',
        FP.abstract.additionMatrixMatrixInterval
      );
  }
}

export const d = makeCaseCache('binary/af_matrix_addition', mat_cases);

g.test('matrix')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x and y are matrices
Accuracy: Correctly rounded
`
  )
  .params(u =>
    u
      .combine('inputSource', onlyConstInputSource)
      .combine('cols', [2, 3, 4])
      .combine('rows', [2, 3, 4])
  )
  .fn(async t => {
    const { cols, rows } = t.params;
    const matType = TypeMat(cols, rows, TypeAbstractFloat);
    const cases = await d.get(`mat${cols}x${rows}`);
    await run(t, abstractBinary('+'), [matType, matType], matType, t.params, cases);
  });

View file

@ -0,0 +1,62 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for matrix AbstractFloat subtraction expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractFloat, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { onlyConstInputSource, run } from '../expression.js';
import { abstractBinary } from './binary.js';

export const g = makeTestGroup(GPUTest);

// Cases: matCxR — one cache entry per matrix dimension pair.
const mat_cases = {};
for (const cols of [2, 3, 4]) {
  for (const rows of [2, 3, 4]) {
    mat_cases[`mat${cols}x${rows}`] = () =>
      FP.abstract.generateMatrixPairToMatrixCases(
        sparseMatrixF64Range(cols, rows),
        sparseMatrixF64Range(cols, rows),
        'finite',
        FP.abstract.subtractionMatrixMatrixInterval
      );
  }
}

export const d = makeCaseCache('binary/af_matrix_subtraction', mat_cases);

g.test('matrix')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x and y are matrices
Accuracy: Correctly rounded
`
  )
  .params(u =>
    u
      .combine('inputSource', onlyConstInputSource)
      .combine('cols', [2, 3, 4])
      .combine('rows', [2, 3, 4])
  )
  .fn(async t => {
    const { cols, rows } = t.params;
    const matType = TypeMat(cols, rows, TypeAbstractFloat);
    const cases = await d.get(`mat${cols}x${rows}`);
    await run(t, abstractBinary('-'), [matType, matType], matType, t.params, cases);
  });

View file

@ -0,0 +1,153 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for non-matrix AbstractFloat multiplication expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { onlyConstInputSource, run } from '../expression.js';
import { abstractBinary } from './binary.js';

// Accuracy interval for vector * scalar: per-element multiplication intervals.
const multiplicationVectorScalarInterval = (v, s) =>
  FP.abstract.toVector(v.map(e => FP.abstract.multiplicationInterval(e, s)));

// Accuracy interval for scalar * vector: per-element multiplication intervals.
const multiplicationScalarVectorInterval = (s, v) =>
  FP.abstract.toVector(v.map(e => FP.abstract.multiplicationInterval(s, e)));

export const g = makeTestGroup(GPUTest);

// Case generators, keyed 'scalar', 'vecN_scalar', and 'scalar_vecN'.
// AbstractFloat expressions are const-evaluated, so only finite inputs apply.
const scalar_cases = {
  scalar: () =>
    FP.abstract.generateScalarPairToIntervalCases(
      sparseF64Range(),
      sparseF64Range(),
      'finite',
      FP.abstract.multiplicationInterval
    ),
};
const vector_scalar_cases = {};
const scalar_vector_cases = {};
for (const dim of [2, 3, 4]) {
  vector_scalar_cases[`vec${dim}_scalar`] = () =>
    FP.abstract.generateVectorScalarToVectorCases(
      sparseVectorF64Range(dim),
      sparseF64Range(),
      'finite',
      multiplicationVectorScalarInterval
    );
  scalar_vector_cases[`scalar_vec${dim}`] = () =>
    FP.abstract.generateScalarVectorToVectorCases(
      sparseF64Range(),
      sparseVectorF64Range(dim),
      'finite',
      multiplicationScalarVectorInterval
    );
}

export const d = makeCaseCache('binary/af_multiplication', {
  ...scalar_cases,
  ...vector_scalar_cases,
  ...scalar_vector_cases,
});

g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x and y are scalars
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource))
  .fn(async t => {
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('*'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x and y are vectors
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4]))
  .fn(async t => {
    // Vector cases are produced from the scalar cases via the 'vectorize' parameter.
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('*'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`vec${dim}_scalar`);
    await run(
      t,
      abstractBinary('*'),
      [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`scalar_vec${dim}`);
    await run(
      t,
      abstractBinary('*'),
      [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

View file

@ -0,0 +1,153 @@
/**
 * AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
 **/ export const description = `
Execution Tests for non-matrix AbstractFloat subtraction expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { onlyConstInputSource, run } from '../expression.js';
import { abstractBinary } from './binary.js';

// Accuracy interval for vector - scalar: per-element subtraction intervals.
const subtractionVectorScalarInterval = (v, s) =>
  FP.abstract.toVector(v.map(e => FP.abstract.subtractionInterval(e, s)));

// Accuracy interval for scalar - vector: per-element subtraction intervals.
const subtractionScalarVectorInterval = (s, v) =>
  FP.abstract.toVector(v.map(e => FP.abstract.subtractionInterval(s, e)));

export const g = makeTestGroup(GPUTest);

// Case generators, keyed 'scalar', 'vecN_scalar', and 'scalar_vecN'.
// AbstractFloat expressions are const-evaluated, so only finite inputs apply.
const scalar_cases = {
  scalar: () =>
    FP.abstract.generateScalarPairToIntervalCases(
      sparseF64Range(),
      sparseF64Range(),
      'finite',
      FP.abstract.subtractionInterval
    ),
};
const vector_scalar_cases = {};
const scalar_vector_cases = {};
for (const dim of [2, 3, 4]) {
  vector_scalar_cases[`vec${dim}_scalar`] = () =>
    FP.abstract.generateVectorScalarToVectorCases(
      sparseVectorF64Range(dim),
      sparseF64Range(),
      'finite',
      subtractionVectorScalarInterval
    );
  scalar_vector_cases[`scalar_vec${dim}`] = () =>
    FP.abstract.generateScalarVectorToVectorCases(
      sparseF64Range(),
      sparseVectorF64Range(dim),
      'finite',
      subtractionScalarVectorInterval
    );
}

export const d = makeCaseCache('binary/af_subtraction', {
  ...scalar_cases,
  ...vector_scalar_cases,
  ...scalar_vector_cases,
});

g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x and y are scalars
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource))
  .fn(async t => {
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('-'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x and y are vectors
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4]))
  .fn(async t => {
    // Vector cases are produced from the scalar cases via the 'vectorize' parameter.
    const cases = await d.get('scalar');
    await run(
      t,
      abstractBinary('-'),
      [TypeAbstractFloat, TypeAbstractFloat],
      TypeAbstractFloat,
      t.params,
      cases
    );
  });

g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`vec${dim}_scalar`);
    await run(
      t,
      abstractBinary('-'),
      [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4]))
  .fn(async t => {
    const { dim } = t.params;
    const cases = await d.get(`scalar_vec${dim}`);
    await run(
      t,
      abstractBinary('-'),
      [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
      TypeVec(dim, TypeAbstractFloat),
      t.params,
      cases
    );
  });

View file

@ -3,7 +3,10 @@
**/ import {
basicExpressionBuilder,
compoundAssignmentBuilder,
} from '../expression.js'; /* @returns a ShaderBuilder that evaluates a binary operation */
abstractFloatShaderBuilder,
} from '../expression.js';
/* @returns a ShaderBuilder that evaluates a binary operation */
/** @returns a ShaderBuilder that evaluates the binary operation `op`, fully parenthesized. */
export function binary(op) {
  return basicExpressionBuilder(operands => {
    const wrapped = operands.map(operand => `(${operand})`);
    return `(${wrapped.join(op)})`;
  });
}
@ -12,3 +15,8 @@ export function binary(op) {
export function compoundBinary(op) {
return compoundAssignmentBuilder(op);
}
/* @returns a ShaderBuilder that evaluates a binary operation that returns AbstractFloats */
export function abstractBinary(op) {
  return abstractFloatShaderBuilder(operands => {
    const wrapped = operands.map(operand => `(${operand})`);
    return `(${wrapped.join(op)})`;
  });
}

View file

@ -0,0 +1,216 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for non-matrix f16 addition expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
// Expected interval vector for `vector + scalar`: add the scalar to each lane.
const additionVectorScalarInterval = (v, s) =>
  FP.f16.toVector(v.map(lane => FP.f16.additionInterval(lane, s)));
// Expected interval vector for `scalar + vector`: add the scalar to each lane.
const additionScalarVectorInterval = (s, v) =>
  FP.f16.toVector(v.map(lane => FP.f16.additionInterval(s, lane)));
export const g = makeTestGroup(GPUTest);
// Cases: scalar_[non_]const — f16 scalar+scalar expectation generators, keyed
// exactly as the tests below look them up in the case cache.
const scalar_cases = Object.fromEntries(
  [true, false].map(nonConst => [
    `scalar_${nonConst ? 'non_const' : 'const'}`,
    () =>
      FP.f16.generateScalarPairToIntervalCases(
        sparseF16Range(),
        sparseF16Range(),
        nonConst ? 'unfiltered' : 'finite',
        FP.f16.additionInterval
      ),
  ])
);
// Cases: vecN_scalar_[non_]const for N in {2, 3, 4}.
const vector_scalar_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateVectorScalarToVectorCases(
          sparseVectorF16Range(dim),
          sparseF16Range(),
          nonConst ? 'unfiltered' : 'finite',
          additionVectorScalarInterval
        ),
    ])
  )
);
// Cases: scalar_vecN_[non_]const for N in {2, 3, 4}.
const scalar_vector_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateScalarVectorToVectorCases(
          sparseF16Range(),
          sparseVectorF16Range(dim),
          nonConst ? 'unfiltered' : 'finite',
          additionScalarVectorInterval
        ),
    ])
  )
);
// Lazily-built, file-scoped cache of all the case tables above.
export const d = makeCaseCache('binary/f16_addition', {
  ...scalar_cases,
  ...vector_scalar_cases,
  ...scalar_vector_cases,
});
// Scalar f16 addition: checks `x + y` against correctly rounded intervals for
// every input source.
g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x + y, where x and y are scalars
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', allInputSources))
  // f16 arithmetic requires the 'shader-f16' feature; skip on devices without it.
  .beforeAllSubcases(t => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    // Const evaluation uses the 'finite'-filtered cases; runtime uses 'unfiltered'.
    const cases = await d.get(
      t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
    );
    await run(t, binary('+'), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y, where x and y are vectors
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
);
await run(t, binary('+'), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x += y
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
);
await run(t, compoundBinary('+='), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('vector_scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
);
await run(
t,
binary('+'),
[TypeVec(dim, TypeF16), TypeF16],
TypeVec(dim, TypeF16),
t.params,
cases
);
});
g.test('vector_scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x += y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
);
await run(
t,
compoundBinary('+='),
[TypeVec(dim, TypeF16), TypeF16],
TypeVec(dim, TypeF16),
t.params,
cases
);
});
g.test('scalar_vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
);
await run(
t,
binary('+'),
[TypeF16, TypeVec(dim, TypeF16)],
TypeVec(dim, TypeF16),
t.params,
cases
);
});

View file

@ -0,0 +1,216 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for non-matrix f16 division expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
// Expected interval vector for `vector / scalar`: divide each lane by the scalar.
const divisionVectorScalarInterval = (v, s) =>
  FP.f16.toVector(v.map(lane => FP.f16.divisionInterval(lane, s)));
// Expected interval vector for `scalar / vector`: divide the scalar by each lane.
const divisionScalarVectorInterval = (s, v) =>
  FP.f16.toVector(v.map(lane => FP.f16.divisionInterval(s, lane)));
export const g = makeTestGroup(GPUTest);
const scalar_cases = [true, false]
.map(nonConst => ({
[`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateScalarPairToIntervalCases(
sparseF16Range(),
sparseF16Range(),
nonConst ? 'unfiltered' : 'finite',
FP.f16.divisionInterval
);
},
}))
.reduce((a, b) => ({ ...a, ...b }), {});
const vector_scalar_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateVectorScalarToVectorCases(
sparseVectorF16Range(dim),
sparseF16Range(),
nonConst ? 'unfiltered' : 'finite',
divisionVectorScalarInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
const scalar_vector_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateScalarVectorToVectorCases(
sparseF16Range(),
sparseVectorF16Range(dim),
nonConst ? 'unfiltered' : 'finite',
divisionScalarVectorInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_division', {
...scalar_cases,
...vector_scalar_cases,
...scalar_vector_cases,
});
g.test('scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x / y, where x and y are scalars
Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
`
)
.params(u => u.combine('inputSource', allInputSources))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
);
await run(t, binary('/'), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x / y, where x and y are vectors
Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
);
await run(t, binary('/'), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x /= y
Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
);
await run(t, compoundBinary('/='), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('vector_scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x / y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
);
await run(
t,
binary('/'),
[TypeVec(dim, TypeF16), TypeF16],
TypeVec(dim, TypeF16),
t.params,
cases
);
});
g.test('vector_scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x /= y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
);
await run(
t,
compoundBinary('/='),
[TypeVec(dim, TypeF16), TypeF16],
TypeVec(dim, TypeF16),
t.params,
cases
);
});
g.test('scalar_vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x / y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const dim = t.params.dim;
const cases = await d.get(
t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
);
await run(
t,
binary('/'),
[TypeF16, TypeVec(dim, TypeF16)],
TypeVec(dim, TypeF16),
t.params,
cases
);
});

View file

@ -0,0 +1,98 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for matrix f16 addition expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
export const g = makeTestGroup(GPUTest);
// Cases: matCxR_[non_]const — expectation generators for every f16 matrix
// dimension (2..4 cols x 2..4 rows), in const and non-const variants
// ('finite' vs 'unfiltered' interval filtering respectively).
const mat_cases = [2, 3, 4]
  .flatMap(cols =>
    [2, 3, 4].flatMap(rows =>
      [true, false].map(nonConst => ({
        [`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
          return FP.f16.generateMatrixPairToMatrixCases(
            sparseMatrixF16Range(cols, rows),
            sparseMatrixF16Range(cols, rows),
            nonConst ? 'unfiltered' : 'finite',
            FP.f16.additionMatrixMatrixInterval
          );
        },
      }))
    )
  )
  .reduce((a, b) => ({ ...a, ...b }), {});
// Lazily-built, file-scoped cache of the cases above; tests look keys up via d.get().
export const d = makeCaseCache('binary/f16_matrix_addition', mat_cases);
g.test('matrix')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y, where x and y are matrices
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
);
await run(
t,
binary('+'),
[TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});
// Compound f16 matrix addition: checks `x += y` for all matrix dimensions and
// input sources against correctly rounded component intervals.
// Fix: the description previously read "x =+ y", but the test exercises the
// `+=` compound-assignment operator (see compoundBinary('+=') below).
g.test('matrix_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x += y, where x and y are matrices
Accuracy: Correctly rounded
`
  )
  .params(u =>
    u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
  )
  // f16 arithmetic requires the 'shader-f16' feature; skip on devices without it.
  .beforeAllSubcases(t => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cols = t.params.cols;
    const rows = t.params.rows;
    // Pre-generated cases are keyed by dimensions and const-ness (see `d`).
    const cases = await d.get(
      t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
    );
    await run(
      t,
      compoundBinary('+='),
      [TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
      TypeMat(cols, rows, TypeF16),
      t.params,
      cases
    );
  });

View file

@ -0,0 +1,117 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for matrix-matrix f16 multiplication expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
export const g = makeTestGroup(GPUTest);
// Cases: matKxR_matCxK_[non_]const
const mat_mat_cases = [2, 3, 4]
.flatMap(k =>
[2, 3, 4].flatMap(cols =>
[2, 3, 4].flatMap(rows =>
[true, false].map(nonConst => ({
[`mat${k}x${rows}_mat${cols}x${k}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateMatrixPairToMatrixCases(
sparseMatrixF16Range(k, rows),
sparseMatrixF16Range(cols, k),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationMatrixMatrixInterval
);
},
}))
)
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_matrix_matrix_multiplication', mat_mat_cases);
g.test('matrix_matrix')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x is a matrix and y is a matrix
Accuracy: Correctly rounded
`
)
.params(u =>
u
.combine('inputSource', allInputSources)
.combine('common_dim', [2, 3, 4])
.combine('x_rows', [2, 3, 4])
.combine('y_cols', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const x_cols = t.params.common_dim;
const x_rows = t.params.x_rows;
const y_cols = t.params.y_cols;
const y_rows = t.params.common_dim;
const cases = await d.get(
t.params.inputSource === 'const'
? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
: `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
);
await run(
t,
binary('*'),
[TypeMat(x_cols, x_rows, TypeF16), TypeMat(y_cols, y_rows, TypeF16)],
TypeMat(y_cols, x_rows, TypeF16),
t.params,
cases
);
});
g.test('matrix_matrix_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x *= y, where x is a matrix and y is a matrix
Accuracy: Correctly rounded
`
)
.params(u =>
u
.combine('inputSource', allInputSources)
.combine('common_dim', [2, 3, 4])
.combine('x_rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const x_cols = t.params.common_dim;
const x_rows = t.params.x_rows;
const y_cols = x_cols;
const y_rows = t.params.common_dim;
const cases = await d.get(
t.params.inputSource === 'const'
? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
: `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
);
await run(
t,
compoundBinary('*='),
[TypeMat(x_cols, x_rows, TypeF16), TypeMat(y_cols, y_rows, TypeF16)],
TypeMat(y_cols, x_rows, TypeF16),
t.params,
cases
);
});

View file

@ -0,0 +1,156 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for matrix-scalar and scalar-matrix f16 multiplication expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseMatrixF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
export const g = makeTestGroup(GPUTest);
// Cases: matCxR_scalar_[non_]const
const mat_scalar_cases = [2, 3, 4]
.flatMap(cols =>
[2, 3, 4].flatMap(rows =>
[true, false].map(nonConst => ({
[`mat${cols}x${rows}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateMatrixScalarToMatrixCases(
sparseMatrixF16Range(cols, rows),
sparseF16Range(),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationMatrixScalarInterval
);
},
}))
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
// Cases: scalar_matCxR_[non_]const
const scalar_mat_cases = [2, 3, 4]
.flatMap(cols =>
[2, 3, 4].flatMap(rows =>
[true, false].map(nonConst => ({
[`scalar_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateScalarMatrixToMatrixCases(
sparseF16Range(),
sparseMatrixF16Range(cols, rows),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationScalarMatrixInterval
);
},
}))
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_matrix_scalar_multiplication', {
...mat_scalar_cases,
...scalar_mat_cases,
});
g.test('matrix_scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x is a matrix and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const'
? `mat${cols}x${rows}_scalar_const`
: `mat${cols}x${rows}_scalar_non_const`
);
await run(
t,
binary('*'),
[TypeMat(cols, rows, TypeF16), TypeF16],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});
g.test('matrix_scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x *= y, where x is a matrix and y is a scalar
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const'
? `mat${cols}x${rows}_scalar_const`
: `mat${cols}x${rows}_scalar_non_const`
);
await run(
t,
compoundBinary('*='),
[TypeMat(cols, rows, TypeF16), TypeF16],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});
g.test('scalar_matrix')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x is a scalar and y is a matrix
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const'
? `scalar_mat${cols}x${rows}_const`
: `scalar_mat${cols}x${rows}_non_const`
);
await run(
t,
binary('*'),
[TypeF16, TypeMat(cols, rows, TypeF16)],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});

View file

@ -0,0 +1,98 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for matrix f16 subtraction expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeMat } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
export const g = makeTestGroup(GPUTest);
// Cases: matCxR_[non_]const
const mat_cases = [2, 3, 4]
.flatMap(cols =>
[2, 3, 4].flatMap(rows =>
[true, false].map(nonConst => ({
[`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateMatrixPairToMatrixCases(
sparseMatrixF16Range(cols, rows),
sparseMatrixF16Range(cols, rows),
nonConst ? 'unfiltered' : 'finite',
FP.f16.subtractionMatrixMatrixInterval
);
},
}))
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_matrix_subtraction', mat_cases);
g.test('matrix')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x - y, where x and y are matrices
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
);
await run(
t,
binary('-'),
[TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});
g.test('matrix_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x -= y, where x and y are matrices
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
);
await run(
t,
compoundBinary('-='),
[TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
TypeMat(cols, rows, TypeF16),
t.params,
cases
);
});

View file

@ -0,0 +1,154 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for matrix-vector and vector-matrix f16 multiplication expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeMat, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseMatrixF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
export const g = makeTestGroup(GPUTest);
// Cases: matCxR_vecC_[non_]const
const mat_vec_cases = [2, 3, 4]
.flatMap(cols =>
[2, 3, 4].flatMap(rows =>
[true, false].map(nonConst => ({
[`mat${cols}x${rows}_vec${cols}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateMatrixVectorToVectorCases(
sparseMatrixF16Range(cols, rows),
sparseVectorF16Range(cols),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationMatrixVectorInterval
);
},
}))
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
// Cases: vecR_matCxR_[non_]const
const vec_mat_cases = [2, 3, 4]
.flatMap(rows =>
[2, 3, 4].flatMap(cols =>
[true, false].map(nonConst => ({
[`vec${rows}_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateVectorMatrixToVectorCases(
sparseVectorF16Range(rows),
sparseMatrixF16Range(cols, rows),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationVectorMatrixInterval
);
},
}))
)
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_matrix_vector_multiplication', {
...mat_vec_cases,
...vec_mat_cases,
});
g.test('matrix_vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x is a matrix and y is a vector
Accuracy: Correctly rounded
`
)
.params(u =>
u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
)
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cols = t.params.cols;
const rows = t.params.rows;
const cases = await d.get(
t.params.inputSource === 'const'
? `mat${cols}x${rows}_vec${cols}_const`
: `mat${cols}x${rows}_vec${cols}_non_const`
);
await run(
t,
binary('*'),
[TypeMat(cols, rows, TypeF16), TypeVec(cols, TypeF16)],
TypeVec(rows, TypeF16),
t.params,
cases
);
});
// Row-vector times matrix: checks `x * y` where x is vecR<f16> and y is
// matCxR<f16>, producing vecC<f16>.
// Fix: the description previously contained a duplicated word ("y is is a matrix").
g.test('vector_matrix')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x is a vector and y is a matrix
Accuracy: Correctly rounded
`
  )
  .params(u =>
    u.combine('inputSource', allInputSources).combine('cols', [2, 3, 4]).combine('rows', [2, 3, 4])
  )
  // f16 arithmetic requires the 'shader-f16' feature; skip on devices without it.
  .beforeAllSubcases(t => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cols = t.params.cols;
    const rows = t.params.rows;
    // Pre-generated cases are keyed by vector width and matrix dimensions.
    const cases = await d.get(
      t.params.inputSource === 'const'
        ? `vec${rows}_mat${cols}x${rows}_const`
        : `vec${rows}_mat${cols}x${rows}_non_const`
    );
    await run(
      t,
      binary('*'),
      [TypeVec(rows, TypeF16), TypeMat(cols, rows, TypeF16)],
      TypeVec(cols, TypeF16),
      t.params,
      cases
    );
  });
// Compound row-vector-times-matrix: checks `x *= y`. A single `dim` parameter
// drives both cols and rows, since `*=` requires the result type to match x's
// type (only square matrices apply).
// Fix: the description previously contained a duplicated word ("y is is a matrix").
g.test('vector_matrix_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x *= y, where x is a vector and y is a matrix
Accuracy: Correctly rounded
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  // f16 arithmetic requires the 'shader-f16' feature; skip on devices without it.
  .beforeAllSubcases(t => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cols = t.params.dim;
    const rows = t.params.dim;
    const cases = await d.get(
      t.params.inputSource === 'const'
        ? `vec${rows}_mat${cols}x${rows}_const`
        : `vec${rows}_mat${cols}x${rows}_non_const`
    );
    await run(
      t,
      compoundBinary('*='),
      [TypeVec(rows, TypeF16), TypeMat(cols, rows, TypeF16)],
      TypeVec(cols, TypeF16),
      t.params,
      cases
    );
  });

View file

@ -0,0 +1,216 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for non-matrix f16 multiplication expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
// Expected interval vector for `vector * scalar`: multiply each lane by the scalar.
const multiplicationVectorScalarInterval = (v, s) =>
  FP.f16.toVector(v.map(lane => FP.f16.multiplicationInterval(lane, s)));
// Expected interval vector for `scalar * vector`: multiply the scalar into each lane.
const multiplicationScalarVectorInterval = (s, v) =>
  FP.f16.toVector(v.map(lane => FP.f16.multiplicationInterval(s, lane)));
export const g = makeTestGroup(GPUTest);
const scalar_cases = [true, false]
.map(nonConst => ({
[`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateScalarPairToIntervalCases(
sparseF16Range(),
sparseF16Range(),
nonConst ? 'unfiltered' : 'finite',
FP.f16.multiplicationInterval
);
},
}))
.reduce((a, b) => ({ ...a, ...b }), {});
const vector_scalar_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateVectorScalarToVectorCases(
sparseVectorF16Range(dim),
sparseF16Range(),
nonConst ? 'unfiltered' : 'finite',
multiplicationVectorScalarInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
const scalar_vector_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f16.generateScalarVectorToVectorCases(
sparseF16Range(),
sparseVectorF16Range(dim),
nonConst ? 'unfiltered' : 'finite',
multiplicationScalarVectorInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f16_multiplication', {
...scalar_cases,
...vector_scalar_cases,
...scalar_vector_cases,
});
g.test('scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x and y are scalars
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
);
await run(t, binary('*'), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x * y, where x and y are vectors
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
);
await run(t, binary('*'), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x *= y
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4]))
.beforeAllSubcases(t => {
t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
})
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
);
await run(t, compoundBinary('*='), [TypeF16, TypeF16], TypeF16, t.params, cases);
});
g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Pick the pre-generated case list for this vector width / const-ness.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`vec${dim}_scalar_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, binary('*'), [vecType, TypeF16], vecType, t.params, cases);
  });
g.test('vector_scalar_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x *= y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Compound assignment reuses the vecN * scalar case lists.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`vec${dim}_scalar_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, compoundBinary('*='), [vecType, TypeF16], vecType, t.params, cases);
  });
g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x * y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Pick the pre-generated scalar * vecN case list for this const-ness.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`scalar_vec${dim}_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, binary('*'), [TypeF16, vecType], vecType, t.params, cases);
  });

View file

@ -0,0 +1,217 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for non-matrix f16 remainder expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
// Componentwise accuracy interval for `vector % scalar` in f16: apply the
// scalar remainder interval to each element, then rebuild a vector interval.
const remainderVectorScalarInterval = (v, s) =>
  FP.f16.toVector(v.map(el => FP.f16.remainderInterval(el, s)));

// Componentwise accuracy interval for `scalar % vector` in f16.
const remainderScalarVectorInterval = (s, v) =>
  FP.f16.toVector(v.map(el => FP.f16.remainderInterval(s, el)));
export const g = makeTestGroup(GPUTest);

// Case generators for scalar % scalar, keyed `scalar_{const,non_const}`.
const scalar_cases = Object.fromEntries(
  [true, false].map(nonConst => [
    `scalar_${nonConst ? 'non_const' : 'const'}`,
    () =>
      FP.f16.generateScalarPairToIntervalCases(
        sparseF16Range(),
        sparseF16Range(),
        nonConst ? 'unfiltered' : 'finite',
        FP.f16.remainderInterval
      ),
  ])
);

// Case generators for vecN % scalar, keyed `vec{N}_scalar_{const,non_const}`.
const vector_scalar_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateVectorScalarToVectorCases(
          sparseVectorF16Range(dim),
          sparseF16Range(),
          nonConst ? 'unfiltered' : 'finite',
          remainderVectorScalarInterval
        ),
    ])
  )
);

// Case generators for scalar % vecN, keyed `scalar_vec{N}_{const,non_const}`.
const scalar_vector_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateScalarVectorToVectorCases(
          sparseF16Range(),
          sparseVectorF16Range(dim),
          nonConst ? 'unfiltered' : 'finite',
          remainderScalarVectorInterval
        ),
    ])
  )
);

// Lazily-built, cached case lists shared by all tests in this file.
export const d = makeCaseCache(
  'binary/f16_remainder',
  Object.assign({}, scalar_cases, vector_scalar_cases, scalar_vector_cases)
);
g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x % y, where x and y are scalars
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cases = await d.get(
      t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
    );
    await run(t, binary('%'), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x % y, where x and y are vectors
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4]))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cases = await d.get(
      t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
    );
    await run(t, binary('%'), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('scalar_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x %= y
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4]))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const cases = await d.get(
      t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
    );
    await run(t, compoundBinary('%='), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x % y, where x is a vector and y is a scalar
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const dim = t.params.dim;
    const cases = await d.get(
      t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
    );
    await run(
      t,
      binary('%'),
      [TypeVec(dim, TypeF16), TypeF16],
      TypeVec(dim, TypeF16),
      t.params,
      cases
    );
  });
g.test('vector_scalar_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x %= y, where x is a vector and y is a scalar
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const dim = t.params.dim;
    const cases = await d.get(
      t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
    );
    await run(
      t,
      compoundBinary('%='),
      [TypeVec(dim, TypeF16), TypeF16],
      TypeVec(dim, TypeF16),
      t.params,
      cases
    );
  });
g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x % y, where x is a scalar and y is a vector
Accuracy: Derived from x - y * trunc(x/y)
`
  )
  .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases(t => {
    // Descriptor form, consistent with the other f16 binary-expression files.
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async t => {
    const dim = t.params.dim;
    const cases = await d.get(
      t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
    );
    await run(
      t,
      binary('%'),
      [TypeF16, TypeVec(dim, TypeF16)],
      TypeVec(dim, TypeF16),
      t.params,
      cases
    );
  });

View file

@ -0,0 +1,216 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/ export const description = `
Execution Tests for non-matrix f16 subtraction expression
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeF16, TypeVec } from '../../../../util/conversion.js';
import { FP } from '../../../../util/floating_point.js';
import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
import { makeCaseCache } from '../case_cache.js';
import { allInputSources, run } from '../expression.js';
import { binary, compoundBinary } from './binary.js';
// Componentwise accuracy interval for `vector - scalar` in f16: apply the
// scalar subtraction interval to each element, then rebuild a vector interval.
const subtractionVectorScalarInterval = (v, s) =>
  FP.f16.toVector(v.map(el => FP.f16.subtractionInterval(el, s)));

// Componentwise accuracy interval for `scalar - vector` in f16.
const subtractionScalarVectorInterval = (s, v) =>
  FP.f16.toVector(v.map(el => FP.f16.subtractionInterval(s, el)));
export const g = makeTestGroup(GPUTest);

// Case generators for scalar - scalar, keyed `scalar_{const,non_const}`.
const scalar_cases = Object.fromEntries(
  [true, false].map(nonConst => [
    `scalar_${nonConst ? 'non_const' : 'const'}`,
    () =>
      FP.f16.generateScalarPairToIntervalCases(
        sparseF16Range(),
        sparseF16Range(),
        nonConst ? 'unfiltered' : 'finite',
        FP.f16.subtractionInterval
      ),
  ])
);

// Case generators for vecN - scalar, keyed `vec{N}_scalar_{const,non_const}`.
const vector_scalar_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateVectorScalarToVectorCases(
          sparseVectorF16Range(dim),
          sparseF16Range(),
          nonConst ? 'unfiltered' : 'finite',
          subtractionVectorScalarInterval
        ),
    ])
  )
);

// Case generators for scalar - vecN, keyed `scalar_vec{N}_{const,non_const}`.
const scalar_vector_cases = Object.fromEntries(
  [2, 3, 4].flatMap(dim =>
    [true, false].map(nonConst => [
      `scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`,
      () =>
        FP.f16.generateScalarVectorToVectorCases(
          sparseF16Range(),
          sparseVectorF16Range(dim),
          nonConst ? 'unfiltered' : 'finite',
          subtractionScalarVectorInterval
        ),
    ])
  )
);

// Lazily-built, cached case lists shared by all tests in this file.
export const d = makeCaseCache(
  'binary/f16_subtraction',
  Object.assign({}, scalar_cases, vector_scalar_cases, scalar_vector_cases)
);
g.test('scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x and y are scalars
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const useConst = t.params.inputSource === 'const';
    const cases = await d.get(useConst ? 'scalar_const' : 'scalar_non_const');
    await run(t, binary('-'), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x and y are vectors
Accuracy: Correctly rounded
`
  )
  .params((u) =>
    u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4])
  )
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    // Vector coverage is produced by vectorizing the scalar case lists.
    const useConst = t.params.inputSource === 'const';
    const cases = await d.get(useConst ? 'scalar_const' : 'scalar_non_const');
    await run(t, binary('-'), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('scalar_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x -= y
Accuracy: Correctly rounded
`
  )
  .params((u) =>
    u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4])
  )
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    // Compound assignment shares the plain-subtraction case lists.
    const useConst = t.params.inputSource === 'const';
    const cases = await d.get(useConst ? 'scalar_const' : 'scalar_non_const');
    await run(t, compoundBinary('-='), [TypeF16, TypeF16], TypeF16, t.params, cases);
  });
g.test('vector_scalar')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Pick the pre-generated case list for this vector width / const-ness.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`vec${dim}_scalar_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, binary('-'), [vecType, TypeF16], vecType, t.params, cases);
  });
g.test('vector_scalar_compound')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x -= y, where x is a vector and y is a scalar
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Compound assignment reuses the vecN - scalar case lists.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`vec${dim}_scalar_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, compoundBinary('-='), [vecType, TypeF16], vecType, t.params, cases);
  });
g.test('scalar_vector')
  .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
  .desc(
    `
Expression: x - y, where x is a scalar and y is a vector
Accuracy: Correctly rounded
`
  )
  .params((u) => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4]))
  .beforeAllSubcases((t) => {
    t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
  })
  .fn(async (t) => {
    const { dim } = t.params;
    // Pick the pre-generated scalar - vecN case list for this const-ness.
    const suffix = t.params.inputSource === 'const' ? 'const' : 'non_const';
    const cases = await d.get(`scalar_vec${dim}_${suffix}`);
    const vecType = TypeVec(dim, TypeF16);
    await run(t, binary('-'), [TypeF16, vecType], vecType, t.params, cases);
  });

View file

@ -23,138 +23,64 @@ const additionScalarVectorInterval = (s, v) => {
export const g = makeTestGroup(GPUTest);
const scalar_cases = [true, false]
.map(nonConst => ({
[`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f32.generateScalarPairToIntervalCases(
sparseF32Range(),
sparseF32Range(),
nonConst ? 'unfiltered' : 'finite',
FP.f32.additionInterval
);
},
}))
.reduce((a, b) => ({ ...a, ...b }), {});
const vector_scalar_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(dim),
sparseF32Range(),
nonConst ? 'unfiltered' : 'finite',
additionVectorScalarInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
const scalar_vector_cases = [2, 3, 4]
.flatMap(dim =>
[true, false].map(nonConst => ({
[`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(dim),
nonConst ? 'unfiltered' : 'finite',
additionScalarVectorInterval
);
},
}))
)
.reduce((a, b) => ({ ...a, ...b }), {});
export const d = makeCaseCache('binary/f32_addition', {
scalar_const: () => {
return FP.f32.generateScalarPairToIntervalCases(
sparseF32Range(),
sparseF32Range(),
'finite',
FP.f32.additionInterval
);
},
scalar_non_const: () => {
return FP.f32.generateScalarPairToIntervalCases(
sparseF32Range(),
sparseF32Range(),
'unfiltered',
FP.f32.additionInterval
);
},
vec2_scalar_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(2),
sparseF32Range(),
'finite',
additionVectorScalarInterval
);
},
vec2_scalar_non_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(2),
sparseF32Range(),
'unfiltered',
additionVectorScalarInterval
);
},
vec3_scalar_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(3),
sparseF32Range(),
'finite',
additionVectorScalarInterval
);
},
vec3_scalar_non_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(3),
sparseF32Range(),
'unfiltered',
additionVectorScalarInterval
);
},
vec4_scalar_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(4),
sparseF32Range(),
'finite',
additionVectorScalarInterval
);
},
vec4_scalar_non_const: () => {
return FP.f32.generateVectorScalarToVectorCases(
sparseVectorF32Range(4),
sparseF32Range(),
'unfiltered',
additionVectorScalarInterval
);
},
scalar_vec2_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(2),
'finite',
additionScalarVectorInterval
);
},
scalar_vec2_non_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(2),
'unfiltered',
additionScalarVectorInterval
);
},
scalar_vec3_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(3),
'finite',
additionScalarVectorInterval
);
},
scalar_vec3_non_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(3),
'unfiltered',
additionScalarVectorInterval
);
},
scalar_vec4_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(4),
'finite',
additionScalarVectorInterval
);
},
scalar_vec4_non_const: () => {
return FP.f32.generateScalarVectorToVectorCases(
sparseF32Range(),
sparseVectorF32Range(4),
'unfiltered',
additionScalarVectorInterval
);
},
subtraction_const: () => {
return FP.f32.generateScalarPairToIntervalCases(
sparseF32Range(),
sparseF32Range(),
'finite',
FP.f32.subtractionInterval
);
},
...scalar_cases,
...vector_scalar_cases,
...scalar_vector_cases,
});
g.test('scalar')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y
Expression: x + y, where x and y are scalars
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4]))
.params(u => u.combine('inputSource', allInputSources))
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
@ -163,6 +89,22 @@ Accuracy: Correctly rounded
await run(t, binary('+'), [TypeF32, TypeF32], TypeF32, t.params, cases);
});
g.test('vector')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(
`
Expression: x + y, where x and y are vectors
Accuracy: Correctly rounded
`
)
.params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4]))
.fn(async t => {
const cases = await d.get(
t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
);
await run(t, binary('+'), [TypeF32, TypeF32], TypeF32, t.params, cases);
});
g.test('scalar_compound')
.specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
.desc(

Some files were not shown because too many files have changed in this diff Show more