Add pseudo async Create*PipelineAsync methods (#31695)

* Create fake CreatePipelineAsync

* Update WebGPU CTS

* Update expectations and disable some webgpu tests
Samson 2024-03-19 06:39:42 +01:00 committed by GitHub
parent 228f4fb2fc
commit c25069161d
167 changed files with 121466 additions and 53445 deletions
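
For context on the "pseudo async" part of this change: the new Create*PipelineAsync methods wrap the existing synchronous create calls in an already-resolved promise, so Web content that awaits them gets the same pipeline object the sync path would return. A minimal sketch of that Web-facing usage follows; the shader source and descriptor values are illustrative and not taken from this change.

```
// Illustrative WebGPU usage only; the values below are made up for the sketch.
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) throw new Error('WebGPU not available');
const device = await adapter.requestDevice();

const module = device.createShaderModule({
  code: '@compute @workgroup_size(1) fn main() {}',
});

// With this patch the promise is resolved immediately with the result of the
// synchronous createComputePipeline() path, rather than being truly async.
const pipeline = await device.createComputePipelineAsync({
  layout: 'auto',
  compute: { module, entryPoint: 'main' },
});
```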

View file

@ -161,7 +161,7 @@ DOMInterfaces = {
},
'GPUDevice': {
'inRealms': ['PopErrorScope', 'GetLost'],
'inRealms': ['PopErrorScope', 'GetLost', 'CreateComputePipelineAsync', 'CreateRenderPipelineAsync'],
}
}

View file

@ -737,7 +737,18 @@ impl GPUDeviceMethods for GPUDevice {
)
}
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createcommandencoder>
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createcomputepipelineasync>
fn CreateComputePipelineAsync(
&self,
descriptor: &GPUComputePipelineDescriptor,
comp: InRealm,
) -> Rc<Promise> {
let promise = Promise::new_in_current_realm(comp);
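// Pseudo-async: resolve the promise right away with the pipeline from the synchronous path.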
promise.resolve_native(&self.CreateComputePipeline(descriptor));
promise
}
/// https://gpuweb.github.io/gpuweb/#dom-gpudevice-createcommandencoder
fn CreateCommandEncoder(
&self,
descriptor: &GPUCommandEncoderDescriptor,
@ -1040,6 +1051,17 @@ impl GPUDeviceMethods for GPUDevice {
)
}
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createrenderpipelineasync>
fn CreateRenderPipelineAsync(
&self,
descriptor: &GPURenderPipelineDescriptor,
comp: InRealm,
) -> Rc<Promise> {
let promise = Promise::new_in_current_realm(comp);
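// Pseudo-async: resolve the promise right away with the synchronously created render pipeline.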
promise.resolve_native(&self.CreateRenderPipeline(descriptor));
promise
}
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createrenderbundleencoder>
fn CreateRenderBundleEncoder(
&self,
@ -1148,19 +1170,6 @@ impl GPUDeviceMethods for GPUDevice {
}
}
}
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createcomputepipelineasync>
fn CreateComputePipelineAsync(
&self,
_descriptor: &GPUComputePipelineDescriptor,
) -> Rc<Promise> {
todo!()
}
/// <https://gpuweb.github.io/gpuweb/#dom-gpudevice-createrenderpipelineasync>
fn CreateRenderPipelineAsync(&self, _descriptor: &GPURenderPipelineDescriptor) -> Rc<Promise> {
todo!()
}
}
impl Drop for GPUDevice {

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,3 @@
[canvas_composite_alpha_bgra8unorm_opaque_copy.https.html]
expected:
if os == "linux" and not debug: [CRASH, PASS]

View file

@ -1,2 +1,2 @@
[canvas_composite_alpha_bgra8unorm_opaque_draw.https.html]
expected: [PASS, FAIL, CRASH]
expected: [CRASH, PASS, FAIL]

View file

@ -0,0 +1,3 @@
[canvas_composite_alpha_rgba8unorm_opaque_copy.https.html]
expected:
if os == "linux" and not debug: [CRASH, PASS]

View file

@ -0,0 +1,3 @@
[canvas_composite_alpha_rgba8unorm_opaque_draw.https.html]
expected:
if os == "linux" and not debug: PASS

View file

@ -0,0 +1,6 @@
[delay_get_texture.https.html]
expected:
if os == "win": FAIL
if os == "linux" and debug: FAIL
if os == "linux" and not debug: [PASS, FAIL]
if os == "mac": FAIL

View file

@ -1 +1 @@
ae15a59832989c22982acaeaccdf5d379afced62
ad066ef01b1060802b4a2329443faead9f345d87

View file

@ -73,6 +73,7 @@ export class TestFileLoader extends EventTarget {
query,
{
subqueriesToExpand = [],
fullyExpandSubtrees = [],
maxChunkTime = Infinity
} = {})
{
@ -82,6 +83,7 @@ export class TestFileLoader extends EventTarget {
assert(q.level >= 2, () => `subqueriesToExpand entries should not be multi-file:\n ${q}`);
return q;
}),
fullyExpandSubtrees: fullyExpandSubtrees.map((s) => parseQuery(s)),
maxChunkTime
});
this.dispatchEvent(new MessageEvent('finish'));
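
A hedged sketch of how the new `fullyExpandSubtrees` option might be passed; the import paths, the `loadTree` method name, and the query strings are assumptions for illustration, not taken from this diff.

```
// Hypothetical invocation: force one subtree to expand down to individual
// cases while the rest of the tree stays collapsible.
import { DefaultTestFileLoader } from './internal/file_loader.js';
import { parseQuery } from './internal/query/parseQuery.js';

const loader = new DefaultTestFileLoader();
const tree = await loader.loadTree(parseQuery('webgpu:shader,execution,*'), {
  // Entries are query strings; the loader parses them with parseQuery().
  fullyExpandSubtrees: ['webgpu:shader,execution,memory_layout,*'],
});
```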

View file

@ -17,12 +17,49 @@ import {
import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './separators.js';
import { validQueryPart } from './validQueryPart.js';
export function parseQuery(s) {
/**
* converts foo/bar/src/webgpu/this/that/file.spec.ts to webgpu:this,that,file,*
*/
function convertPathToQuery(path) {
// removes .spec.ts and splits by directory separators.
const parts = path.substring(0, path.length - 8).split(/\/|\\/g);
// Gets parts only after the last `src`. Example: returns ['webgpu', 'foo', 'bar', 'test']
// for ['Users', 'me', 'src', 'cts', 'src', 'webgpu', 'foo', 'bar', 'test']
const partsAfterSrc = parts.slice(parts.lastIndexOf('src') + 1);
const suite = partsAfterSrc.shift();
return `${suite}:${partsAfterSrc.join(',')},*`;
}
/**
* If a query looks like a path (ends in .spec.ts and has directory separators)
 * then try to convert it to a query.
*/
function convertPathLikeToQuery(queryOrPath) {
return queryOrPath.endsWith('.spec.ts') && (
queryOrPath.includes('/') || queryOrPath.includes('\\')) ?
convertPathToQuery(queryOrPath) :
queryOrPath;
}
/**
 * Convert long suite names (the part before the first colon) to the
 * last dot-separated word, e.g.
* foo.bar.moo:test,subtest,foo -> moo:test,subtest,foo
*/
function shortenSuiteName(query) {
const parts = query.split(':');
// converts foo.bar.moo to moo
const suite = parts.shift()?.replace(/.*\.(\w+)$/, '$1');
return [suite, ...parts].join(':');
}
export function parseQuery(queryLike) {
try {
return parseQueryImpl(s);
const query = shortenSuiteName(convertPathLikeToQuery(queryLike));
return parseQueryImpl(query);
} catch (ex) {
if (ex instanceof Error) {
ex.message += '\n on: ' + s;
ex.message += `\n on: ${queryLike}`;
}
throw ex;
}
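
A hedged illustration of what the new path handling accepts; the import path is an assumption, and the expected output is derived from the helpers above rather than from an actual run.

```
import { parseQuery } from './internal/query/parseQuery.js';

// A filesystem path ending in .spec.ts is rewritten into a query first:
//   src/webgpu/shader/execution/memory_layout.spec.ts
//     -> webgpu:shader,execution,memory_layout,*
// and a dotted suite prefix (foo.bar.webgpu:...) is shortened to its last word.
const q = parseQuery('src/webgpu/shader/execution/memory_layout.spec.ts');
console.log(String(q)); // expected: webgpu:shader,execution,memory_layout,*
```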

View file

@ -286,6 +286,7 @@ loader,
queryToLoad,
{
subqueriesToExpand,
fullyExpandSubtrees = [],
maxChunkTime = Infinity
})
{
@ -303,6 +304,10 @@ queryToLoad,
// If toExpand == subquery, no expansion is needed (but it's still "seen").
if (ordering === Ordering.Equal) seenSubqueriesToExpand[i] = true;
return ordering !== Ordering.StrictSubset;
}) &&
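// Subtrees that overlap any fullyExpandSubtrees query are never collapsed (always expanded).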
fullyExpandSubtrees.every((toExpand) => {
const ordering = compareQueries(toExpand, subquery);
return ordering === Ordering.Unordered;
});
// L0 = suite-level, e.g. suite:*

View file

@ -1,3 +1,3 @@
// AUTO-GENERATED - DO NOT EDIT. See tools/gen_version.
export const version = 'ae15a59832989c22982acaeaccdf5d379afced62';
export const version = 'ad066ef01b1060802b4a2329443faead9f345d87';

View file

@ -303,7 +303,8 @@ new Int16Array(),
new Int32Array(),
new Float16Array(),
new Float32Array(),
new Float64Array()];
new Float64Array(),
new BigInt64Array()];

View file

@ -126,6 +126,7 @@
<meta name=variant content='?q=webgpu:api,operation,memory_sync,buffer,single_buffer:two_draws_in_the_same_render_pass:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,buffer,single_buffer:two_draws_in_the_same_render_bundle:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,buffer,single_buffer:two_dispatches_in_the_same_compute_pass:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,texture,readonly_depth_stencil:sampling_while_testing:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,texture,same_subresource:rw:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,texture,same_subresource:rw,single_pass,load_store:*'>
<meta name=variant content='?q=webgpu:api,operation,memory_sync,texture,same_subresource:rw,single_pass,load_resolve:*'>
@ -230,6 +231,7 @@
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:getCompilationInfo_returns:*'>
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:line_number_and_position:*'>
<meta name=variant content='?q=webgpu:api,operation,shader_module,compilation_info:offset_and_length:*'>
<meta name=variant content='?q=webgpu:api,operation,storage_texture,read_write:basic:*'>
<meta name=variant content='?q=webgpu:api,operation,texture_view,format_reinterpretation:texture_binding:*'>
<meta name=variant content='?q=webgpu:api,operation,texture_view,format_reinterpretation:render_and_resolve_attachment:*'>
<meta name=variant content='?q=webgpu:api,operation,texture_view,read:format:*'>
@ -634,6 +636,7 @@
<meta name=variant content='?q=webgpu:api,validation,image_copy,texture_related:origin_alignment:*'>
<meta name=variant content='?q=webgpu:api,validation,image_copy,texture_related:size_alignment:*'>
<meta name=variant content='?q=webgpu:api,validation,image_copy,texture_related:copy_rectangle:*'>
<meta name=variant content='?q=webgpu:api,validation,layout_shader_compat:pipeline_layout_shader_exact_match:*'>
<meta name=variant content='?q=webgpu:api,validation,query_set,create:count:*'>
<meta name=variant content='?q=webgpu:api,validation,query_set,destroy:twice:*'>
<meta name=variant content='?q=webgpu:api,validation,query_set,destroy:invalid_queryset:*'>
@ -702,6 +705,10 @@
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachments:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachmentBytesPerSample,aligned:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachmentBytesPerSample,unaligned:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,depthSlice,definedness:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,depthSlice,bound_check:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,depthSlice,overlaps,same_miplevel:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,depthSlice,overlaps,diff_miplevel:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,non_multisampled:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,sample_count:*'>
<meta name=variant content='?q=webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,sample_count:*'>
@ -867,21 +874,28 @@
<meta name=variant content='?q=webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_bundle_encoder:*'>
<meta name=variant content='?q=webgpu:api,validation,texture,rg11b10ufloat_renderable:create_render_pipeline:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,createBindGroup:viewDimension_matches_textureBindingViewDimension:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,createBindGroupLayout:unsupportedStorageTextureFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,cmds,copyTextureToBuffer:compressed:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,cmds,copyTextureToTexture:compressed:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,cmds,copyTextureToTexture:multisample:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,used:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,unused:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,used:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,unused:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,depth_stencil_state:depthBiasClamp:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,fragment_state:colorState:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:sample_mask:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:sample_index:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:interpolate:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:unsupportedStorageTextureFormats,computePipeline:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,shader_module:unsupportedStorageTextureFormats,renderPipeline:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,render_pipeline,vertex_state:maxVertexAttributesVertexIndexInstanceIndex:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:unsupportedTextureFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:unsupportedTextureViewFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:invalidTextureBindingViewDimension:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:depthOrArrayLayers_incompatible_with_textureBindingViewDimension:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:format_reinterpretation:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,createTexture:unsupportedStorageTextureFormats:*'>
<meta name=variant content='?q=webgpu:compat,api,validation,texture,cubeArray:cube_array:*'>
<meta name=variant content='?q=webgpu:examples:test_name:*'>
<meta name=variant content='?q=webgpu:examples:not_implemented_yet,without_plan:*'>
@ -1197,6 +1211,16 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2i:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2u:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2f:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:af_to_f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:af_to_i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:af_to_u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:af_to_vec2f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:vec2af_to_vec4f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:ai_to_i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:ai_to_u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:ai_to_f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:ai_to_vec2f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,bitcast:vec2ai_to_vec4f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,ceil:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,ceil:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,ceil:f16:*'>
@ -1246,6 +1270,8 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec2:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec3:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot:f16_vec4:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot4I8Packed:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dot4U8Packed:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdx:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdxCoarse:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,dpdxFine:f32:*'>
@ -1381,6 +1407,10 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack2x16unorm:pack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4x8snorm:pack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4x8unorm:pack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4xI8:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4xI8Clamp:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4xU8:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pack4xU8Clamp:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pow:abstract_float:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pow:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,pow:f16:*'>
@ -1525,12 +1555,24 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack2x16unorm:unpack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack4x8snorm:unpack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack4x8unorm:unpack:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack4xI8:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,unpack4xU8:basic:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,workgroupBarrier:stage:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,builtin,workgroupBarrier:barrier:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:read_full_object:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:read_ptr_to_member:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:read_ptr_to_element:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:write_full_object:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:write_ptr_to_member:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:write_ptr_to_element:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,call,user,ptr_params:mixed_ptr_parameters:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_arithmetic:negation:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:abstract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,af_assignment:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,ai_assignment:abstract:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,ai_assignment:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,ai_assignment:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:bool:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:u32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,bool_conversion:i32:*'>
@ -1560,6 +1602,9 @@
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_conversion:i32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_conversion:f32:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,i32_conversion:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,indirection:deref:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,indirection:deref_index:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,indirection:deref_member:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,u32_complement:u32_complement:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,u32_conversion:bool:*'>
<meta name=variant content='?q=webgpu:shader,execution,expression,unary,u32_conversion:u32:*'>
@ -1645,6 +1690,8 @@
<meta name=variant content='?q=webgpu:shader,execution,flow_control,while:while_continue:*'>
<meta name=variant content='?q=webgpu:shader,execution,flow_control,while:while_nested_break:*'>
<meta name=variant content='?q=webgpu:shader,execution,flow_control,while:while_nested_continue:*'>
<meta name=variant content='?q=webgpu:shader,execution,memory_layout:read_layout:*'>
<meta name=variant content='?q=webgpu:shader,execution,memory_layout:write_layout:*'>
<meta name=variant content='?q=webgpu:shader,execution,memory_model,adjacent:f16:*'>
<meta name=variant content='?q=webgpu:shader,execution,memory_model,atomicity:atomicity:*'>
<meta name=variant content='?q=webgpu:shader,execution,memory_model,barrier:workgroup_barrier_store_load:*'>
@ -1674,9 +1721,13 @@
<meta name=variant content='?q=webgpu:shader,execution,shader_io,compute_builtins:inputs:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,fragment_builtins:inputs,position:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,fragment_builtins:inputs,interStage:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,fragment_builtins:inputs,interStage,centroid:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,fragment_builtins:inputs,sample_index:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,fragment_builtins:inputs,front_facing:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,shared_structs:shared_with_buffer:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,shared_structs:shared_between_stages:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,shared_structs:shared_with_non_entry_point_function:*'>
<meta name=variant content='?q=webgpu:shader,execution,shader_io,workgroup_size:workgroup_size:*'>
<meta name=variant content='?q=webgpu:shader,execution,shadow:declaration:*'>
<meta name=variant content='?q=webgpu:shader,execution,shadow:builtin:*'>
<meta name=variant content='?q=webgpu:shader,execution,shadow:for_loop:*'>
@ -1684,6 +1735,9 @@
<meta name=variant content='?q=webgpu:shader,execution,shadow:loop:*'>
<meta name=variant content='?q=webgpu:shader,execution,shadow:switch:*'>
<meta name=variant content='?q=webgpu:shader,execution,shadow:if:*'>
<meta name=variant content='?q=webgpu:shader,execution,stage:basic_compute:*'>
<meta name=variant content='?q=webgpu:shader,execution,stage:basic_render:*'>
<meta name=variant content='?q=webgpu:shader,execution,statement,compound:decl:*'>
<meta name=variant content='?q=webgpu:shader,execution,statement,increment_decrement:scalar_i32_increment:*'>
<meta name=variant content='?q=webgpu:shader,execution,statement,increment_decrement:scalar_i32_increment_overflow:*'>
<meta name=variant content='?q=webgpu:shader,execution,statement,increment_decrement:scalar_u32_increment:*'>
@ -1707,6 +1761,8 @@
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_no_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_assert:*'>
<meta name=variant content='?q=webgpu:shader,validation,const_assert,const_assert:evaluation_stage:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,compound_statement:decl_conflict:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,compound_statement:decl_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_direct_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_indirect_recursion:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,const:no_indirect_recursion_via_array_size:*'>
@ -1720,6 +1776,17 @@
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_bad_store_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_address_space_never_uses_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,ptr_spelling:ptr_not_instantiable:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:module_scope_types:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:function_scope_types:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:module_scope_initializers:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:handle_initializer:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:initializer_kind:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:function_addrspace_at_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:binding_point_on_resources:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:binding_point_on_non_resources:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:binding_point_on_function_var:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:binding_collisions:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var:binding_collision_unused_helper:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:explicit_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:implicit_access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,decl,var_access_mode:read_access:*'>
@ -1734,6 +1801,9 @@
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acos:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acosh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,acosh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,arrayLength:bool_type:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,arrayLength:type:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,arrayLength:access_mode:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asin:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asin:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,asinh:values:*'>
@ -1746,6 +1816,8 @@
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atanh:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atanh:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,atomics:stage:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,barriers:only_in_compute:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,barriers:no_return_value:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_const_to_f32:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_const_to_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,bitcast:bad_type_constructible:*'>
@ -1775,6 +1847,8 @@
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp2:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,exp2:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,floor:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,floor:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,inverseSqrt:values:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,inverseSqrt:integer_argument:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,length:scalar:*'>
@ -1828,12 +1902,23 @@
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,unpack4xU8:supported:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,unpack4xU8:bad_args:*'>
<meta name=variant content='?q=webgpu:shader,validation,expression,call,builtin,unpack4xU8:must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,extension,pointer_composite_access:deref:*'>
<meta name=variant content='?q=webgpu:shader,validation,extension,pointer_composite_access:pointer:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers_to_array_elements:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers_to_array_elements_indirect:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers_to_struct_members:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_pointers_to_struct_members_indirect:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:one_pointer_one_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:subcalls:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:member_accessors:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:same_pointer_read_and_write:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:aliasing_inside_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_atomic_pointers:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_atomic_pointers_to_array_elements:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:two_atomic_pointers_to_struct_members:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:one_atomic_pointer_one_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,alias_analysis:workgroup_uniform_load:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:vertex_returns_position:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:entry_point_call_target:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:function_return_types:*'>
@ -1844,7 +1929,9 @@
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_names_must_differ:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_scope_is_function_body:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:param_number_matches_call:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:call_arg_types_match_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:call_arg_types_match_1_param:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:call_arg_types_match_2_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,functions,restrictions:call_arg_types_match_3_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:required_alignment:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,align:placement:*'>
@ -1855,14 +1942,18 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,blankspace:blankspace:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,blankspace:bom:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,break:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,break_if:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,break_if:non_bool_param:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,builtin:parse:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,builtin:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,comments:comments:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,comments:line_comment_eof:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,comments:line_comment_terminators:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,comments:unterminated_block_comment:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,compound:parse:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,const:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,const_assert:parse:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,continuing:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:valid_params:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:invalid_severity:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:warning_unknown_rule:*'>
@ -1871,6 +1962,7 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_directive:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_attribute_same_location:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:conflicting_attribute_different_location:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,diagnostic:after_other_directives:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,discard:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,enable:enable:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,identifiers:module_var_name:*'>
@ -1893,19 +1985,24 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,literal:f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:declaration:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:call:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:ignore_result_of_non_must_use_that_returns_call_of_must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:builtin_must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,must_use:builtin_no_must_use:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:vertex_parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:fragment_parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:compute_parsing:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:multiple_entry_points:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:duplicate_compute_on_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:duplicate_fragment_on_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:duplicate_vertex_on_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:extra_on_compute_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:extra_on_fragment_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:extra_on_vertex_function:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,pipeline_stage:placement:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,requires:requires:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,requires:wgsl_matches_api:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:module_scope_single:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:module_scope_multiple:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_enable:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_requires:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_diagnostic:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_struct_decl:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_member:*'>
<meta name=variant content='?q=webgpu:shader,validation,parse,semicolon:after_func_decl:*'>
@ -1951,7 +2048,6 @@
<meta name=variant content='?q=webgpu:shader,validation,parse,var_and_let:var_access_mode_bad_template_delim:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,binding:binding_without_group:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:stage_inout:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:type:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,builtins:nesting:*'>
@ -1965,7 +2061,6 @@
<meta name=variant content='?q=webgpu:shader,validation,shader_io,entry_point:no_entry_point_provided:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group_f16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group:group_without_binding:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:binding_attributes:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:private_module_scope:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,group_and_binding:private_function_scope:*'>
@ -1995,6 +2090,7 @@
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size_fp16:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size_non_struct:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,size:size_creation_fixed_footprint:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_fragment_shader:*'>
<meta name=variant content='?q=webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_vertex_shader:*'>
@ -2029,6 +2125,7 @@
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:binary_expressions:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:unary_expressions:*'>
<meta name=variant content='?q=webgpu:shader,validation,uniformity,uniformity:functions:*'>
<meta name=variant content='?q=webgpu:util,texture,color_space_conversions:util_matches_2d_canvas:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:unorm_texel_data_in_shader:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:snorm_texel_data_in_shader:*'>
<meta name=variant content='?q=webgpu:util,texture,texel_data:uint_texel_data_in_shader:*'>

View file

@ -2,14 +2,92 @@ Always use `getResourcePath()` to get the appropriate path to these resources depending
on the context (WPT, standalone, worker, etc.)
The test video files were generated with the ffmpeg cmds below:
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp8-bt601.webm
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libtheora -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-theora-bt601.ogv
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-h264-bt601.mp4
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp9-bt601.webm
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace bt709 -color_primaries bt709 -color_trc bt709 -color_range tv -vf scale=out_color_matrix=bt709:out_range=tv four-colors-vp9-bt709.webm
The test video files were generated with the ffmpeg cmds below:
```
// Generate four-colors-vp8-bt601.webm, mimeType: 'video/webm; codecs=vp8'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp8-bt601.webm
These rotation test files are copies of four-colors-h264-bt601.mp4 with metadata changes.
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=90 four-colors-h264-bt601-rotate-90.mp4
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=180 four-colors-h264-bt601-rotate-180.mp4
ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=270 four-colors-h264-bt601-rotate-270.mp4
// Generate four-colors-h264-bt601.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-h264-bt601.mp4
// Generate four-colors-vp9-bt601.webm, mimeType: 'video/webm; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp9-bt601.webm
// Generate four-colors-vp9-bt709.webm, mimeType: 'video/webm; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace bt709 -color_primaries bt709 -color_trc bt709 -color_range tv -vf scale=out_color_matrix=bt709:out_range=tv four-colors-vp9-bt709.webm
// Generate four-colors-vp9-bt601.mp4, mimeType: 'video/mp4; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp9-bt601.mp4
```
Generate video files to test rotation behaviour.
Use ffmpeg to rotate the video content x degrees in the cw direction (via `transpose`) and update the transform matrix in the metadata through `display_rotation` to x degrees, which applies a rotation in the ccw direction.
H264 rotated video files are generated by the ffmpeg cmds below:
```
// Generate four-colors-h264-bt601-rotate-90.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=2 temp.mp4
ffmpeg -display_rotation 270 -i temp.mp4 -c copy four-colors-h264-bt601-rotate-90.mp4
rm temp.mp4
// Generate four-colors-h264-bt601-rotate-180.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=2,transpose=2 temp.mp4
ffmpeg -display_rotation 180 -i temp.mp4 -c copy four-colors-h264-bt601-rotate-180.mp4
rm temp.mp4
// Generate four-colors-h264-bt601-rotate-270.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=1 temp.mp4
ffmpeg -display_rotation 90 -i temp.mp4 -c copy four-colors-h264-bt601-rotate-270.mp4
rm temp.mp4
```
VP9 rotated video files are generated by the ffmpeg cmds below:
```
// Generate four-colors-vp9-bt601-rotate-90.mp4, mimeType: 'video/mp4; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=2 temp.mp4
ffmpeg -display_rotation 270 -i temp.mp4 -c copy four-colors-vp9-bt601-rotate-90.mp4
rm temp.mp4
// Generate four-colors-vp9-bt601-rotate-180.mp4, mimeType: 'video/mp4; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=2,transpose=2 temp.mp4
ffmpeg -display_rotation 180 -i temp.mp4 -c copy four-colors-vp9-bt601-rotate-180.mp4
rm temp.mp4
// Generate four-colors-vp9-bt601-rotate-270.mp4, mimeType: 'video/mp4; codecs=vp9'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv -vf transpose=1 temp.mp4
ffmpeg -display_rotation 90 -i temp.mp4 -c copy four-colors-vp9-bt601-rotate-270.mp4
rm temp.mp4
```
Generate video files to test flip behaviour.
Use ffmpeg to flip the video content: `display_hflip` applies a horizontal flip and `display_vflip` applies a vertical flip.
H264 flipped video files are generated by the ffmpeg cmds below:
```
// Generate four-colors-h264-bt601-hflip.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv temp.mp4
ffmpeg -display_hflip -i temp.mp4 -c copy four-colors-h264-bt601-hflip.mp4
rm temp.mp4
// Generate four-colors-h264-bt601-vflip.mp4, mimeType: 'video/mp4; codecs=avc1.4d400c'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv temp.mp4
ffmpeg -display_vflip -i temp.mp4 -c copy four-colors-h264-bt601-vflip.mp4
rm temp.mp4
```
VP9 flipped video files are generated by the ffmpeg cmds below:
```
// Generate four-colors-vp9-bt601-hflip.mp4, mimeType: 'video/mp4; codecs=vp09.00.10.08'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv temp.mp4
ffmpeg -display_hflip -i temp.mp4 -c copy four-colors-vp9-bt601-hflip.mp4
rm temp.mp4
// Generate four-colors-vp9-bt601-vflip.mp4, mimeType: 'video/mp4; codecs=vp09.00.10.08'
ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 50 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv temp.mp4
ffmpeg -display_vflip -i temp.mp4 -c copy four-colors-vp9-bt601-vflip.mp4
rm temp.mp4
```

View file

@ -1,106 +1,107 @@
{
"webgpu/shader/execution/binary/af_addition.bin": "345a28b7",
"webgpu/shader/execution/binary/af_logical.bin": "27321b9c",
"webgpu/shader/execution/binary/af_division.bin": "c87f1318",
"webgpu/shader/execution/binary/af_matrix_addition.bin": "69cc5319",
"webgpu/shader/execution/binary/af_matrix_subtraction.bin": "c1d89b26",
"webgpu/shader/execution/binary/af_multiplication.bin": "6c3abeab",
"webgpu/shader/execution/binary/af_remainder.bin": "8bd97400",
"webgpu/shader/execution/binary/af_subtraction.bin": "5a7112fa",
"webgpu/shader/execution/binary/f16_addition.bin": "479f0e78",
"webgpu/shader/execution/binary/f16_logical.bin": "431e624e",
"webgpu/shader/execution/binary/f16_division.bin": "9e3455fc",
"webgpu/shader/execution/binary/f16_matrix_addition.bin": "c0cf381b",
"webgpu/shader/execution/binary/f16_matrix_matrix_multiplication.bin": "51edc282",
"webgpu/shader/execution/binary/f16_matrix_scalar_multiplication.bin": "a612226e",
"webgpu/shader/execution/binary/f16_matrix_subtraction.bin": "64491a9",
"webgpu/shader/execution/binary/f16_matrix_vector_multiplication.bin": "541863d2",
"webgpu/shader/execution/binary/f16_multiplication.bin": "1448ea2d",
"webgpu/shader/execution/binary/f16_remainder.bin": "cde237da",
"webgpu/shader/execution/binary/f16_subtraction.bin": "2739d887",
"webgpu/shader/execution/binary/f32_addition.bin": "f532fa83",
"webgpu/shader/execution/binary/f32_logical.bin": "ff723c9d",
"webgpu/shader/execution/binary/f32_division.bin": "1f9f3be2",
"webgpu/shader/execution/binary/f32_matrix_addition.bin": "68f190d7",
"webgpu/shader/execution/binary/f32_matrix_matrix_multiplication.bin": "a126aaf1",
"webgpu/shader/execution/binary/f32_matrix_scalar_multiplication.bin": "5fa08811",
"webgpu/shader/execution/binary/f32_matrix_subtraction.bin": "d9270923",
"webgpu/shader/execution/binary/f32_matrix_vector_multiplication.bin": "9e508eda",
"webgpu/shader/execution/binary/f32_multiplication.bin": "acb51036",
"webgpu/shader/execution/binary/f32_remainder.bin": "26abf1b",
"webgpu/shader/execution/binary/f32_subtraction.bin": "1a895776",
"webgpu/shader/execution/binary/i32_arithmetic.bin": "ae1c1d58",
"webgpu/shader/execution/binary/i32_comparison.bin": "46155b50",
"webgpu/shader/execution/binary/u32_arithmetic.bin": "bab5328e",
"webgpu/shader/execution/binary/u32_comparison.bin": "34d818e3",
"webgpu/shader/execution/abs.bin": "ea7276ee",
"webgpu/shader/execution/acos.bin": "6532fe83",
"webgpu/shader/execution/acosh.bin": "3ccf99c0",
"webgpu/shader/execution/asin.bin": "e4ca1497",
"webgpu/shader/execution/asinh.bin": "a825fb5e",
"webgpu/shader/execution/atan.bin": "83d7b73a",
"webgpu/shader/execution/atan2.bin": "5dc6e553",
"webgpu/shader/execution/atanh.bin": "ff869593",
"webgpu/shader/execution/bitcast.bin": "692546d7",
"webgpu/shader/execution/ceil.bin": "dc82c7f0",
"webgpu/shader/execution/clamp.bin": "8a6b7591",
"webgpu/shader/execution/cos.bin": "37d594c9",
"webgpu/shader/execution/cosh.bin": "e7ef1519",
"webgpu/shader/execution/cross.bin": "ca40471f",
"webgpu/shader/execution/degrees.bin": "2194e1ab",
"webgpu/shader/execution/determinant.bin": "a1ef2144",
"webgpu/shader/execution/distance.bin": "77edf94",
"webgpu/shader/execution/dot.bin": "e8243a47",
"webgpu/shader/execution/exp.bin": "ed53aa3b",
"webgpu/shader/execution/exp2.bin": "f09ed3ed",
"webgpu/shader/execution/faceForward.bin": "ba3a56fb",
"webgpu/shader/execution/floor.bin": "ae0f3a23",
"webgpu/shader/execution/fma.bin": "1f259679",
"webgpu/shader/execution/fract.bin": "6d019f67",
"webgpu/shader/execution/frexp.bin": "cf521b43",
"webgpu/shader/execution/inverseSqrt.bin": "10006497",
"webgpu/shader/execution/ldexp.bin": "a34df4c9",
"webgpu/shader/execution/length.bin": "d8c6cccf",
"webgpu/shader/execution/log.bin": "fb232771",
"webgpu/shader/execution/log2.bin": "5e0540a0",
"webgpu/shader/execution/max.bin": "3dcb4df7",
"webgpu/shader/execution/min.bin": "c1eee2dc",
"webgpu/shader/execution/mix.bin": "ec107b2a",
"webgpu/shader/execution/modf.bin": "3657eaa7",
"webgpu/shader/execution/normalize.bin": "6c6c95cd",
"webgpu/shader/execution/pack2x16float.bin": "9c1bbb0",
"webgpu/shader/execution/pow.bin": "b016609a",
"webgpu/shader/execution/quantizeToF16.bin": "f35538e5",
"webgpu/shader/execution/radians.bin": "63180198",
"webgpu/shader/execution/reflect.bin": "fbd9afd8",
"webgpu/shader/execution/refract.bin": "bd496e20",
"webgpu/shader/execution/round.bin": "2a940af1",
"webgpu/shader/execution/saturate.bin": "31c4b685",
"webgpu/shader/execution/sign.bin": "f5de501b",
"webgpu/shader/execution/sin.bin": "18ff92f7",
"webgpu/shader/execution/sinh.bin": "b6b0fa4f",
"webgpu/shader/execution/smoothstep.bin": "ec4560e1",
"webgpu/shader/execution/sqrt.bin": "746a3e0c",
"webgpu/shader/execution/step.bin": "73253e0c",
"webgpu/shader/execution/tan.bin": "33b15959",
"webgpu/shader/execution/tanh.bin": "39e57783",
"webgpu/shader/execution/transpose.bin": "864aa27a",
"webgpu/shader/execution/trunc.bin": "5a0d2a2",
"webgpu/shader/execution/unpack2x16float.bin": "e882c632",
"webgpu/shader/execution/unpack2x16snorm.bin": "1b639761",
"webgpu/shader/execution/unpack2x16unorm.bin": "c491aba5",
"webgpu/shader/execution/unpack4x8snorm.bin": "f656b21e",
"webgpu/shader/execution/unpack4x8unorm.bin": "9fe4db5a",
"webgpu/shader/execution/unary/af_arithmetic.bin": "833e6033",
"webgpu/shader/execution/unary/af_assignment.bin": "c533f757",
"webgpu/shader/execution/unary/bool_conversion.bin": "2b501a16",
"webgpu/shader/execution/unary/f16_arithmetic.bin": "4ac2bee0",
"webgpu/shader/execution/unary/f16_conversion.bin": "ea17ab50",
"webgpu/shader/execution/unary/f32_arithmetic.bin": "8f702442",
"webgpu/shader/execution/unary/f32_conversion.bin": "23ae43b3",
"webgpu/shader/execution/unary/i32_arithmetic.bin": "8704047",
"webgpu/shader/execution/unary/i32_complement.bin": "7dec3502",
"webgpu/shader/execution/unary/i32_conversion.bin": "45acb16d",
"webgpu/shader/execution/unary/u32_complement.bin": "e000b062",
"webgpu/shader/execution/unary/u32_conversion.bin": "f2ffbc61"
"webgpu/shader/execution/binary/af_addition.bin": "c7c51c39",
"webgpu/shader/execution/binary/af_logical.bin": "59188363",
"webgpu/shader/execution/binary/af_division.bin": "3dceb51d",
"webgpu/shader/execution/binary/af_matrix_addition.bin": "7dce888f",
"webgpu/shader/execution/binary/af_matrix_subtraction.bin": "1bf7797",
"webgpu/shader/execution/binary/af_multiplication.bin": "5bd9db91",
"webgpu/shader/execution/binary/af_remainder.bin": "9426ee9f",
"webgpu/shader/execution/binary/af_subtraction.bin": "823576a9",
"webgpu/shader/execution/binary/f16_addition.bin": "98228b04",
"webgpu/shader/execution/binary/f16_logical.bin": "7f4e05ab",
"webgpu/shader/execution/binary/f16_division.bin": "95e60c24",
"webgpu/shader/execution/binary/f16_matrix_addition.bin": "3503e24",
"webgpu/shader/execution/binary/f16_matrix_matrix_multiplication.bin": "5b0b6c54",
"webgpu/shader/execution/binary/f16_matrix_scalar_multiplication.bin": "7c2b67cc",
"webgpu/shader/execution/binary/f16_matrix_subtraction.bin": "3ab51e54",
"webgpu/shader/execution/binary/f16_matrix_vector_multiplication.bin": "c509829e",
"webgpu/shader/execution/binary/f16_multiplication.bin": "2cb9031c",
"webgpu/shader/execution/binary/f16_remainder.bin": "36a93ab6",
"webgpu/shader/execution/binary/f16_subtraction.bin": "5e564fad",
"webgpu/shader/execution/binary/f32_addition.bin": "6a004a96",
"webgpu/shader/execution/binary/f32_logical.bin": "29b78ff7",
"webgpu/shader/execution/binary/f32_division.bin": "f3960bd",
"webgpu/shader/execution/binary/f32_matrix_addition.bin": "b7a7252c",
"webgpu/shader/execution/binary/f32_matrix_matrix_multiplication.bin": "223c03dc",
"webgpu/shader/execution/binary/f32_matrix_scalar_multiplication.bin": "9fa0adb0",
"webgpu/shader/execution/binary/f32_matrix_subtraction.bin": "48fb4b77",
"webgpu/shader/execution/binary/f32_matrix_vector_multiplication.bin": "9a9a8e4f",
"webgpu/shader/execution/binary/f32_multiplication.bin": "6384c269",
"webgpu/shader/execution/binary/f32_remainder.bin": "668869cd",
"webgpu/shader/execution/binary/f32_subtraction.bin": "b720a489",
"webgpu/shader/execution/binary/i32_arithmetic.bin": "794ff384",
"webgpu/shader/execution/binary/i32_comparison.bin": "5742ecc8",
"webgpu/shader/execution/binary/u32_arithmetic.bin": "61c3eca",
"webgpu/shader/execution/binary/u32_comparison.bin": "8b463360",
"webgpu/shader/execution/abs.bin": "84759b96",
"webgpu/shader/execution/acos.bin": "d2fecf7e",
"webgpu/shader/execution/acosh.bin": "ae49ab4a",
"webgpu/shader/execution/asin.bin": "ef8d1ee0",
"webgpu/shader/execution/asinh.bin": "3fddcdc",
"webgpu/shader/execution/atan.bin": "fa01b779",
"webgpu/shader/execution/atan2.bin": "6544e27b",
"webgpu/shader/execution/atanh.bin": "dbfcf972",
"webgpu/shader/execution/bitcast.bin": "c871c53a",
"webgpu/shader/execution/ceil.bin": "a9259b43",
"webgpu/shader/execution/clamp.bin": "1c0cdd92",
"webgpu/shader/execution/cos.bin": "d32e388f",
"webgpu/shader/execution/cosh.bin": "353519fe",
"webgpu/shader/execution/cross.bin": "e1425e59",
"webgpu/shader/execution/degrees.bin": "303cb3",
"webgpu/shader/execution/determinant.bin": "b61bbfdb",
"webgpu/shader/execution/distance.bin": "9aee4ac6",
"webgpu/shader/execution/dot.bin": "2786d688",
"webgpu/shader/execution/exp.bin": "c791dee0",
"webgpu/shader/execution/exp2.bin": "7317dc87",
"webgpu/shader/execution/faceForward.bin": "2d7ecc3d",
"webgpu/shader/execution/floor.bin": "aad85712",
"webgpu/shader/execution/fma.bin": "cc8a275d",
"webgpu/shader/execution/fract.bin": "b651c53",
"webgpu/shader/execution/frexp.bin": "63c4d7b5",
"webgpu/shader/execution/inverseSqrt.bin": "8065a8af",
"webgpu/shader/execution/ldexp.bin": "e3d5da1e",
"webgpu/shader/execution/length.bin": "c4a77d",
"webgpu/shader/execution/log.bin": "19bd1b97",
"webgpu/shader/execution/log2.bin": "750e4aef",
"webgpu/shader/execution/max.bin": "1858ef6c",
"webgpu/shader/execution/min.bin": "27604a2a",
"webgpu/shader/execution/mix.bin": "e9174bda",
"webgpu/shader/execution/modf.bin": "faeb6938",
"webgpu/shader/execution/normalize.bin": "8325262b",
"webgpu/shader/execution/pack2x16float.bin": "3eab9d7e",
"webgpu/shader/execution/pow.bin": "a95ee5d5",
"webgpu/shader/execution/quantizeToF16.bin": "87ea1db0",
"webgpu/shader/execution/radians.bin": "8900171f",
"webgpu/shader/execution/reflect.bin": "34871ff1",
"webgpu/shader/execution/refract.bin": "8afd8086",
"webgpu/shader/execution/round.bin": "1f0d895",
"webgpu/shader/execution/saturate.bin": "208f6223",
"webgpu/shader/execution/sign.bin": "5d5eef01",
"webgpu/shader/execution/sin.bin": "88673d1b",
"webgpu/shader/execution/sinh.bin": "912004c4",
"webgpu/shader/execution/smoothstep.bin": "58521adc",
"webgpu/shader/execution/sqrt.bin": "6efab59a",
"webgpu/shader/execution/step.bin": "5d6a0269",
"webgpu/shader/execution/tan.bin": "1c2ed170",
"webgpu/shader/execution/tanh.bin": "69c199a6",
"webgpu/shader/execution/transpose.bin": "298a08a6",
"webgpu/shader/execution/trunc.bin": "b7f8c151",
"webgpu/shader/execution/unpack2x16float.bin": "ea2edfa",
"webgpu/shader/execution/unpack2x16snorm.bin": "982a4f26",
"webgpu/shader/execution/unpack2x16unorm.bin": "938aa3f3",
"webgpu/shader/execution/unpack4x8snorm.bin": "38d66fd9",
"webgpu/shader/execution/unpack4x8unorm.bin": "ea69a50e",
"webgpu/shader/execution/unary/af_arithmetic.bin": "ff253208",
"webgpu/shader/execution/unary/af_assignment.bin": "9b7be51c",
"webgpu/shader/execution/unary/bool_conversion.bin": "cc9afc72",
"webgpu/shader/execution/unary/f16_arithmetic.bin": "9d141077",
"webgpu/shader/execution/unary/f16_conversion.bin": "411d49cb",
"webgpu/shader/execution/unary/f32_arithmetic.bin": "cfbaceaf",
"webgpu/shader/execution/unary/f32_conversion.bin": "1e5cb1a7",
"webgpu/shader/execution/unary/i32_arithmetic.bin": "a10a7f07",
"webgpu/shader/execution/unary/i32_complement.bin": "4a7fabe2",
"webgpu/shader/execution/unary/i32_conversion.bin": "46a36b14",
"webgpu/shader/execution/unary/u32_complement.bin": "efe3fec7",
"webgpu/shader/execution/unary/u32_conversion.bin": "afebfab2",
"webgpu/shader/execution/unary/ai_assignment.bin": "dd599f63"
}

View file

@ -211,7 +211,7 @@ class F extends TextureTestMixin(GPUTest) {
align(dstBlocksPerRow * bytesPerBlock, 4);
if (isCompressedTextureFormat(dstTexture.format) && this.isCompatibility) {
assert(viewCompatible(srcFormat, dstFormat));
assert(viewCompatible(this.isCompatibility, srcFormat, dstFormat));
// compare by rendering. We need the expected texture to match
// the dstTexture so we'll create a texture where we supply
// all of the data in JavaScript.
@ -1376,6 +1376,9 @@ desc(
texture can only be 1.
`
).
beforeAllSubcases((t) => {
t.skipIf(t.isCompatibility, 'multisample textures are not copyable in compatibility mode');
}).
fn((t) => {
const textureSize = [32, 16, 1];
const kColorFormat = 'rgba8unorm';
@ -1564,6 +1567,9 @@ desc(
texture can only be 1.
`
).
beforeAllSubcases((t) => {
t.skipIf(t.isCompatibility, 'multisample textures are not copyable in compatibility mode');
}).
fn((t) => {
const textureSize = [32, 16, 1];
const kDepthFormat = 'depth24plus';

View file

@ -25,8 +25,9 @@
* copy_with_no_image_or_slice_padding_and_undefined_values: test that when copying a single row we can set any bytesPerRow value and when copying a single\
slice we can set rowsPerImage to 0. Also test setting offset, rowsPerImage, mipLevel, origin, origin.{x,y,z} to undefined.
Note: more coverage of memory synchronization for different read and write texture methods is in same_subresource.spec.ts.
* TODO:
- add another initMethod which renders the texture [3]
- test copyT2B with buffer size not divisible by 4 (not done because expectContents 4-byte alignment)
- Convert the float32 values in initialData into the ones compatible to the depth aspect of
depthFormats when depth16unorm is supported by the browsers in
@ -86,7 +87,7 @@ import { findFailedPixels } from '../../../util/texture/texture_ok.js';
* - PartialCopyT2B: do CopyT2B to check that the part of the texture we copied to with InitMethod
* matches the data we were copying and that we don't overwrite any data in the target buffer that
* we're not supposed to - that's primarily for testing CopyT2B functionality.
* - FullCopyT2B: do CopyT2B on the whole texture and check wether the part we copied to matches
* - FullCopyT2B: do CopyT2B on the whole texture and check whether the part we copied to matches
* the data we were copying and that the nothing else was modified - that's primarily for testing
* WriteTexture and CopyB2T.
*
@ -1357,8 +1358,6 @@ class ImageCopyTest extends TextureTestMixin(GPUTest) {
/**
* This is a helper function used for filtering test parameters
*
* [3]: Modify this after introducing tests with rendering.
*/
function formatCanBeTested({ format }) {
return kTextureFormatInfo[format].color.copyDst && kTextureFormatInfo[format].color.copySrc;
@ -1520,6 +1519,12 @@ works for every format with 2d and 2d-array textures.
offset + bytesInCopyExtentPerRow { ==, > } bytesPerRow
offset > bytesInACompleteCopyImage
Covers special cases for OpenGL Compat:
offset % 4 > 0 while:
- padding bytes at end of each row/layer: bytesPerRow % 256 > 0 || rowsPerImage > copyDepth
- rows/layers are compact: bytesPerRow % 256 == 0 && rowsPerImage == copyDepth
- padding bytes at front and end of the same 4-byte word: format == 'r8snorm' && copyWidth <= 2
TODO: Cover the special code paths for 3D textures in D3D12.
TODO: Make a variant for depth-stencil formats.
`
@ -1534,7 +1539,19 @@ filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension,
beginSubcases().
combineWithParams(kOffsetsAndSizesParams.offsetsAndPaddings).
combine('copyDepth', kOffsetsAndSizesParams.copyDepth) // 2d and 2d-array textures
.unless((p) => p.dimension === '1d' && p.copyDepth !== 1)
.combine('copyWidth', [3, 1, 2, 127, 128, 255, 256]) // copyWidth === 3 is the default. The others cover special cases for r8snorm and rg8snorm on compatibility mode.
.filter(({ format, copyWidth }) => {
switch (format) {
case 'r8snorm':
case 'rg8snorm':
return true;
default:
// Restrict test parameters to save run time.
return copyWidth === 3;
}
}).
combine('rowsPerImageEqualsCopyHeight', [true, false]).
unless((p) => p.dimension === '1d' && p.copyDepth !== 1)
).
beforeAllSubcases((t) => {
const info = kTextureFormatInfo[t.params.format];
@ -1549,26 +1566,44 @@ fn((t) => {
format,
dimension,
initMethod,
checkMethod
checkMethod,
copyWidth,
rowsPerImageEqualsCopyHeight
} = t.params;
// Skip test cases designed to cover special cases in compatibility mode, to save run time.
if (!(t.isCompatibility && (format === 'r8snorm' || format === 'rg8snorm'))) {
if (rowsPerImageEqualsCopyHeight === false) {
t.skip(
'rowsPerImageEqualsCopyHeight === false is only for r8snorm and rg8snorm on compatibility mode'
);
}
if (copyWidth !== 3) {
t.skip('copyWidth !== 3 is only for r8snorm and rg8snorm on compatibility mode');
}
}
const info = kTextureFormatInfo[format];
const offset = offsetInBlocks * info.color.bytes;
const copyHeight = 3;
const copySize = {
width: 3 * info.blockWidth,
height: 3 * info.blockHeight,
width: copyWidth * info.blockWidth,
height: copyHeight * info.blockHeight,
depthOrArrayLayers: copyDepth
};
let textureHeight = 4 * info.blockHeight;
let rowsPerImage = 3;
const bytesPerRow = 256;
let rowsPerImage = rowsPerImageEqualsCopyHeight ? copyHeight : copyHeight + 1;
const bytesPerRow = align(copyWidth * info.bytesPerBlock, 256);
if (dimension === '1d') {
copySize.height = 1;
textureHeight = info.blockHeight;
rowsPerImage = 1;
}
const textureSize = [4 * info.blockWidth, textureHeight, copyDepth];
// Make the texture one block wider than the copy to make sure we are doing a partial copy.
const textureSize = [(copyWidth + 1) * info.blockWidth, textureHeight, copyDepth];
const minDataSize = dataBytesForCopyOrFail({
layout: { offset, bytesPerRow, rowsPerImage },
@ -1578,7 +1613,7 @@ fn((t) => {
});
const dataSize = minDataSize + dataPaddingInBytes;
// We're copying a (3 x 3 x copyDepth) (in texel blocks) part of a (4 x 4 x copyDepth)
// We're copying a (copyWidth x 3 x copyDepth) (in texel blocks) part of a ((copyWidth + 1) x 4 x copyDepth)
// (in texel blocks) texture with no origin.
t.uploadTextureAndVerifyCopy({
textureDataLayout: { offset, bytesPerRow, rowsPerImage },

View file

@ -0,0 +1,329 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Memory synchronization tests for depth-stencil attachments in a single pass, with checks for readonlyness.
`;import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { kDepthStencilFormats, kTextureFormatInfo } from '../../../../format_info.js';
import { GPUTest } from '../../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('sampling_while_testing').
desc(
`Tests concurrent sampling and testing of readonly depth-stencil attachments in a render pass.
- Test for all depth-stencil formats.
- Test for all valid combinations of depth/stencilReadOnly.
In particular this test checks that a non-readonly aspect can be rendered to, and used for depth/stencil
testing while the other one is used for sampling.
`
).
params((p) =>
p.
combine('format', kDepthStencilFormats) //
.combine('depthReadOnly', [true, false, undefined]).
combine('stencilReadOnly', [true, false, undefined]).
filter((p) => {
const info = kTextureFormatInfo[p.format];
const depthMatch = info.depth === undefined === (p.depthReadOnly === undefined);
const stencilMatch = info.stencil === undefined === (p.stencilReadOnly === undefined);
return depthMatch && stencilMatch;
})
).
beforeAllSubcases((t) => {
const { format } = t.params;
const formatInfo = kTextureFormatInfo[format];
const hasDepth = formatInfo.depth !== undefined;
const hasStencil = formatInfo.stencil !== undefined;
t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
t.skipIf(
t.isCompatibility && hasDepth && hasStencil,
'compatibility mode does not support different TEXTURE_BINDING views of the same texture in a single draw call'
);
}).
fn((t) => {
const { format, depthReadOnly, stencilReadOnly } = t.params;
const formatInfo = kTextureFormatInfo[format];
const hasDepth = formatInfo.depth !== undefined;
const hasStencil = formatInfo.stencil !== undefined;
// The 3x3 depth stencil texture used for the tests.
const ds = t.device.createTexture({
label: 'testTexture',
size: [3, 3],
format,
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING
});
t.trackForCleanup(ds);
// Fill the texture along the X axis with stencil values 1, 2, 3 and along the Y axis depth
// values 0.1, 0.2, 0.3. The depth value is written using @builtin(frag_depth) while the
// stencil is written using stencil operation and modifying the stencilReference.
const initModule = t.device.createShaderModule({
code: `
@vertex fn vs(
@builtin(instance_index) x : u32, @builtin(vertex_index) y : u32
) -> @builtin(position) vec4f {
let texcoord = (vec2f(f32(x), f32(y)) + vec2f(0.5)) / 3;
return vec4f((texcoord * 2) - vec2f(1.0), 0, 1);
}
@fragment fn fs_with_depth(@builtin(position) pos : vec4f) -> @builtin(frag_depth) f32 {
return (pos.y + 0.5) / 10;
}
@fragment fn fs_no_depth() {
}
`
});
const initPipeline = t.device.createRenderPipeline({
layout: 'auto',
label: 'initPipeline',
vertex: { module: initModule },
fragment: {
module: initModule,
targets: [],
entryPoint: hasDepth ? 'fs_with_depth' : 'fs_no_depth'
},
depthStencil: {
format,
...(hasDepth && {
depthWriteEnabled: true,
depthCompare: 'always'
}),
...(hasStencil && {
stencilBack: { compare: 'always', passOp: 'replace' },
stencilFront: { compare: 'always', passOp: 'replace' }
})
},
primitive: { topology: 'point-list' }
});
const encoder = t.device.createCommandEncoder();
const initPass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: ds.createView(),
...(hasDepth && {
depthStoreOp: 'store',
depthLoadOp: 'clear',
depthClearValue: 0
}),
...(hasStencil && {
stencilStoreOp: 'store',
stencilLoadOp: 'clear',
stencilClearValue: 0
})
}
});
initPass.setPipeline(initPipeline);
for (let i = 0; i < 3; i++) {
initPass.setStencilReference(i + 1);
// Draw 3 points (Y = 0, 1, 2) at X = instance_index = i.
initPass.draw(3, 1, 0, i);
}
initPass.end();
// Perform the actual test:
// - The shader outputs depth 0.15 and stencil 2 (via stencilReference).
// - Test that the fragdepth / stencilref must be <= what's in the depth-stencil attachment.
// -> Fragments that have depth 0.1 or stencil 1 are tested out.
// - Test that sampling the depth / stencil (when possible) is <= 0.2 for depth, <= 2 for stencil
// -> Fragments that have depth 0.3 or stencil 3 are discarded if that aspect is readonly.
// - Write the depth / increment the stencil if the aspect is not readonly.
// -> After the test, fragments that passed will have non-readonly aspects updated.
const kFragDepth = 0.15;
const kStencilRef = 2;
const testAndCheckModule = t.device.createShaderModule({
code: `
@group(0) @binding(0) var depthTex : texture_2d<f32>;
@group(0) @binding(1) var stencilTex : texture_2d<u32>;
@vertex fn full_quad_vs(@builtin(vertex_index) id : u32) -> @builtin(position) vec4f {
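// A single oversized triangle that covers the whole 3x3 target at the test depth (kFragDepth).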
let pos = array(vec2f(-3, -1), vec2(3, -1), vec2(0, 2));
return vec4f(pos[id], ${kFragDepth}, 1.0);
}
@fragment fn test_texture(@builtin(position) pos : vec4f) {
let texel = vec2u(floor(pos.xy));
if ${!!stencilReadOnly} && textureLoad(stencilTex, texel, 0).r > 2 {
discard;
}
if ${!!depthReadOnly} && textureLoad(depthTex, texel, 0).r > 0.21 {
discard;
}
}
@fragment fn check_texture(@builtin(position) pos : vec4f) -> @location(0) u32 {
let texel = vec2u(floor(pos.xy));
// The current values in the framebuffer.
let initStencil = texel.x + 1;
let initDepth = f32(texel.y + 1) / 10.0;
// Expected results of the test_texture step.
let stencilTestPasses = !${hasStencil} || ${kStencilRef} <= initStencil;
let depthTestPasses = !${hasDepth} || ${kFragDepth} <= initDepth;
let fsDiscards = (${!!stencilReadOnly} && initStencil > 2) ||
(${!!depthReadOnly} && initDepth > 0.21);
// Compute the values that should be in the framebuffer.
var stencil = initStencil;
var depth = initDepth;
// When the fragments aren't discarded, fragment output operations happen.
if depthTestPasses && stencilTestPasses && !fsDiscards {
if ${!stencilReadOnly} {
stencil += 1;
}
if ${!depthReadOnly} {
depth = ${kFragDepth};
}
}
if ${hasStencil} && textureLoad(stencilTex, texel, 0).r != stencil {
return 0;
}
if ${hasDepth} && abs(textureLoad(depthTex, texel, 0).r - depth) > 0.01 {
return 0;
}
return 1;
}
`
});
const testPipeline = t.device.createRenderPipeline({
label: 'testPipeline',
layout: 'auto',
vertex: { module: testAndCheckModule },
fragment: { module: testAndCheckModule, entryPoint: 'test_texture', targets: [] },
depthStencil: {
format,
...(hasDepth && {
depthCompare: 'less-equal',
depthWriteEnabled: !depthReadOnly
}),
...(hasStencil && {
stencilBack: {
compare: 'less-equal',
passOp: stencilReadOnly ? 'keep' : 'increment-clamp'
},
stencilFront: {
compare: 'less-equal',
passOp: stencilReadOnly ? 'keep' : 'increment-clamp'
}
})
},
primitive: { topology: 'triangle-list' }
});
// Make fake stencil or depth textures to put in the bindgroup if the aspect is not readonly.
const fakeStencil = t.device.createTexture({
label: 'fakeStencil',
format: 'r32uint',
size: [1, 1],
usage: GPUTextureUsage.TEXTURE_BINDING
});
t.trackForCleanup(fakeStencil);
const fakeDepth = t.device.createTexture({
label: 'fakeDepth',
format: 'r32float',
size: [1, 1],
usage: GPUTextureUsage.TEXTURE_BINDING
});
t.trackForCleanup(fakeDepth);
const stencilView = stencilReadOnly ?
ds.createView({ aspect: 'stencil-only' }) :
fakeStencil.createView();
const depthView = depthReadOnly ?
ds.createView({ aspect: 'depth-only' }) :
fakeDepth.createView();
const testBindGroup = t.device.createBindGroup({
layout: testPipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: depthView },
{ binding: 1, resource: stencilView }]
});
// Run the test.
const testPass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: ds.createView(),
...(hasDepth && (
depthReadOnly ?
{ depthReadOnly: true } :
{
depthStoreOp: 'store',
depthLoadOp: 'load'
})),
...(hasStencil && (
stencilReadOnly ?
{ stencilReadOnly: true } :
{
stencilStoreOp: 'store',
stencilLoadOp: 'load'
}))
}
});
testPass.setPipeline(testPipeline);
testPass.setStencilReference(kStencilRef);
testPass.setBindGroup(0, testBindGroup);
testPass.draw(3);
testPass.end();
// Check that the contents of the textures are what we expect. See the shader module for the
// computation of what's expected, it writes a 1 on success, 0 otherwise.
const checkPipeline = t.device.createRenderPipeline({
label: 'checkPipeline',
layout: 'auto',
vertex: { module: testAndCheckModule },
fragment: {
module: testAndCheckModule,
entryPoint: 'check_texture',
targets: [{ format: 'r32uint' }]
},
primitive: { topology: 'triangle-list' }
});
const checkBindGroup = t.device.createBindGroup({
layout: checkPipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: hasDepth ? ds.createView({ aspect: 'depth-only' }) : fakeDepth.createView()
},
{
binding: 1,
resource: hasStencil ?
ds.createView({ aspect: 'stencil-only' }) :
fakeStencil.createView()
}]
});
const resultTexture = t.device.createTexture({
label: 'resultTexture',
format: 'r32uint',
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
size: [3, 3]
});
const checkPass = encoder.beginRenderPass({
colorAttachments: [
{
view: resultTexture.createView(),
loadOp: 'clear',
clearValue: [0, 0, 0, 0],
storeOp: 'store'
}]
});
checkPass.setPipeline(checkPipeline);
checkPass.setBindGroup(0, checkBindGroup);
checkPass.draw(3);
checkPass.end();
t.queue.submit([encoder.finish()]);
// The check texture should be full of success (a.k.a. 1)!
t.expectSingleColor(resultTexture, resultTexture.format, { size: [3, 3, 1], exp: { R: 1 } });
});

View file

@ -435,7 +435,7 @@ class F extends TextureTestMixin(GPUTest) {
sampleMask,
fragmentShaderOutputMask)
{
const buffer = this.copySinglePixelTextureToBufferUsingComputePass(
const buffer = this.copy2DTextureToBufferUsingComputePass(
TypeF32, // correspond to 'rgba8unorm' format
4,
texture.createView(),
@ -459,7 +459,7 @@ class F extends TextureTestMixin(GPUTest) {
sampleMask,
fragmentShaderOutputMask)
{
const buffer = this.copySinglePixelTextureToBufferUsingComputePass(
const buffer = this.copy2DTextureToBufferUsingComputePass(
// Use f32 as the scalar type for depth (depth24plus, depth32float)
// Use u32 as the scalar type for stencil (stencil8)
aspect === 'depth-only' ? TypeF32 : TypeU32,
@ -702,7 +702,7 @@ fn(async (t) => {
2
);
const colorBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
const colorBuffer = t.copy2DTextureToBufferUsingComputePass(
TypeF32, // correspond to 'rgba8unorm' format
4,
color.createView(),
@ -714,7 +714,7 @@ fn(async (t) => {
});
colorResultPromises.push(colorResult);
const depthBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
const depthBuffer = t.copy2DTextureToBufferUsingComputePass(
TypeF32, // correspond to 'depth24plus-stencil8' format
1,
depthStencil.createView({ aspect: 'depth-only' }),
@ -726,7 +726,7 @@ fn(async (t) => {
});
depthResultPromises.push(depthResult);
const stencilBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
const stencilBuffer = t.copy2DTextureToBufferUsingComputePass(
TypeU32, // correspond to 'depth24plus-stencil8' format
1,
depthStencil.createView({ aspect: 'stencil-only' }),

View file

@ -467,13 +467,8 @@ fn((t) => {
@vertex fn main(
@builtin(vertex_index) VertexIndex : u32,
@builtin(instance_index) InstanceIndex : u32) -> Output {
// TODO: remove workaround for Tint unary array access broke
var zv : array<vec2<f32>, 4> = array<vec2<f32>, 4>(
vec2<f32>(0.2, 0.2),
vec2<f32>(0.3, 0.3),
vec2<f32>(-0.1, -0.1),
vec2<f32>(1.1, 1.1));
let z : f32 = zv[InstanceIndex].x;
let zv = array(0.2, 0.3, -0.1, 1.1);
let z = zv[InstanceIndex];
var output : Output;
output.Position = vec4<f32>(0.5, 0.5, z, 1.0);

View file

@ -304,6 +304,12 @@ u //
}]
)
).
beforeAllSubcases((t) => {
t.skipIf(
t.isCompatibility && t.params.biasClamp !== 0,
'non zero depthBiasClamp is not supported in compatibility mode'
);
}).
fn((t) => {
t.runDepthBiasTest('depth32float', t.params);
});
@ -346,6 +352,12 @@ combineWithParams([
}]
)
).
beforeAllSubcases((t) => {
t.skipIf(
t.isCompatibility && t.params.biasClamp !== 0,
'non zero depthBiasClamp is not supported in compatibility mode'
);
}).
fn((t) => {
const { format } = t.params;
t.runDepthBiasTestFor24BitFormat(format, t.params);

View file

@ -3,6 +3,7 @@
**/import { assert } from '../../../../../common/util/util.js';import { kTextureFormatInfo } from '../../../../format_info.js';import { virtualMipSize } from '../../../../util/texture/base.js';
export const checkContentsByBufferCopy = (
t,
params,

View file

@ -4,6 +4,7 @@
import { virtualMipSize } from '../../../../util/texture/base.js';
function makeFullscreenVertexModule(device) {
return device.createShaderModule({
code: `

View file

@ -8,6 +8,7 @@ import {
'../../../../util/texture/texel_data.js';
export const checkContentsBySampling = (
t,
params,

View file

@ -0,0 +1,533 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { kUnitCaseParamsBuilder } from
'../../../../../common/framework/params_builder.js';
import { assert, unreachable } from '../../../../../common/util/util.js';
import { kTextureAspects, kTextureDimensions } from '../../../../capability_info.js';
import { GPUConst } from '../../../../constants.js';
import {
kTextureFormatInfo,
kUncompressedTextureFormats,
textureDimensionAndFormatCompatible } from
'../../../../format_info.js';
import { GPUTest } from '../../../../gpu_test.js';
import { virtualMipSize } from '../../../../util/texture/base.js';
import { createTextureUploadBuffer } from '../../../../util/texture/layout.js';
import { SubresourceRange } from '../../../../util/texture/subresource.js';
import {
kTexelRepresentationInfo } from
'../../../../util/texture/texel_data.js';
export let UninitializeMethod = /*#__PURE__*/function (UninitializeMethod) {UninitializeMethod["Creation"] = "Creation";UninitializeMethod["StoreOpClear"] = "StoreOpClear";return UninitializeMethod;}({});
// The texture was rendered to with GPUStoreOp "clear"
const kUninitializeMethods = Object.keys(UninitializeMethod);
export let ReadMethod = /*#__PURE__*/function (ReadMethod) {ReadMethod["Sample"] = "Sample";ReadMethod["CopyToBuffer"] = "CopyToBuffer";ReadMethod["CopyToTexture"] = "CopyToTexture";ReadMethod["DepthTest"] = "DepthTest";ReadMethod["StencilTest"] = "StencilTest";ReadMethod["ColorBlending"] = "ColorBlending";ReadMethod["Storage"] = "Storage";return ReadMethod;}({});
// Read the texture as a storage texture
// Test with these mip level counts
const kMipLevelCounts = [1, 5];
// For each mip level count, define the mip ranges to leave uninitialized.
const kUninitializedMipRangesToTest = {
1: [{ begin: 0, end: 1 }], // Test the only mip
5: [
{ begin: 0, end: 2 },
{ begin: 3, end: 4 }]
// Test a range and a single mip
};
// Test with these sample counts.
const kSampleCounts = [1, 4];
// Test with these layer counts.
// For each layer count, define the layers to leave uninitialized.
const kUninitializedLayerRangesToTest = {
1: [{ begin: 0, end: 1 }], // Test the only layer
7: [
{ begin: 2, end: 4 },
{ begin: 6, end: 7 }]
// Test a range and a single layer
};
// Enums to abstract over color / depth / stencil values in textures. Depending on the texture format,
// the data for each value may have a different representation. These enums are converted to a
// representation such that their values can be compared. ex.) An integer is needed to upload to an
// unsigned normalized format, but its value is read as a float in the shader.
export let InitializedState = /*#__PURE__*/function (InitializedState) {InitializedState[InitializedState["Canary"] = 0] = "Canary";InitializedState[InitializedState["Zero"] = 1] = "Zero";return InitializedState;}({});
// We check that uninitialized subresources are in this state when read back.
const initializedStateAsFloat = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 1
};
const initializedStateAsUint = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 1
};
const initializedStateAsSint = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: -1
};
function initializedStateAsColor(
state,
format)
{
let value;
if (format.indexOf('uint') !== -1) {
value = initializedStateAsUint[state];
} else if (format.indexOf('sint') !== -1) {
value = initializedStateAsSint[state];
} else {
value = initializedStateAsFloat[state];
}
return [value, value, value, value];
}
const initializedStateAsDepth = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 0.8
};
const initializedStateAsStencil = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 42
};
export function getRequiredTextureUsage(
format,
sampleCount,
uninitializeMethod,
readMethod)
{
let usage = GPUConst.TextureUsage.COPY_DST;
switch (uninitializeMethod) {
case UninitializeMethod.Creation:
break;
case UninitializeMethod.StoreOpClear:
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
break;
default:
unreachable();
}
switch (readMethod) {
case ReadMethod.CopyToBuffer:
case ReadMethod.CopyToTexture:
usage |= GPUConst.TextureUsage.COPY_SRC;
break;
case ReadMethod.Sample:
usage |= GPUConst.TextureUsage.TEXTURE_BINDING;
break;
case ReadMethod.Storage:
usage |= GPUConst.TextureUsage.STORAGE_BINDING;
break;
case ReadMethod.DepthTest:
case ReadMethod.StencilTest:
case ReadMethod.ColorBlending:
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
break;
default:
unreachable();
}
if (sampleCount > 1) {
// Copies to multisampled textures are not allowed. We need OutputAttachment to initialize
// canary data in multisampled textures.
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
}
if (!kTextureFormatInfo[format].copyDst) {
// Copies are not possible. We need OutputAttachment to initialize
// canary data.
assert(kTextureFormatInfo[format].renderable);
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
}
return usage;
}
export class TextureZeroInitTest extends GPUTest {
constructor(sharedState, rec, params) {
super(sharedState, rec, params);
this.p = params;
const stateToTexelComponents = (state) => {
const [R, G, B, A] = initializedStateAsColor(state, this.p.format);
return {
R,
G,
B,
A,
Depth: initializedStateAsDepth[state],
Stencil: initializedStateAsStencil[state]
};
};
this.stateToTexelComponents = {
[InitializedState.Zero]: stateToTexelComponents(InitializedState.Zero),
[InitializedState.Canary]: stateToTexelComponents(InitializedState.Canary)
};
}
get textureWidth() {
let width = 1 << this.p.mipLevelCount;
if (this.p.nonPowerOfTwo) {
width = 2 * width - 1;
}
return width;
}
get textureHeight() {
if (this.p.dimension === '1d') {
return 1;
}
let height = 1 << this.p.mipLevelCount;
if (this.p.nonPowerOfTwo) {
height = 2 * height - 1;
}
return height;
}
get textureDepth() {
return this.p.dimension === '3d' ? 11 : 1;
}
get textureDepthOrArrayLayers() {
return this.p.dimension === '2d' ? this.p.layerCount : this.textureDepth;
}
// Used to iterate subresources and check that their uninitialized contents are zero when accessed
*iterateUninitializedSubresources() {
for (const mipRange of kUninitializedMipRangesToTest[this.p.mipLevelCount]) {
for (const layerRange of kUninitializedLayerRangesToTest[this.p.layerCount]) {
yield new SubresourceRange({ mipRange, layerRange });
}
}
}
// Used to iterate and initialize other subresources not checked for zero-initialization.
// Zero-initialization of uninitialized subresources should not have side effects on already
// initialized subresources.
*iterateInitializedSubresources() {
const uninitialized = new Array(this.p.mipLevelCount);
for (let level = 0; level < uninitialized.length; ++level) {
uninitialized[level] = new Array(this.p.layerCount);
}
for (const subresources of this.iterateUninitializedSubresources()) {
for (const { level, layer } of subresources.each()) {
uninitialized[level][layer] = true;
}
}
for (let level = 0; level < uninitialized.length; ++level) {
for (let layer = 0; layer < uninitialized[level].length; ++layer) {
if (!uninitialized[level][layer]) {
yield new SubresourceRange({
mipRange: { begin: level, count: 1 },
layerRange: { begin: layer, count: 1 }
});
}
}
}
}
*generateTextureViewDescriptorsForRendering(
aspect,
subresourceRange)
{
const viewDescriptor = {
dimension: '2d',
aspect
};
if (subresourceRange === undefined) {
return viewDescriptor;
}
for (const { level, layer } of subresourceRange.each()) {
yield {
...viewDescriptor,
baseMipLevel: level,
mipLevelCount: 1,
baseArrayLayer: layer,
arrayLayerCount: 1
};
}
}
initializeWithStoreOp(
state,
texture,
subresourceRange)
{
const commandEncoder = this.device.createCommandEncoder();
commandEncoder.pushDebugGroup('initializeWithStoreOp');
for (const viewDescriptor of this.generateTextureViewDescriptorsForRendering(
'all',
subresourceRange
)) {
if (kTextureFormatInfo[this.p.format].color) {
commandEncoder.
beginRenderPass({
colorAttachments: [
{
view: texture.createView(viewDescriptor),
storeOp: 'store',
clearValue: initializedStateAsColor(state, this.p.format),
loadOp: 'clear'
}]
}).
end();
} else {
const depthStencilAttachment = {
view: texture.createView(viewDescriptor)
};
if (kTextureFormatInfo[this.p.format].depth) {
depthStencilAttachment.depthClearValue = initializedStateAsDepth[state];
depthStencilAttachment.depthLoadOp = 'clear';
depthStencilAttachment.depthStoreOp = 'store';
}
if (kTextureFormatInfo[this.p.format].stencil) {
depthStencilAttachment.stencilClearValue = initializedStateAsStencil[state];
depthStencilAttachment.stencilLoadOp = 'clear';
depthStencilAttachment.stencilStoreOp = 'store';
}
commandEncoder.
beginRenderPass({
colorAttachments: [],
depthStencilAttachment
}).
end();
}
}
commandEncoder.popDebugGroup();
this.queue.submit([commandEncoder.finish()]);
}
initializeWithCopy(
texture,
state,
subresourceRange)
{
assert(this.p.format in kTextureFormatInfo);
const format = this.p.format;
const firstSubresource = subresourceRange.each().next().value;
assert(typeof firstSubresource !== 'undefined');
const [largestWidth, largestHeight, largestDepth] = virtualMipSize(
this.p.dimension,
[this.textureWidth, this.textureHeight, this.textureDepth],
firstSubresource.level
);
const rep = kTexelRepresentationInfo[format];
const texelData = new Uint8Array(rep.pack(rep.encode(this.stateToTexelComponents[state])));
const { buffer, bytesPerRow, rowsPerImage } = createTextureUploadBuffer(
texelData,
this.device,
format,
this.p.dimension,
[largestWidth, largestHeight, largestDepth]
);
const commandEncoder = this.device.createCommandEncoder();
for (const { level, layer } of subresourceRange.each()) {
const [width, height, depth] = virtualMipSize(
this.p.dimension,
[this.textureWidth, this.textureHeight, this.textureDepth],
level
);
commandEncoder.copyBufferToTexture(
{
buffer,
bytesPerRow,
rowsPerImage
},
{ texture, mipLevel: level, origin: { x: 0, y: 0, z: layer } },
{ width, height, depthOrArrayLayers: depth }
);
}
this.queue.submit([commandEncoder.finish()]);
buffer.destroy();
}
initializeTexture(
texture,
state,
subresourceRange)
{
if (this.p.sampleCount > 1 || !kTextureFormatInfo[this.p.format].copyDst) {
// Copies to multisampled textures not yet specified.
// Use a storeOp for now.
assert(kTextureFormatInfo[this.p.format].renderable);
this.initializeWithStoreOp(state, texture, subresourceRange);
} else {
this.initializeWithCopy(texture, state, subresourceRange);
}
}
discardTexture(texture, subresourceRange) {
const commandEncoder = this.device.createCommandEncoder();
commandEncoder.pushDebugGroup('discardTexture');
for (const desc of this.generateTextureViewDescriptorsForRendering('all', subresourceRange)) {
if (kTextureFormatInfo[this.p.format].color) {
commandEncoder.
beginRenderPass({
colorAttachments: [
{
view: texture.createView(desc),
storeOp: 'discard',
loadOp: 'load'
}]
}).
end();
} else {
const depthStencilAttachment = {
view: texture.createView(desc)
};
if (kTextureFormatInfo[this.p.format].depth) {
depthStencilAttachment.depthLoadOp = 'load';
depthStencilAttachment.depthStoreOp = 'discard';
}
if (kTextureFormatInfo[this.p.format].stencil) {
depthStencilAttachment.stencilLoadOp = 'load';
depthStencilAttachment.stencilStoreOp = 'discard';
}
commandEncoder.
beginRenderPass({
colorAttachments: [],
depthStencilAttachment
}).
end();
}
}
commandEncoder.popDebugGroup();
this.queue.submit([commandEncoder.finish()]);
}
}
export const kTestParams = kUnitCaseParamsBuilder.
combine('dimension', kTextureDimensions).
combine('readMethod', [
ReadMethod.CopyToBuffer,
ReadMethod.CopyToTexture,
ReadMethod.Sample,
ReadMethod.DepthTest,
ReadMethod.StencilTest]
)
// [3] compressed formats
.combine('format', kUncompressedTextureFormats).
filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format)).
beginSubcases().
combine('aspect', kTextureAspects).
unless(({ readMethod, format, aspect }) => {
const info = kTextureFormatInfo[format];
return (
readMethod === ReadMethod.DepthTest && (!info.depth || aspect === 'stencil-only') ||
readMethod === ReadMethod.StencilTest && (!info.stencil || aspect === 'depth-only') ||
readMethod === ReadMethod.ColorBlending && !info.color ||
// [1]: Test with depth/stencil sampling
readMethod === ReadMethod.Sample && (!!info.depth || !!info.stencil) ||
aspect === 'depth-only' && !info.depth ||
aspect === 'stencil-only' && !info.stencil ||
aspect === 'all' && !!info.depth && !!info.stencil ||
// Cannot copy from a packed depth format.
// [2]: Test copying out of the stencil aspect.
(readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture) && (
format === 'depth24plus' || format === 'depth24plus-stencil8'));
}).
combine('mipLevelCount', kMipLevelCounts)
// 1D texture can only have a single mip level
.unless((p) => p.dimension === '1d' && p.mipLevelCount !== 1).
combine('sampleCount', kSampleCounts).
unless(
({ readMethod, sampleCount }) =>
// We can only read from multisampled textures by sampling.
sampleCount > 1 && (
readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture)
)
// Multisampled textures may only have one mip
.unless(({ sampleCount, mipLevelCount }) => sampleCount > 1 && mipLevelCount > 1).
combine('uninitializeMethod', kUninitializeMethods).
unless(({ dimension, readMethod, uninitializeMethod, format, sampleCount }) => {
const formatInfo = kTextureFormatInfo[format];
return (
dimension !== '2d' && (
sampleCount > 1 ||
!!formatInfo.depth ||
!!formatInfo.stencil ||
readMethod === ReadMethod.DepthTest ||
readMethod === ReadMethod.StencilTest ||
readMethod === ReadMethod.ColorBlending ||
uninitializeMethod === UninitializeMethod.StoreOpClear));
}).
expandWithParams(function* ({ dimension }) {
switch (dimension) {
case '2d':
yield { layerCount: 1 };
yield { layerCount: 7 };
break;
case '1d':
case '3d':
yield { layerCount: 1 };
break;
}
})
// Multisampled 3D / 2D array textures not supported.
.unless(({ sampleCount, layerCount }) => sampleCount > 1 && layerCount > 1).
unless(({ format, sampleCount, uninitializeMethod, readMethod }) => {
const usage = getRequiredTextureUsage(format, sampleCount, uninitializeMethod, readMethod);
const info = kTextureFormatInfo[format];
return (
(usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 && !info.renderable ||
(usage & GPUConst.TextureUsage.STORAGE_BINDING) !== 0 && !info.color?.storage ||
sampleCount > 1 && !info.multisample);
}).
combine('nonPowerOfTwo', [false, true]).
combine('canaryOnCreation', [false, true]).
filter(({ canaryOnCreation, format }) => {
// We can only initialize the texture if it's encodable or renderable.
const canInitialize = format in kTextureFormatInfo || kTextureFormatInfo[format].renderable;
// Filter out cases where we want canary values but can't initialize.
return !canaryOnCreation || canInitialize;
});

View file

@ -7,550 +7,9 @@ TODO:
- test by sampling depth/stencil [1]
- test by copying out of stencil [2]
- test compressed texture formats [3]
`; // MAINTENANCE_TODO: This is a test file, it probably shouldn't export anything.
// Everything that's exported should be moved to another file.
import {
kUnitCaseParamsBuilder } from
'../../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert, unreachable } from '../../../../common/util/util.js';
import { kTextureAspects, kTextureDimensions } from '../../../capability_info.js';
import { GPUConst } from '../../../constants.js';
import {
kTextureFormatInfo,
kUncompressedTextureFormats,
textureDimensionAndFormatCompatible } from
'../../../format_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { virtualMipSize } from '../../../util/texture/base.js';
import { createTextureUploadBuffer } from '../../../util/texture/layout.js';
import { SubresourceRange } from '../../../util/texture/subresource.js';
import { kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
export let UninitializeMethod = /*#__PURE__*/function (UninitializeMethod) {UninitializeMethod["Creation"] = "Creation";UninitializeMethod["StoreOpClear"] = "StoreOpClear";return UninitializeMethod;}({});
// The texture was rendered to with GPUStoreOp "clear"
const kUninitializeMethods = Object.keys(UninitializeMethod);
export let ReadMethod = /*#__PURE__*/function (ReadMethod) {ReadMethod["Sample"] = "Sample";ReadMethod["CopyToBuffer"] = "CopyToBuffer";ReadMethod["CopyToTexture"] = "CopyToTexture";ReadMethod["DepthTest"] = "DepthTest";ReadMethod["StencilTest"] = "StencilTest";ReadMethod["ColorBlending"] = "ColorBlending";ReadMethod["Storage"] = "Storage";return ReadMethod;}({});
// Read the texture as a storage texture
// Test with these mip level counts
const kMipLevelCounts = [1, 5];
// For each mip level count, define the mip ranges to leave uninitialized.
const kUninitializedMipRangesToTest = {
1: [{ begin: 0, end: 1 }], // Test the only mip
5: [
{ begin: 0, end: 2 },
{ begin: 3, end: 4 }]
// Test a range and a single mip
};
// Test with these sample counts.
const kSampleCounts = [1, 4];
// Test with these layer counts.
// For each layer count, define the layers to leave uninitialized.
const kUninitializedLayerRangesToTest = {
1: [{ begin: 0, end: 1 }], // Test the only layer
7: [
{ begin: 2, end: 4 },
{ begin: 6, end: 7 }]
// Test a range and a single layer
};
// Enums to abstract over color / depth / stencil values in textures. Depending on the texture format,
// the data for each value may have a different representation. These enums are converted to a
// representation such that their values can be compared. ex.) An integer is needed to upload to an
// unsigned normalized format, but its value is read as a float in the shader.
export let InitializedState = /*#__PURE__*/function (InitializedState) {InitializedState[InitializedState["Canary"] = 0] = "Canary";InitializedState[InitializedState["Zero"] = 1] = "Zero";return InitializedState;}({});
// We check that uninitialized subresources are in this state when read back.
const initializedStateAsFloat = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 1
};
const initializedStateAsUint = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 1
};
const initializedStateAsSint = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: -1
};
function initializedStateAsColor(
state,
format)
{
let value;
if (format.indexOf('uint') !== -1) {
value = initializedStateAsUint[state];
} else if (format.indexOf('sint') !== -1) {
value = initializedStateAsSint[state];
} else {
value = initializedStateAsFloat[state];
}
return [value, value, value, value];
}
const initializedStateAsDepth = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 0.8
};
const initializedStateAsStencil = {
[InitializedState.Zero]: 0,
[InitializedState.Canary]: 42
};
function getRequiredTextureUsage(
format,
sampleCount,
uninitializeMethod,
readMethod)
{
let usage = GPUConst.TextureUsage.COPY_DST;
switch (uninitializeMethod) {
case UninitializeMethod.Creation:
break;
case UninitializeMethod.StoreOpClear:
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
break;
default:
unreachable();
}
switch (readMethod) {
case ReadMethod.CopyToBuffer:
case ReadMethod.CopyToTexture:
usage |= GPUConst.TextureUsage.COPY_SRC;
break;
case ReadMethod.Sample:
usage |= GPUConst.TextureUsage.TEXTURE_BINDING;
break;
case ReadMethod.Storage:
usage |= GPUConst.TextureUsage.STORAGE_BINDING;
break;
case ReadMethod.DepthTest:
case ReadMethod.StencilTest:
case ReadMethod.ColorBlending:
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
break;
default:
unreachable();
}
if (sampleCount > 1) {
// Copies to multisampled textures are not allowed. We need OutputAttachment to initialize
// canary data in multisampled textures.
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
}
if (!kTextureFormatInfo[format].copyDst) {
// Copies are not possible. We need OutputAttachment to initialize
// canary data.
assert(kTextureFormatInfo[format].renderable);
usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
}
return usage;
}
export class TextureZeroInitTest extends GPUTest {
constructor(sharedState, rec, params) {
super(sharedState, rec, params);
this.p = params;
const stateToTexelComponents = (state) => {
const [R, G, B, A] = initializedStateAsColor(state, this.p.format);
return {
R,
G,
B,
A,
Depth: initializedStateAsDepth[state],
Stencil: initializedStateAsStencil[state]
};
};
this.stateToTexelComponents = {
[InitializedState.Zero]: stateToTexelComponents(InitializedState.Zero),
[InitializedState.Canary]: stateToTexelComponents(InitializedState.Canary)
};
}
get textureWidth() {
let width = 1 << this.p.mipLevelCount;
if (this.p.nonPowerOfTwo) {
width = 2 * width - 1;
}
return width;
}
get textureHeight() {
if (this.p.dimension === '1d') {
return 1;
}
let height = 1 << this.p.mipLevelCount;
if (this.p.nonPowerOfTwo) {
height = 2 * height - 1;
}
return height;
}
get textureDepth() {
return this.p.dimension === '3d' ? 11 : 1;
}
get textureDepthOrArrayLayers() {
return this.p.dimension === '2d' ? this.p.layerCount : this.textureDepth;
}
// Used to iterate subresources and check that their uninitialized contents are zero when accessed
*iterateUninitializedSubresources() {
for (const mipRange of kUninitializedMipRangesToTest[this.p.mipLevelCount]) {
for (const layerRange of kUninitializedLayerRangesToTest[this.p.layerCount]) {
yield new SubresourceRange({ mipRange, layerRange });
}
}
}
// Used to iterate and initialize other subresources not checked for zero-initialization.
// Zero-initialization of uninitialized subresources should not have side effects on already
// initialized subresources.
*iterateInitializedSubresources() {
const uninitialized = new Array(this.p.mipLevelCount);
for (let level = 0; level < uninitialized.length; ++level) {
uninitialized[level] = new Array(this.p.layerCount);
}
for (const subresources of this.iterateUninitializedSubresources()) {
for (const { level, layer } of subresources.each()) {
uninitialized[level][layer] = true;
}
}
for (let level = 0; level < uninitialized.length; ++level) {
for (let layer = 0; layer < uninitialized[level].length; ++layer) {
if (!uninitialized[level][layer]) {
yield new SubresourceRange({
mipRange: { begin: level, count: 1 },
layerRange: { begin: layer, count: 1 }
});
}
}
}
}
*generateTextureViewDescriptorsForRendering(
aspect,
subresourceRange)
{
const viewDescriptor = {
dimension: '2d',
aspect
};
if (subresourceRange === undefined) {
return viewDescriptor;
}
for (const { level, layer } of subresourceRange.each()) {
yield {
...viewDescriptor,
baseMipLevel: level,
mipLevelCount: 1,
baseArrayLayer: layer,
arrayLayerCount: 1
};
}
}
initializeWithStoreOp(
state,
texture,
subresourceRange)
{
const commandEncoder = this.device.createCommandEncoder();
commandEncoder.pushDebugGroup('initializeWithStoreOp');
for (const viewDescriptor of this.generateTextureViewDescriptorsForRendering(
'all',
subresourceRange
)) {
if (kTextureFormatInfo[this.p.format].color) {
commandEncoder.
beginRenderPass({
colorAttachments: [
{
view: texture.createView(viewDescriptor),
storeOp: 'store',
clearValue: initializedStateAsColor(state, this.p.format),
loadOp: 'clear'
}]
}).
end();
} else {
const depthStencilAttachment = {
view: texture.createView(viewDescriptor)
};
if (kTextureFormatInfo[this.p.format].depth) {
depthStencilAttachment.depthClearValue = initializedStateAsDepth[state];
depthStencilAttachment.depthLoadOp = 'clear';
depthStencilAttachment.depthStoreOp = 'store';
}
if (kTextureFormatInfo[this.p.format].stencil) {
depthStencilAttachment.stencilClearValue = initializedStateAsStencil[state];
depthStencilAttachment.stencilLoadOp = 'clear';
depthStencilAttachment.stencilStoreOp = 'store';
}
commandEncoder.
beginRenderPass({
colorAttachments: [],
depthStencilAttachment
}).
end();
}
}
commandEncoder.popDebugGroup();
this.queue.submit([commandEncoder.finish()]);
}
initializeWithCopy(
texture,
state,
subresourceRange)
{
assert(this.p.format in kTextureFormatInfo);
const format = this.p.format;
const firstSubresource = subresourceRange.each().next().value;
assert(typeof firstSubresource !== 'undefined');
const [largestWidth, largestHeight, largestDepth] = virtualMipSize(
this.p.dimension,
[this.textureWidth, this.textureHeight, this.textureDepth],
firstSubresource.level
);
const rep = kTexelRepresentationInfo[format];
const texelData = new Uint8Array(rep.pack(rep.encode(this.stateToTexelComponents[state])));
const { buffer, bytesPerRow, rowsPerImage } = createTextureUploadBuffer(
texelData,
this.device,
format,
this.p.dimension,
[largestWidth, largestHeight, largestDepth]
);
const commandEncoder = this.device.createCommandEncoder();
for (const { level, layer } of subresourceRange.each()) {
const [width, height, depth] = virtualMipSize(
this.p.dimension,
[this.textureWidth, this.textureHeight, this.textureDepth],
level
);
commandEncoder.copyBufferToTexture(
{
buffer,
bytesPerRow,
rowsPerImage
},
{ texture, mipLevel: level, origin: { x: 0, y: 0, z: layer } },
{ width, height, depthOrArrayLayers: depth }
);
}
this.queue.submit([commandEncoder.finish()]);
buffer.destroy();
}
initializeTexture(
texture,
state,
subresourceRange)
{
if (this.p.sampleCount > 1 || !kTextureFormatInfo[this.p.format].copyDst) {
// Copies to multisampled textures not yet specified.
// Use a storeOp for now.
assert(kTextureFormatInfo[this.p.format].renderable);
this.initializeWithStoreOp(state, texture, subresourceRange);
} else {
this.initializeWithCopy(texture, state, subresourceRange);
}
}
discardTexture(texture, subresourceRange) {
const commandEncoder = this.device.createCommandEncoder();
commandEncoder.pushDebugGroup('discardTexture');
for (const desc of this.generateTextureViewDescriptorsForRendering('all', subresourceRange)) {
if (kTextureFormatInfo[this.p.format].color) {
commandEncoder.
beginRenderPass({
colorAttachments: [
{
view: texture.createView(desc),
storeOp: 'discard',
loadOp: 'load'
}]
}).
end();
} else {
const depthStencilAttachment = {
view: texture.createView(desc)
};
if (kTextureFormatInfo[this.p.format].depth) {
depthStencilAttachment.depthLoadOp = 'load';
depthStencilAttachment.depthStoreOp = 'discard';
}
if (kTextureFormatInfo[this.p.format].stencil) {
depthStencilAttachment.stencilLoadOp = 'load';
depthStencilAttachment.stencilStoreOp = 'discard';
}
commandEncoder.
beginRenderPass({
colorAttachments: [],
depthStencilAttachment
}).
end();
}
}
commandEncoder.popDebugGroup();
this.queue.submit([commandEncoder.finish()]);
}
}
const kTestParams = kUnitCaseParamsBuilder.
combine('dimension', kTextureDimensions).
combine('readMethod', [
ReadMethod.CopyToBuffer,
ReadMethod.CopyToTexture,
ReadMethod.Sample,
ReadMethod.DepthTest,
ReadMethod.StencilTest]
)
// [3] compressed formats
.combine('format', kUncompressedTextureFormats).
filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format)).
beginSubcases().
combine('aspect', kTextureAspects).
unless(({ readMethod, format, aspect }) => {
const info = kTextureFormatInfo[format];
return (
readMethod === ReadMethod.DepthTest && (!info.depth || aspect === 'stencil-only') ||
readMethod === ReadMethod.StencilTest && (!info.stencil || aspect === 'depth-only') ||
readMethod === ReadMethod.ColorBlending && !info.color ||
// [1]: Test with depth/stencil sampling
readMethod === ReadMethod.Sample && (!!info.depth || !!info.stencil) ||
aspect === 'depth-only' && !info.depth ||
aspect === 'stencil-only' && !info.stencil ||
aspect === 'all' && !!info.depth && !!info.stencil ||
// Cannot copy from a packed depth format.
// [2]: Test copying out of the stencil aspect.
(readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture) && (
format === 'depth24plus' || format === 'depth24plus-stencil8'));
}).
combine('mipLevelCount', kMipLevelCounts)
// 1D texture can only have a single mip level
.unless((p) => p.dimension === '1d' && p.mipLevelCount !== 1).
combine('sampleCount', kSampleCounts).
unless(
({ readMethod, sampleCount }) =>
// We can only read from multisampled textures by sampling.
sampleCount > 1 && (
readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture)
)
// Multisampled textures may only have one mip
.unless(({ sampleCount, mipLevelCount }) => sampleCount > 1 && mipLevelCount > 1).
combine('uninitializeMethod', kUninitializeMethods).
unless(({ dimension, readMethod, uninitializeMethod, format, sampleCount }) => {
const formatInfo = kTextureFormatInfo[format];
return (
dimension !== '2d' && (
sampleCount > 1 ||
!!formatInfo.depth ||
!!formatInfo.stencil ||
readMethod === ReadMethod.DepthTest ||
readMethod === ReadMethod.StencilTest ||
readMethod === ReadMethod.ColorBlending ||
uninitializeMethod === UninitializeMethod.StoreOpClear));
}).
expandWithParams(function* ({ dimension }) {
switch (dimension) {
case '2d':
yield { layerCount: 1 };
yield { layerCount: 7 };
break;
case '1d':
case '3d':
yield { layerCount: 1 };
break;
}
})
// Multisampled 3D / 2D array textures not supported.
.unless(({ sampleCount, layerCount }) => sampleCount > 1 && layerCount > 1).
unless(({ format, sampleCount, uninitializeMethod, readMethod }) => {
const usage = getRequiredTextureUsage(format, sampleCount, uninitializeMethod, readMethod);
const info = kTextureFormatInfo[format];
return (
(usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 && !info.renderable ||
(usage & GPUConst.TextureUsage.STORAGE_BINDING) !== 0 && !info.color?.storage ||
sampleCount > 1 && !info.multisample);
}).
combine('nonPowerOfTwo', [false, true]).
combine('canaryOnCreation', [false, true]).
filter(({ canaryOnCreation, format }) => {
// We can only initialize the texture if it's encodable or renderable.
const canInitialize = format in kTextureFormatInfo || kTextureFormatInfo[format].renderable;
// Filter out cases where we want canary values but can't initialize.
return !canaryOnCreation || canInitialize;
});
`;import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { unreachable } from '../../../../common/util/util.js';
import { kTextureFormatInfo } from '../../../format_info.js';
import { checkContentsByBufferCopy, checkContentsByTextureCopy } from './check_texture/by_copy.js';
import {
@ -558,6 +17,15 @@ import {
checkContentsByStencilTest } from
'./check_texture/by_ds_test.js';
import { checkContentsBySampling } from './check_texture/by_sampling.js';
import {
getRequiredTextureUsage,
TextureZeroInitTest,
kTestParams,
UninitializeMethod,
InitializedState } from
'./check_texture/texture_zero_init_test.js';
const checkContentsImpl = {
Sample: checkContentsBySampling,

View file

@ -0,0 +1,379 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Tests for the behavior of read-write storage textures.
TODO:
- Test resource usage transitions with read-write storage textures
`;import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert, unreachable } from '../../../../common/util/util.js';
import { kTextureDimensions } from '../../../capability_info.js';
import { kColorTextureFormats, kTextureFormatInfo } from '../../../format_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { align } from '../../../util/math.js';
const kShaderStagesForReadWriteStorageTexture = ['fragment', 'compute'];
class F extends GPUTest {
GetInitialData(storageTexture) {
const format = storageTexture.format;
const bytesPerBlock = kTextureFormatInfo[format].bytesPerBlock;
assert(bytesPerBlock !== undefined);
const width = storageTexture.width;
const height = storageTexture.height;
const depthOrArrayLayers = storageTexture.depthOrArrayLayers;
const initialData = new ArrayBuffer(bytesPerBlock * width * height * depthOrArrayLayers);
const initialTypedData = this.GetTypedArrayBuffer(initialData, format);
for (let z = 0; z < depthOrArrayLayers; ++z) {
for (let y = 0; y < height; ++y) {
for (let x = 0; x < width; ++x) {
const index = z * width * height + y * width + x;
switch (format) {
case 'r32sint':
initialTypedData[index] = (index & 1 ? 1 : -1) * (2 * index + 1);
break;
case 'r32uint':
initialTypedData[index] = 2 * index + 1;
break;
case 'r32float':
initialTypedData[index] = (2 * index + 1) / 10.0;
break;
}
}
}
}
return initialData;
}
GetTypedArrayBuffer(arrayBuffer, format) {
switch (format) {
case 'r32sint':
return new Int32Array(arrayBuffer);
case 'r32uint':
return new Uint32Array(arrayBuffer);
case 'r32float':
return new Float32Array(arrayBuffer);
default:
unreachable();
return new Uint8Array(arrayBuffer);
}
}
GetExpectedData(
shaderStage,
storageTexture,
initialData)
{
const format = storageTexture.format;
const bytesPerBlock = kTextureFormatInfo[format].bytesPerBlock;
assert(bytesPerBlock !== undefined);
const width = storageTexture.width;
const height = storageTexture.height;
const depthOrArrayLayers = storageTexture.depthOrArrayLayers;
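// copyTextureToBuffer requires bytesPerRow to be a multiple of 256 bytes, so the expected data uses the same padded row layout.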
const bytesPerRowAlignment = align(bytesPerBlock * width, 256);
const itemsPerRow = bytesPerRowAlignment / bytesPerBlock;
const expectedData = new ArrayBuffer(
bytesPerRowAlignment * (height * depthOrArrayLayers - 1) + bytesPerBlock * width
);
const expectedTypedData = this.GetTypedArrayBuffer(expectedData, format);
const initialTypedData = this.GetTypedArrayBuffer(initialData, format);
for (let z = 0; z < depthOrArrayLayers; ++z) {
for (let y = 0; y < height; ++y) {
for (let x = 0; x < width; ++x) {
const expectedIndex = z * itemsPerRow * height + y * itemsPerRow + x;
switch (shaderStage) {
case 'compute':{
// In the compute shader we mirror the texture in x, y and z (each texel is read from the opposite corner).
const initialIndex =
(depthOrArrayLayers - 1 - z) * width * height +
(height - 1 - y) * width + (
width - 1 - x);
expectedTypedData[expectedIndex] = initialTypedData[initialIndex];
break;
}
case 'fragment':{
// In the fragment shader we double the original texel value of the read-write storage
// texture.
const initialIndex = z * width * height + y * width + x;
expectedTypedData[expectedIndex] = initialTypedData[initialIndex] * 2;
break;
}
}
}
}
}
return expectedData;
}
RecordCommandsToTransform(
device,
shaderStage,
commandEncoder,
rwTexture)
{
let declaration = '';
switch (rwTexture.dimension) {
case '1d':
declaration = 'texture_storage_1d';
break;
case '2d':
declaration =
rwTexture.depthOrArrayLayers > 1 ? 'texture_storage_2d_array' : 'texture_storage_2d';
break;
case '3d':
declaration = 'texture_storage_3d';
break;
}
const textureDeclaration = `
@group(0) @binding(0) var rwTexture: ${declaration}<${rwTexture.format}, read_write>;
`;
switch (shaderStage) {
case 'fragment':{
const vertexShader = `
@vertex
fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
var pos = array(
vec2f(-1.0, -1.0),
vec2f(-1.0, 1.0),
vec2f( 1.0, -1.0),
vec2f(-1.0, 1.0),
vec2f( 1.0, -1.0),
vec2f( 1.0, 1.0));
return vec4f(pos[VertexIndex], 0.0, 1.0);
}
`;
let textureLoadStoreCoord = '';
switch (rwTexture.dimension) {
case '1d':
textureLoadStoreCoord = 'textureCoord.x';
break;
case '2d':
textureLoadStoreCoord =
rwTexture.depthOrArrayLayers > 1 ? 'textureCoord, z' : 'textureCoord';
break;
case '3d':
textureLoadStoreCoord = 'vec3u(textureCoord, z)';
break;
}
const fragmentShader = `
${textureDeclaration}
@fragment
fn main(@builtin(position) fragCoord: vec4f) -> @location(0) vec4f {
let textureCoord = vec2u(fragCoord.xy);
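// Each fragment doubles its own texel in place; the loop covers every array layer / depth slice at this x,y.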
for (var z = 0u; z < ${rwTexture.depthOrArrayLayers}; z++) {
let initialValue = textureLoad(rwTexture, ${textureLoadStoreCoord});
let outputValue = initialValue * 2;
textureStore(rwTexture, ${textureLoadStoreCoord}, outputValue);
}
return vec4f(0.0, 1.0, 0.0, 1.0);
}
`;
const renderPipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module: device.createShaderModule({
code: vertexShader
})
},
fragment: {
module: device.createShaderModule({
code: fragmentShader
}),
targets: [
{
format: 'rgba8unorm'
}]
},
primitive: {
topology: 'triangle-list'
}
});
const bindGroup = device.createBindGroup({
layout: renderPipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: rwTexture.createView()
}]
});
const dummyColorTexture = device.createTexture({
size: [rwTexture.width, rwTexture.height, 1],
usage: GPUTextureUsage.RENDER_ATTACHMENT,
format: 'rgba8unorm'
});
const renderPassEncoder = commandEncoder.beginRenderPass({
colorAttachments: [
{
view: dummyColorTexture.createView(),
loadOp: 'clear',
clearValue: { r: 0, g: 0, b: 0, a: 0 },
storeOp: 'store'
}]
});
renderPassEncoder.setPipeline(renderPipeline);
renderPassEncoder.setBindGroup(0, bindGroup);
renderPassEncoder.draw(6);
renderPassEncoder.end();
break;
}
case 'compute':{
let textureLoadCoord = '';
let textureStoreCoord = '';
switch (rwTexture.dimension) {
case '1d':
textureLoadCoord = 'dimension - 1u - invocationID.x';
textureStoreCoord = 'invocationID.x';
break;
case '2d':
textureLoadCoord =
rwTexture.depthOrArrayLayers > 1 ?
`vec2u(dimension.x - 1u - invocationID.x, dimension.y - 1u - invocationID.y),
textureNumLayers(rwTexture) - 1u - invocationID.z` :
`vec2u(dimension.x - 1u - invocationID.x, dimension.y - 1u - invocationID.y)`;
textureStoreCoord =
rwTexture.depthOrArrayLayers > 1 ?
'invocationID.xy, invocationID.z' :
'invocationID.xy';
break;
case '3d':
textureLoadCoord = `
vec3u(dimension.x - 1u - invocationID.x, dimension.y - 1u - invocationID.y,
dimension.z - 1u - invocationID.z)`;
textureStoreCoord = 'invocationID';
break;
}
const computeShader = `
${textureDeclaration}
@compute
@workgroup_size(${rwTexture.width}, ${rwTexture.height}, ${rwTexture.depthOrArrayLayers})
fn main(@builtin(local_invocation_id) invocationID: vec3u) {
let dimension = textureDimensions(rwTexture);
let initialValue = textureLoad(rwTexture, ${textureLoadCoord});
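// Synchronize the workgroup so that every invocation has loaded its mirrored source texel
// before any invocation overwrites a texel with textureStore below.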
textureBarrier();
textureStore(rwTexture, ${textureStoreCoord}, initialValue);
}`;
const computePipeline = device.createComputePipeline({
compute: {
module: device.createShaderModule({
code: computeShader
})
},
layout: 'auto'
});
const bindGroup = device.createBindGroup({
layout: computePipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: rwTexture.createView()
}]
});
const computePassEncoder = commandEncoder.beginComputePass();
computePassEncoder.setPipeline(computePipeline);
computePassEncoder.setBindGroup(0, bindGroup);
computePassEncoder.dispatchWorkgroups(1);
computePassEncoder.end();
break;
}
}
}
}
export const g = makeTestGroup(F);
g.test('basic').
desc(
`The basic functionality tests for read-write storage textures. In the test we read data from
the read-write storage texture, do transforms and write the data back to the read-write storage
texture. textureBarrier() is also called in the tests using compute pipelines.`
).
params((u) =>
u.
combine('format', kColorTextureFormats).
filter((p) => kTextureFormatInfo[p.format].color?.readWriteStorage === true).
combine('shaderStage', kShaderStagesForReadWriteStorageTexture).
combine('textureDimension', kTextureDimensions).
combine('depthOrArrayLayers', [1, 2]).
unless((p) => p.textureDimension === '1d' && p.depthOrArrayLayers > 1)
).
fn((t) => {
const { format, shaderStage, textureDimension, depthOrArrayLayers } = t.params;
// In compatibility mode the minimum maxComputeInvocationsPerWorkgroup is 128, vs 256 in non-compat mode,
// so the workgroup is 16 * 8 * 2 = 256 invocations in non-compat and 8 * 8 * 2 = 128 in compat.
const kWidth = t.isCompatibility ? 8 : 16;
const height = textureDimension === '1d' ? 1 : 8;
const textureSize = [kWidth, height, depthOrArrayLayers];
const storageTexture = t.device.createTexture({
format,
dimension: textureDimension,
size: textureSize,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.STORAGE_BINDING
});
const bytesPerBlock = kTextureFormatInfo[format].bytesPerBlock;
const initialData = t.GetInitialData(storageTexture);
t.queue.writeTexture(
{ texture: storageTexture },
initialData,
{
bytesPerRow: bytesPerBlock * kWidth,
rowsPerImage: height
},
textureSize
);
const commandEncoder = t.device.createCommandEncoder();
t.RecordCommandsToTransform(t.device, shaderStage, commandEncoder, storageTexture);
const expectedData = t.GetExpectedData(shaderStage, storageTexture, initialData);
const readbackBuffer = t.device.createBuffer({
size: expectedData.byteLength,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
});
const bytesPerRow = align(bytesPerBlock * kWidth, 256);
commandEncoder.copyTextureToBuffer(
{
texture: storageTexture
},
{
buffer: readbackBuffer,
bytesPerRow,
rowsPerImage: height
},
textureSize
);
t.queue.submit([commandEncoder.finish()]);
switch (format) {
case 'r32sint':
t.expectGPUBufferValuesEqual(readbackBuffer, new Int32Array(expectedData));
break;
case 'r32uint':
t.expectGPUBufferValuesEqual(readbackBuffer, new Uint32Array(expectedData));
break;
case 'r32float':
t.expectGPUBufferValuesEqual(readbackBuffer, new Float32Array(expectedData));
break;
}
});
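// Aside (sketch only, not part of the test above): the pipelines here rely on layout: 'auto'.
// An explicit bind group layout for the same read-write storage texture binding would look
// roughly like the following; the format and visibility values are illustrative assumptions,
// not values taken from the test.
//
// const explicitLayout = device.createBindGroupLayout({
//   entries: [{
//     binding: 0,
//     visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT,
//     storageTexture: { access: 'read-write', format: 'r32uint', viewDimension: '2d' },
//   }],
// });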

View file

@ -100,7 +100,8 @@ u //
.combine('format', kRegularTextureFormats).
combine('viewFormat', kRegularTextureFormats).
filter(
({ format, viewFormat }) => format !== viewFormat && viewCompatible(format, viewFormat)
({ format, viewFormat }) =>
format !== viewFormat && viewCompatible(false, format, viewFormat)
)
).
beforeAllSubcases((t) => {
@ -202,7 +203,8 @@ u //
.combine('format', kRenderableColorTextureFormats).
combine('viewFormat', kRenderableColorTextureFormats).
filter(
({ format, viewFormat }) => format !== viewFormat && viewCompatible(format, viewFormat)
({ format, viewFormat }) =>
format !== viewFormat && viewCompatible(false, format, viewFormat)
).
combine('sampleCount', [1, 4])
).

View file

@ -3,6 +3,9 @@
**/export const description = `
Test the result of writing textures through texture views with various options.
Reads values from a shader array and writes them via various write methods.
Check the texture result with the expected texel view.
All x= every possible view write method: {
- storage write {fragment, compute}
- render pass store
@ -13,20 +16,358 @@ Format reinterpretation is not tested here. It is in format_reinterpretation.spe
TODO: Write helper for this if not already available (see resource_init, buffer_sync_test for related code).
`;import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
import { unreachable } from '../../../../common/util/util.js';
import {
kRegularTextureFormats,
kTextureFormatInfo } from
'../../../format_info.js';
import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
import { kFullscreenQuadVertexShaderCode } from '../../../util/shader.js';
import { TexelView } from '../../../util/texture/texel_view.js';
export const g = makeTestGroup(GPUTest);
export const g = makeTestGroup(TextureTestMixin(GPUTest));
const kTextureViewWriteMethods = [
'storage-write-fragment',
'storage-write-compute',
'render-pass-store',
'render-pass-resolve'];
// Src color values to read from a shader array.
const kColorsFloat = [
{ R: 1.0, G: 0.0, B: 0.0, A: 0.8 },
{ R: 0.0, G: 1.0, B: 0.0, A: 0.7 },
{ R: 0.0, G: 0.0, B: 0.0, A: 0.6 },
{ R: 0.0, G: 0.0, B: 0.0, A: 0.5 },
{ R: 1.0, G: 1.0, B: 1.0, A: 0.4 },
{ R: 0.7, G: 0.0, B: 0.0, A: 0.3 },
{ R: 0.0, G: 0.8, B: 0.0, A: 0.2 },
{ R: 0.0, G: 0.0, B: 0.9, A: 0.1 },
{ R: 0.1, G: 0.2, B: 0.0, A: 0.3 },
{ R: 0.4, G: 0.3, B: 0.6, A: 0.8 }];
function FloatToIntColor(c) {
return Math.floor(c * 100);
}
const kColorsInt = kColorsFloat.map((c) => {
return {
R: FloatToIntColor(c.R),
G: FloatToIntColor(c.G),
B: FloatToIntColor(c.B),
A: FloatToIntColor(c.A)
};
});
const kTextureSize = 16;
function writeTextureAndGetExpectedTexelView(
t,
method,
view,
format,
sampleCount)
{
const info = kTextureFormatInfo[format];
const isFloatType = info.color.type === 'float' || info.color.type === 'unfilterable-float';
const kColors = isFloatType ? kColorsFloat : kColorsInt;
const expectedTexelView = TexelView.fromTexelsAsColors(
format,
(coords) => {
const pixelPos = coords.y * kTextureSize + coords.x;
return kColors[pixelPos % kColors.length];
},
{ clampToFormatRange: true }
);
const vecType = isFloatType ? 'vec4f' : info.color.type === 'sint' ? 'vec4i' : 'vec4u';
const kColorArrayShaderString = `array<${vecType}, ${kColors.length}>(
${kColors.map((t) => `${vecType}(${t.R}, ${t.G}, ${t.B}, ${t.A}) `).join(',')}
)`;
switch (method) {
case 'storage-write-compute':
{
const pipeline = t.device.createComputePipeline({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: `
@group(0) @binding(0) var dst: texture_storage_2d<${format}, write>;
@compute @workgroup_size(1, 1) fn main(
@builtin(global_invocation_id) global_id: vec3<u32>,
) {
const src = ${kColorArrayShaderString};
let coord = vec2u(global_id.xy);
let idx = coord.x + coord.y * ${kTextureSize};
textureStore(dst, coord, src[idx % ${kColors.length}]);
}`
}),
entryPoint: 'main'
}
});
const commandEncoder = t.device.createCommandEncoder();
const pass = commandEncoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(
0,
t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: view
}]
})
);
pass.dispatchWorkgroups(kTextureSize, kTextureSize);
pass.end();
t.device.queue.submit([commandEncoder.finish()]);
}
break;
case 'storage-write-fragment':
{
// Create a placeholder color attachment texture
// whose size equals that of the texture under test,
// so that we have the same number of fragments as texels.
const kPlaceholderTextureFormat = 'rgba8unorm';
const placeholderTexture = t.trackForCleanup(
t.device.createTexture({
format: kPlaceholderTextureFormat,
size: [kTextureSize, kTextureSize],
usage: GPUTextureUsage.RENDER_ATTACHMENT
})
);
const pipeline = t.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: t.device.createShaderModule({
code: kFullscreenQuadVertexShaderCode
})
},
fragment: {
module: t.device.createShaderModule({
code: `
@group(0) @binding(0) var dst: texture_storage_2d<${format}, write>;
@fragment fn main(
@builtin(position) fragCoord: vec4<f32>,
) {
const src = ${kColorArrayShaderString};
let coord = vec2u(fragCoord.xy);
let idx = coord.x + coord.y * ${kTextureSize};
textureStore(dst, coord, src[idx % ${kColors.length}]);
}`
}),
// Set writeMask to 0 as the fragment shader has no output.
targets: [
{
format: kPlaceholderTextureFormat,
writeMask: 0
}]
}
});
const commandEncoder = t.device.createCommandEncoder();
const pass = commandEncoder.beginRenderPass({
colorAttachments: [
{
view: placeholderTexture.createView(),
loadOp: 'clear',
storeOp: 'discard'
}]
});
pass.setPipeline(pipeline);
pass.setBindGroup(
0,
t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: view
}]
})
);
pass.draw(6);
pass.end();
t.device.queue.submit([commandEncoder.finish()]);
}
break;
case 'render-pass-store':
case 'render-pass-resolve':
{
// Create a placeholder color attachment texture as the store target when the texture under test is used as the resolve target.
const targetView =
method === 'render-pass-store' ?
view :
t.
trackForCleanup(
t.device.createTexture({
format,
size: [kTextureSize, kTextureSize],
usage: GPUTextureUsage.RENDER_ATTACHMENT,
sampleCount: 4
})
).
createView();
const resolveView = method === 'render-pass-store' ? undefined : view;
const multisampleCount = method === 'render-pass-store' ? sampleCount : 4;
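// For 'render-pass-resolve' the pass renders into the 4-sample placeholder and resolves into the
// view under test; for 'render-pass-store' it renders directly into that view.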
const pipeline = t.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: t.device.createShaderModule({
code: kFullscreenQuadVertexShaderCode
})
},
fragment: {
module: t.device.createShaderModule({
code: `
@fragment fn main(
@builtin(position) fragCoord: vec4<f32>,
) -> @location(0) ${vecType} {
const src = ${kColorArrayShaderString};
let coord = vec2u(fragCoord.xy);
let idx = coord.x + coord.y * ${kTextureSize};
return src[idx % ${kColors.length}];
}`
}),
targets: [
{
format
}]
},
multisample: {
count: multisampleCount
}
});
const commandEncoder = t.device.createCommandEncoder();
const pass = commandEncoder.beginRenderPass({
colorAttachments: [
{
view: targetView,
resolveTarget: resolveView,
loadOp: 'clear',
storeOp: 'store'
}]
});
pass.setPipeline(pipeline);
pass.draw(6);
pass.end();
t.device.queue.submit([commandEncoder.finish()]);
}
break;
default:
unreachable();
}
return expectedTexelView;
}
g.test('format').
desc(
`Views of every allowed format.
Read values from color array in the shader, and write it to the texture view via different write methods.
- x= every texture format
- x= sampleCount {1, 4} if valid
- x= every possible view write method (see above)
TODO: Test sampleCount > 1 for 'render-pass-store' after extending copySinglePixelTextureToBufferUsingComputePass
to read multiple pixels from multisampled textures. [1]
TODO: Test rgb10a2uint when TexelRepresentation.numericRange is made per-component. [2]
`
).
unimplemented();
params((u) =>
u //
.combine('method', kTextureViewWriteMethods).
combine('format', kRegularTextureFormats).
combine('sampleCount', [1, 4]).
filter(({ format, method, sampleCount }) => {
const info = kTextureFormatInfo[format];
if (sampleCount > 1 && !info.multisample) {
return false;
}
// [2]
if (format === 'rgb10a2uint') {
return false;
}
switch (method) {
case 'storage-write-compute':
case 'storage-write-fragment':
return info.color?.storage && sampleCount === 1;
case 'render-pass-store':
// [1]
if (sampleCount > 1) {
return false;
}
return !!info.colorRender;
case 'render-pass-resolve':
return !!info.colorRender?.resolve && sampleCount === 1;
}
return true;
})
).
beforeAllSubcases((t) => {
const { format, method } = t.params;
t.skipIfTextureFormatNotSupported(format);
switch (method) {
case 'storage-write-compute':
case 'storage-write-fragment':
// Still need to filter again for compat mode.
t.skipIfTextureFormatNotUsableAsStorageTexture(format);
break;
}
}).
fn((t) => {
const { format, method, sampleCount } = t.params;
const usage =
GPUTextureUsage.COPY_SRC | (
method.includes('storage') ?
GPUTextureUsage.STORAGE_BINDING :
GPUTextureUsage.RENDER_ATTACHMENT);
const texture = t.trackForCleanup(
t.device.createTexture({
format,
usage,
size: [kTextureSize, kTextureSize],
sampleCount
})
);
const view = texture.createView();
const expectedTexelView = writeTextureAndGetExpectedTexelView(
t,
method,
view,
format,
sampleCount
);
// [1] Use copySinglePixelTextureToBufferUsingComputePass to check multisampled texture.
t.expectTexelViewComparisonIsOkInTexture({ texture }, expectedTexelView, [
kTextureSize,
kTextureSize]
);
});
g.test('dimension').
desc(

View file

@ -45,32 +45,47 @@ export function getPipelineTypeForBindingCombination(bindingCombination) {
}
}
function getBindGroupIndex(bindGroupTest, i) {
function getBindGroupIndex(bindGroupTest, numBindGroups, i) {
switch (bindGroupTest) {
case 'sameGroup':
return 0;
case 'differentGroups':
return i % 3;
return i % numBindGroups;
}
}
function getBindingIndex(bindGroupTest, numBindGroups, i) {
switch (bindGroupTest) {
case 'sameGroup':
return i;
case 'differentGroups':
return i / numBindGroups | 0;
}
}
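// For 'differentGroups' these spread binding i round-robin across groups:
// @group(i % numBindGroups) @binding(floor(i / numBindGroups)); for 'sameGroup' everything lands in group 0 at binding i.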
function getWGSLBindings(
order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
{
order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
numBindGroups
},
numBindings,
id)
{
return reorder(
order,
range(
numBindings,
(i) =>
`@group(${getBindGroupIndex(
bindGroupTest,
i
)}) @binding(${i}) ${storageDefinitionWGSLSnippetFn(i, id)};`
)
range(numBindings, (i) => {
const groupNdx = getBindGroupIndex(bindGroupTest, numBindGroups, i);
const bindingNdx = getBindingIndex(bindGroupTest, numBindGroups, i);
const storageWGSL = storageDefinitionWGSLSnippetFn(i, id);
return `@group(${groupNdx}) @binding(${bindingNdx}) ${storageWGSL};`;
})
).join('\n ');
}
@ -80,15 +95,22 @@ order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
bodyFn,
numBindGroups,
numBindings,
extraWGSL = '')
{
const bindingParams = {
order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
numBindGroups
};
switch (bindingCombination) {
case 'vertex':
return `
${extraWGSL}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
${getWGSLBindings(bindingParams, numBindings, 0)}
@vertex fn mainVS() -> @builtin(position) vec4f {
${bodyFn(numBindings, 0)}
@ -99,7 +121,7 @@ extraWGSL = '')
return `
${extraWGSL}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
${getWGSLBindings(bindingParams, numBindings, 0)}
@vertex fn mainVS() -> @builtin(position) vec4f {
return vec4f(0);
@ -113,9 +135,9 @@ extraWGSL = '')
return `
${extraWGSL}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
${getWGSLBindings(bindingParams, numBindings, 0)}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings - 1, 1)}
${getWGSLBindings(bindingParams, numBindings - 1, 1)}
@vertex fn mainVS() -> @builtin(position) vec4f {
${bodyFn(numBindings, 0)}
@ -131,9 +153,9 @@ extraWGSL = '')
return `
${extraWGSL}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings - 1, 0)}
${getWGSLBindings(bindingParams, numBindings - 1, 0)}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 1)}
${getWGSLBindings(bindingParams, numBindings, 1)}
@vertex fn mainVS() -> @builtin(position) vec4f {
${bodyFn(numBindings - 1, 0)}
@ -148,8 +170,7 @@ extraWGSL = '')
case 'compute':
return `
${extraWGSL}
${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
@group(3) @binding(0) var<storage, read_write> d: f32;
${getWGSLBindings(bindingParams, numBindings, 0)}
@compute @workgroup_size(1) fn main() {
${bodyFn(numBindings, 0)}
}
@ -164,6 +185,7 @@ order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
usageWGSLSnippetFn,
maxBindGroups,
numBindings,
extraWGSL = '')
{
@ -174,6 +196,7 @@ extraWGSL = '')
storageDefinitionWGSLSnippetFn,
(numBindings, set) =>
`${range(numBindings, (i) => usageWGSLSnippetFn(i, set)).join('\n ')}`,
maxBindGroups,
numBindings,
extraWGSL
);
@ -185,6 +208,7 @@ order,
bindGroupTest,
storageDefinitionWGSLSnippetFn,
usageWGSLSnippetFn,
numBindGroups,
numBindings,
extraWGSL = '')
{
@ -195,6 +219,7 @@ extraWGSL = '')
storageDefinitionWGSLSnippetFn,
(numBindings, set) =>
`${range(numBindings, (i) => usageWGSLSnippetFn(i, set)).join('\n ')}`,
numBindGroups,
numBindings,
extraWGSL
);

View file

@ -1,6 +1,6 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range } from '../../../../../common/util/util.js';import {
**/import { assert } from '../../../../../common/util/util.js';import {
kCreatePipelineTypes,
kEncoderTypes,
kMaximumLimitBaseParams,
@ -10,30 +10,152 @@
const limit = 'maxBindGroups';
export const { g, description } = makeLimitTestGroup(limit);
const kLimitToBindingLayout = [
{
name: 'maxSampledTexturesPerShaderStage',
entry: {
texture: {}
}
},
{
name: 'maxSamplersPerShaderStage',
entry: {
sampler: {}
}
},
{
name: 'maxUniformBuffersPerShaderStage',
entry: {
buffer: {}
}
},
{
name: 'maxStorageBuffersPerShaderStage',
entry: {
buffer: {
type: 'read-only-storage'
}
}
},
{
name: 'maxStorageTexturesPerShaderStage',
entry: {
storageTexture: {
access: 'write-only',
format: 'rgba8unorm',
viewDimension: '2d'
}
}
}];
/**
* Yields all possible binding layout entries for a stage.
*/
function* getBindingLayoutEntriesForStage(device) {
for (const { name, entry } of kLimitToBindingLayout) {
const limit = device.limits[name];
for (let i = 0; i < limit; ++i) {
yield entry;
}
}
}
/**
* Yields all of the possible BindingLayoutEntryAndVisibility entries for a render pipeline
*/
function* getBindingLayoutEntriesForRenderPipeline(
device)
{
const visibilities = [GPUShaderStage.VERTEX, GPUShaderStage.FRAGMENT];
for (const visibility of visibilities) {
for (const bindEntryResourceType of getBindingLayoutEntriesForStage(device)) {
const entry = {
binding: 0,
visibility,
...bindEntryResourceType
};
yield entry;
}
}
}
/**
* Returns the total possible bindings per render pipeline
*/
function getTotalPossibleBindingsPerRenderPipeline(device) {
const totalPossibleBindingsPerStage =
device.limits.maxSampledTexturesPerShaderStage +
device.limits.maxSamplersPerShaderStage +
device.limits.maxUniformBuffersPerShaderStage +
device.limits.maxStorageBuffersPerShaderStage +
device.limits.maxStorageTexturesPerShaderStage;
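// Entries are generated for two stage visibilities (VERTEX and FRAGMENT), so double the per-stage total.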
return totalPossibleBindingsPerStage * 2;
}
/**
* Yields count GPUBindGroupLayoutEntries
*/
function* getBindingLayoutEntries(
device,
count)
{
assert(count < getTotalPossibleBindingsPerRenderPipeline(device));
const iter = getBindingLayoutEntriesForRenderPipeline(device);
for (; count > 0; --count) {
yield iter.next().value;
}
}
g.test('createPipelineLayout,at_over').
desc(`Test using createPipelineLayout at and over ${limit} limit`).
params(kMaximumLimitBaseParams).
fn(async (t) => {
const { limitTest, testValueName } = t.params;
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const bindGroupLayouts = range(testValue, (_i) =>
device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.VERTEX,
buffer: {}
}]
})
async ({ device, testValue, shouldError, actualLimit }) => {
const totalPossibleBindingsPerPipeline = getTotalPossibleBindingsPerRenderPipeline(device);
// Not sure what to do if we ever hit this but I think it's better to assert than silently skip.
assert(
testValue < totalPossibleBindingsPerPipeline,
`not enough possible bindings(${totalPossibleBindingsPerPipeline}) to test ${testValue} bindGroups`
);
await t.expectValidationError(() => {
device.createPipelineLayout({ bindGroupLayouts });
}, shouldError);
const bindingDescriptions = [];
const bindGroupLayouts = [...getBindingLayoutEntries(device, testValue)].map((entry) => {
bindingDescriptions.push(
`${JSON.stringify(entry)} // group(${bindingDescriptions.length})`
);
return device.createBindGroupLayout({
entries: [entry]
});
});
await t.expectValidationError(
() => {
device.createPipelineLayout({ bindGroupLayouts });
},
shouldError,
`testing ${testValue} bindGroups on maxBindGroups = ${actualLimit} with \n${bindingDescriptions.join(
'\n'
)}`
);
}
);
});

View file

@ -12,7 +12,8 @@ import {
const limit = 'maxComputeWorkgroupStorageSize';
export const { g, description } = makeLimitTestGroup(limit);
const kSmallestWorkgroupVarSize = 4;
// Each var is roundUp(16, SizeOf(T))
const kSmallestWorkgroupVarSize = 16;
const wgslF16Types = {
f16: { alignOf: 2, sizeOf: 2, requireF16: true },
@ -71,7 +72,9 @@ function getModuleForWorkgroupStorageSize(device, wgslType, size) {
const { sizeOf, alignOf, requireF16 } = wgslTypes[wgslType];
const unitSize = align(sizeOf, alignOf);
const units = Math.floor(size / unitSize);
const extra = (size - units * unitSize) / kSmallestWorkgroupVarSize;
const sizeUsed = align(units * unitSize, 16);
const sizeLeft = size - sizeUsed;
const extra = Math.floor(sizeLeft / kSmallestWorkgroupVarSize);
const code =
(requireF16 ? 'enable f16;\n' : '') +
@ -89,7 +92,7 @@ function getModuleForWorkgroupStorageSize(device, wgslType, size) {
b: vec2f,
};
var<workgroup> d0: array<${wgslType}, ${units}>;
${extra ? `var<workgroup> d1: array<f32, ${extra}>;` : ''}
${extra ? `var<workgroup> d1: array<vec4<f32>, ${extra}>;` : ''}
@compute @workgroup_size(1) fn main() {
_ = d0;
${extra ? '_ = d1;' : ''}

View file

@ -111,8 +111,12 @@ params(
combine('sampleMaskOut', [false, true])
).
beforeAllSubcases((t) => {
if (t.isCompatibility && (t.params.sampleMaskIn || t.params.sampleMaskOut)) {
t.skip('sample_mask not supported in compatibility mode');
if (t.isCompatibility) {
t.skipIf(
t.params.sampleMaskIn || t.params.sampleMaskOut,
'sample_mask not supported in compatibility mode'
);
t.skipIf(t.params.sampleIndex, 'sample_index not supported in compatibility mode');
}
}).
fn(async (t) => {

View file

@ -1,8 +1,9 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range, reorder,
kReorderOrderKeys } from
kReorderOrderKeys,
assert } from
'../../../../../common/util/util.js';
import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
@ -13,8 +14,14 @@ import {
kBindingCombinations,
getPipelineTypeForBindingCombination,
getPerStageWGSLForBindingCombination } from
'./limit_utils.js';
const kExtraLimits = {
maxBindingsPerBindGroup: 'adapterLimit',
maxBindGroups: 'adapterLimit'
};
const limit = 'maxSampledTexturesPerShaderStage';
export const { g, description } = makeLimitTestGroup(limit);
@ -43,6 +50,9 @@ desc(
Note: We also test order to make sure the implementation isn't just looking
at just the last entry.
Note: It's also possible that maxBindingsPerBindGroup is lower than
${limit}, in which case we skip the test since we cannot hit the limit.
`
).
params(
@ -56,11 +66,17 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
t.skipIf(
t.adapter.limits.maxBindingsPerBindGroup < testValue,
`maxBindingsPerBindGroup = ${t.adapter.limits.maxBindingsPerBindGroup} which is less than ${testValue}`
);
await t.expectValidationError(
() => createBindGroupLayout(device, visibility, order, testValue),
shouldError
);
}
},
kExtraLimits
);
});
@ -83,18 +99,30 @@ fn(async (t) => {
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const kNumGroups = 3;
async ({ device, testValue, shouldError, actualLimit }) => {
const maxBindingsPerBindGroup = Math.min(
t.device.limits.maxBindingsPerBindGroup,
actualLimit
);
const kNumGroups = Math.ceil(testValue / maxBindingsPerBindGroup);
// Not sure what to do in this case but best we get notified if it happens.
assert(kNumGroups <= t.device.limits.maxBindGroups);
const bindGroupLayouts = range(kNumGroups, (i) => {
const minInGroup = Math.floor(testValue / kNumGroups);
const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
const numInGroup = Math.min(
testValue - i * maxBindingsPerBindGroup,
maxBindingsPerBindGroup
);
return createBindGroupLayout(device, visibility, order, numInGroup);
});
await t.expectValidationError(
() => device.createPipelineLayout({ bindGroupLayouts }),
shouldError
);
}
},
kExtraLimits
);
});
@ -122,16 +150,21 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, actualLimit, shouldError }) => {
t.skipIf(
bindGroupTest === 'sameGroup' && testValue > device.limits.maxBindingsPerBindGroup,
`can not test ${testValue} bindings in same group because maxBindingsPerBindGroup = ${device.limits.maxBindingsPerBindGroup}`
);
const code = getPerStageWGSLForBindingCombination(
bindingCombination,
order,
bindGroupTest,
(i, j) => `var u${j}_${i}: texture_2d<f32>`,
(i, j) => `_ = textureLoad(u${j}_${i}, vec2u(0), 0);`,
device.limits.maxBindGroups,
testValue
);
const module = device.createShaderModule({ code });
await t.testCreatePipeline(
pipelineType,
async,
@ -139,6 +172,7 @@ fn(async (t) => {
shouldError,
`actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
);
}
},
kExtraLimits
);
});

View file

@ -1,8 +1,9 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range, reorder,
kReorderOrderKeys } from
kReorderOrderKeys,
assert } from
'../../../../../common/util/util.js';
import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
@ -13,8 +14,14 @@ import {
kBindingCombinations,
getPipelineTypeForBindingCombination,
getPerStageWGSLForBindingCombination } from
'./limit_utils.js';
const kExtraLimits = {
maxBindingsPerBindGroup: 'adapterLimit',
maxBindGroups: 'adapterLimit'
};
const limit = 'maxSamplersPerShaderStage';
export const { g, description } = makeLimitTestGroup(limit);
@ -56,11 +63,17 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
t.skipIf(
t.adapter.limits.maxBindingsPerBindGroup < testValue,
`maxBindingsPerBindGroup = ${t.adapter.limits.maxBindingsPerBindGroup} which is less than ${testValue}`
);
await t.expectValidationError(
() => createBindGroupLayout(device, visibility, order, testValue),
shouldError
);
}
},
kExtraLimits
);
});
@ -83,18 +96,29 @@ fn(async (t) => {
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const kNumGroups = 3;
async ({ device, testValue, shouldError, actualLimit }) => {
const maxBindingsPerBindGroup = Math.min(
t.device.limits.maxBindingsPerBindGroup,
actualLimit
);
const kNumGroups = Math.ceil(testValue / maxBindingsPerBindGroup);
// Not sure what to do in this case but best we get notified if it happens.
assert(kNumGroups <= t.device.limits.maxBindGroups);
const bindGroupLayouts = range(kNumGroups, (i) => {
const minInGroup = Math.floor(testValue / kNumGroups);
const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
const numInGroup = Math.min(
testValue - i * maxBindingsPerBindGroup,
maxBindingsPerBindGroup
);
return createBindGroupLayout(device, visibility, order, numInGroup);
});
await t.expectValidationError(
() => device.createPipelineLayout({ bindGroupLayouts }),
shouldError
);
}
},
kExtraLimits
);
});
@ -122,14 +146,27 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, actualLimit, shouldError }) => {
t.skipIf(
bindGroupTest === 'sameGroup' && testValue > device.limits.maxBindingsPerBindGroup,
`can not test ${testValue} bindings in same group because maxBindingsPerBindGroup = ${device.limits.maxBindingsPerBindGroup}`
);
// If this was false the texture binding would overlap the sampler bindings.
assert(testValue < device.limits.maxBindGroups * device.limits.maxBindingsPerBindGroup);
// Put the texture on the last possible binding.
const groupNdx = device.limits.maxBindGroups - 1;
const bindingNdx = device.limits.maxBindingsPerBindGroup - 1;
const code = getPerStageWGSLForBindingCombination(
bindingCombination,
order,
bindGroupTest,
(i, j) => `var u${j}_${i}: sampler`,
(i, j) => `_ = textureGather(0, tex, u${j}_${i}, vec2f(0));`,
device.limits.maxBindGroups,
testValue,
'@group(3) @binding(1) var tex: texture_2d<f32>;'
`@group(${groupNdx}) @binding(${bindingNdx}) var tex: texture_2d<f32>;`
);
const module = device.createShaderModule({ code });
@ -140,6 +177,7 @@ fn(async (t) => {
shouldError,
`actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
);
}
},
kExtraLimits
);
});

View file

@ -1,9 +1,11 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range, reorder,
kReorderOrderKeys } from
kReorderOrderKeys,
assert } from
'../../../../../common/util/util.js';
import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
import { GPUConst } from '../../../../constants.js';
import {
@ -13,8 +15,14 @@ import {
kBindingCombinations,
getPipelineTypeForBindingCombination,
getPerStageWGSLForBindingCombination } from
'./limit_utils.js';
const kExtraLimits = {
maxBindingsPerBindGroup: 'adapterLimit',
maxBindGroups: 'adapterLimit'
};
const limit = 'maxStorageBuffersPerShaderStage';
export const { g, description } = makeLimitTestGroup(limit);
@ -48,34 +56,31 @@ desc(
).
params(
kMaximumLimitBaseParams.
combine('visibility', [
GPUConst.ShaderStage.VERTEX,
GPUConst.ShaderStage.FRAGMENT,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE]
).
combine('visibility', kShaderStageCombinationsWithStage).
combine('type', ['storage', 'read-only-storage']).
combine('order', kReorderOrderKeys)
combine('order', kReorderOrderKeys).
filter(
({ visibility, type }) =>
(visibility & GPUConst.ShaderStage.VERTEX) === 0 || type !== 'storage'
)
).
fn(async (t) => {
const { limitTest, testValueName, visibility, order, type } = t.params;
if (visibility & GPUConst.ShaderStage.VERTEX && type === 'storage') {
// vertex stage does not support storage buffers
return;
}
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
t.skipIf(
t.adapter.limits.maxBindingsPerBindGroup < testValue,
`maxBindingsPerBindGroup = ${t.adapter.limits.maxBindingsPerBindGroup} which is less than ${testValue}`
);
await t.expectValidationError(() => {
createBindGroupLayout(device, visibility, type, order, testValue);
}, shouldError);
}
},
kExtraLimits
);
});
@ -90,41 +95,44 @@ desc(
).
params(
kMaximumLimitBaseParams.
combine('visibility', [
GPUConst.ShaderStage.VERTEX,
GPUConst.ShaderStage.FRAGMENT,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE]
).
combine('visibility', kShaderStageCombinationsWithStage).
combine('type', ['storage', 'read-only-storage']).
combine('order', kReorderOrderKeys)
combine('order', kReorderOrderKeys).
filter(
({ visibility, type }) =>
(visibility & GPUConst.ShaderStage.VERTEX) === 0 || type !== 'storage'
)
).
fn(async (t) => {
const { limitTest, testValueName, visibility, order, type } = t.params;
if (visibility & GPUConst.ShaderStage.VERTEX && type === 'storage') {
// vertex stage does not support storage buffers
return;
}
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const kNumGroups = 3;
async ({ device, testValue, shouldError, actualLimit }) => {
const maxBindingsPerBindGroup = Math.min(
t.device.limits.maxBindingsPerBindGroup,
actualLimit
);
const kNumGroups = Math.ceil(testValue / maxBindingsPerBindGroup);
// Not sure what to do in this case but best we get notified if it happens.
assert(kNumGroups <= t.device.limits.maxBindGroups);
const bindGroupLayouts = range(kNumGroups, (i) => {
const minInGroup = Math.floor(testValue / kNumGroups);
const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
const numInGroup = Math.min(
testValue - i * maxBindingsPerBindGroup,
maxBindingsPerBindGroup
);
return createBindGroupLayout(device, visibility, type, order, numInGroup);
});
await t.expectValidationError(
() => device.createPipelineLayout({ bindGroupLayouts }),
shouldError
);
}
},
kExtraLimits
);
});
@ -152,12 +160,18 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, actualLimit, shouldError }) => {
t.skipIf(
bindGroupTest === 'sameGroup' && testValue > device.limits.maxBindingsPerBindGroup,
`can not test ${testValue} bindings in same group because maxBindingsPerBindGroup = ${device.limits.maxBindingsPerBindGroup}`
);
const code = getPerStageWGSLForBindingCombination(
bindingCombination,
order,
bindGroupTest,
(i, j) => `var<storage> u${j}_${i}: f32`,
(i, j) => `_ = u${j}_${i};`,
device.limits.maxBindGroups,
testValue
);
const module = device.createShaderModule({ code });
@ -169,6 +183,7 @@ fn(async (t) => {
shouldError,
`actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
);
}
},
kExtraLimits
);
});

View file

@ -2,7 +2,8 @@
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range, reorder,
kReorderOrderKeys } from
kReorderOrderKeys,
assert } from
'../../../../../common/util/util.js';
import { GPUConst } from '../../../../constants.js';
@ -13,8 +14,14 @@ import {
getPerStageWGSLForBindingCombinationStorageTextures,
getPipelineTypeForBindingCombination } from
'./limit_utils.js';
const kExtraLimits = {
maxBindingsPerBindGroup: 'adapterLimit',
maxBindGroups: 'adapterLimit'
};
const limit = 'maxStorageTexturesPerShaderStage';
export const { g, description } = makeLimitTestGroup(limit);
@ -60,11 +67,17 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
t.skipIf(
t.adapter.limits.maxBindingsPerBindGroup < testValue,
`maxBindingsPerBindGroup = ${t.adapter.limits.maxBindingsPerBindGroup} which is less than ${testValue}`
);
await t.expectValidationError(
() => createBindGroupLayout(device, visibility, order, testValue),
shouldError
);
}
},
kExtraLimits
);
});
@ -91,18 +104,30 @@ fn(async (t) => {
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const kNumGroups = 3;
async ({ device, testValue, shouldError, actualLimit }) => {
const maxBindingsPerBindGroup = Math.min(
t.device.limits.maxBindingsPerBindGroup,
actualLimit
);
const kNumGroups = Math.ceil(testValue / maxBindingsPerBindGroup);
// Not sure what to do in this case but best we get notified if it happens.
assert(kNumGroups <= t.device.limits.maxBindGroups);
const bindGroupLayouts = range(kNumGroups, (i) => {
const minInGroup = Math.floor(testValue / kNumGroups);
const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
const numInGroup = Math.min(
testValue - i * maxBindingsPerBindGroup,
maxBindingsPerBindGroup
);
return createBindGroupLayout(device, visibility, order, numInGroup);
});
await t.expectValidationError(
() => device.createPipelineLayout({ bindGroupLayouts }),
shouldError
);
}
},
kExtraLimits
);
});
@ -130,6 +155,11 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, actualLimit, shouldError }) => {
t.skipIf(
bindGroupTest === 'sameGroup' && testValue > device.limits.maxBindingsPerBindGroup,
`can not test ${testValue} bindings in same group because maxBindingsPerBindGroup = ${device.limits.maxBindingsPerBindGroup}`
);
if (bindingCombination === 'fragment') {
return;
}
@ -140,6 +170,7 @@ fn(async (t) => {
bindGroupTest,
(i, j) => `var u${j}_${i}: texture_storage_2d<rgba8unorm, write>`,
(i, j) => `textureStore(u${j}_${i}, vec2u(0), vec4f(1));`,
device.limits.maxBindGroups,
testValue
);
const module = device.createShaderModule({ code });
@ -151,6 +182,7 @@ fn(async (t) => {
shouldError,
`actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
);
}
},
kExtraLimits
);
});

View file

@ -1,8 +1,9 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { range, reorder,
kReorderOrderKeys } from
kReorderOrderKeys,
assert } from
'../../../../../common/util/util.js';
import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
@ -13,8 +14,14 @@ import {
kBindingCombinations,
getPipelineTypeForBindingCombination,
getPerStageWGSLForBindingCombination } from
'./limit_utils.js';
const kExtraLimits = {
maxBindingsPerBindGroup: 'adapterLimit',
maxBindGroups: 'adapterLimit'
};
const limit = 'maxUniformBuffersPerShaderStage';
export const { g, description } = makeLimitTestGroup(limit);
@ -56,11 +63,17 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
t.skipIf(
t.adapter.limits.maxBindingsPerBindGroup < testValue,
`maxBindingsPerBindGroup = ${t.adapter.limits.maxBindingsPerBindGroup} which is less than ${testValue}`
);
await t.expectValidationError(
() => createBindGroupLayout(device, visibility, order, testValue),
shouldError
);
}
},
kExtraLimits
);
});
@ -83,18 +96,30 @@ fn(async (t) => {
await t.testDeviceWithRequestedMaximumLimits(
limitTest,
testValueName,
async ({ device, testValue, shouldError }) => {
const kNumGroups = 3;
async ({ device, testValue, shouldError, actualLimit }) => {
const maxBindingsPerBindGroup = Math.min(
t.device.limits.maxBindingsPerBindGroup,
actualLimit
);
const kNumGroups = Math.ceil(testValue / maxBindingsPerBindGroup);
// Not sure what to do in this case but best we get notified if it happens.
assert(kNumGroups <= t.device.limits.maxBindGroups);
const bindGroupLayouts = range(kNumGroups, (i) => {
const minInGroup = Math.floor(testValue / kNumGroups);
const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
const numInGroup = Math.min(
testValue - i * maxBindingsPerBindGroup,
maxBindingsPerBindGroup
);
return createBindGroupLayout(device, visibility, order, numInGroup);
});
await t.expectValidationError(
() => device.createPipelineLayout({ bindGroupLayouts }),
shouldError
);
}
},
kExtraLimits
);
});
@ -122,12 +147,18 @@ fn(async (t) => {
limitTest,
testValueName,
async ({ device, testValue, actualLimit, shouldError }) => {
t.skipIf(
bindGroupTest === 'sameGroup' && testValue > device.limits.maxBindingsPerBindGroup,
`can not test ${testValue} bindings in same group because maxBindingsPerBindGroup = ${device.limits.maxBindingsPerBindGroup}`
);
const code = getPerStageWGSLForBindingCombination(
bindingCombination,
order,
bindGroupTest,
(i, j) => `var<uniform> u${j}_${i}: f32`,
(i, j) => `_ = u${j}_${i};`,
device.limits.maxBindGroups,
testValue
);
const module = device.createShaderModule({ code });
@ -139,6 +170,7 @@ fn(async (t) => {
shouldError,
`actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
);
}
},
kExtraLimits
);
});

View file

@ -594,6 +594,7 @@ params((u) =>
u.
combine('dimension', [undefined, '2d']).
combine('format', kCompressedTextureFormats).
beginSubcases().
expand('sizeVariant', (p) => {
const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
return [
@ -1105,9 +1106,7 @@ fn((t) => {
t.skipIfTextureFormatNotSupported(format, viewFormat);
const compatible = t.isCompatibility ?
viewFormat === format :
viewCompatible(format, viewFormat);
const compatible = viewCompatible(t.isCompatibility, format, viewFormat);
// Test the viewFormat in the list.
t.expectValidationError(() => {

View file

@ -55,10 +55,9 @@ fn((t) => {
const { blockWidth, blockHeight } = kTextureFormatInfo[textureFormat];
t.skipIfTextureFormatNotSupported(textureFormat, viewFormat);
// Compatibility mode does not support format reinterpretation.
t.skipIf(t.isCompatibility && viewFormat !== undefined && viewFormat !== textureFormat);
const compatible = viewFormat === undefined || viewCompatible(textureFormat, viewFormat);
const compatible =
viewFormat === undefined || viewCompatible(t.isCompatibility, textureFormat, viewFormat);
const texture = t.device.createTexture({
format: textureFormat,

View file

@ -32,7 +32,8 @@ class ErrorScopeTests extends Fixture {
const device = this.trackForCleanup(
await adapter.requestDevice({
requiredLimits: {
maxTextureDimension2D: adapter.limits.maxTextureDimension2D
maxTextureDimension2D: adapter.limits.maxTextureDimension2D,
maxTextureArrayLayers: adapter.limits.maxTextureArrayLayers
}
})
);

View file

@ -86,10 +86,7 @@ fn(async (t) => {
sourceType === 'VideoFrame' ?
await getVideoFrameFromVideoElement(t, videoElement) :
videoElement;
externalTexture = t.device.importExternalTexture({
source: source
});
externalTexture = t.device.importExternalTexture({ source });
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),
@ -99,10 +96,7 @@ fn(async (t) => {
t.submitCommandBuffer(bindGroup, true);
// Import again in the same task scope should return same object.
const mayBeTheSameExternalTexture = t.device.importExternalTexture({
source: source
});
const mayBeTheSameExternalTexture = t.device.importExternalTexture({ source });
if (externalTexture === mayBeTheSameExternalTexture) {
t.submitCommandBuffer(bindGroup, true);
@ -142,10 +136,7 @@ fn(async (t) => {
// Import GPUExternalTexture
queueMicrotask(() => {
externalTexture = t.device.importExternalTexture({
source: source
});
externalTexture = t.device.importExternalTexture({ source });
});
// Submit GPUExternalTexture
@ -182,10 +173,7 @@ fn(async (t) => {
sourceType === 'VideoFrame' ?
await getVideoFrameFromVideoElement(t, videoElement) :
videoElement;
externalTexture = t.device.importExternalTexture({
source: source
});
externalTexture = t.device.importExternalTexture({ source });
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),
@ -218,10 +206,7 @@ fn(async (t) => {
let source;
await startPlayingAndWaitForVideo(videoElement, () => {
source = videoElement;
externalTexture = t.device.importExternalTexture({
source: source
});
externalTexture = t.device.importExternalTexture({ source });
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),
@ -232,10 +217,7 @@ fn(async (t) => {
});
await waitForNextTask(() => {
const mayBeTheSameExternalTexture = t.device.importExternalTexture({
source: source
});
const mayBeTheSameExternalTexture = t.device.importExternalTexture({ source });
if (externalTexture === mayBeTheSameExternalTexture) {
// ImportExternalTexture should refresh expired GPUExternalTexture.
@ -264,10 +246,7 @@ fn(async (t) => {
let externalTexture;
await startPlayingAndWaitForVideo(videoElement, async () => {
const source = await getVideoFrameFromVideoElement(t, videoElement);
externalTexture = t.device.importExternalTexture({
source: source
});
externalTexture = t.device.importExternalTexture({ source });
bindGroup = t.device.createBindGroup({
layout: t.getDefaultBindGroupLayout(),

View file

@ -3,12 +3,291 @@
**/export const description = `
TODO:
- interface matching between pipeline layout and shader
- x= {compute, vertex, fragment, vertex+fragment}, visibilities
- x= bind group index values, binding index values, multiple bindings
- x= types of bindings
- x= {equal, superset, subset}
- x= {superset, subset}
`;import { makeTestGroup } from '../../../common/framework/test_group.js';
import {
kShaderStageCombinations,
kShaderStages } from
'../../capability_info.js';
import { GPUConst } from '../../constants.js';
import { ValidationTest } from './validation_test.js';
export const g = makeTestGroup(ValidationTest);
const kBindableResources = [
'uniformBuf',
'storageBuf',
'readonlyStorageBuf',
'filtSamp',
'nonFiltSamp',
'compareSamp',
'sampledTex',
'sampledTexMS',
'readonlyStorageTex',
'writeonlyStorageTex',
'readwriteStorageTex'];
const bindGroupLayoutEntryContents = {
compareSamp: {
sampler: {
type: 'comparison'
}
},
filtSamp: {
sampler: {
type: 'filtering'
}
},
nonFiltSamp: {
sampler: {
type: 'non-filtering'
}
},
sampledTex: {
texture: {
sampleType: 'unfilterable-float'
}
},
sampledTexMS: {
texture: {
sampleType: 'unfilterable-float',
multisampled: true
}
},
storageBuf: {
buffer: {
type: 'storage'
}
},
readonlyStorageBuf: {
buffer: {
type: 'read-only-storage'
}
},
uniformBuf: {
buffer: {
type: 'uniform'
}
},
readonlyStorageTex: {
storageTexture: {
format: 'r32float',
access: 'read-only'
}
},
writeonlyStorageTex: {
storageTexture: {
format: 'r32float',
access: 'write-only'
}
},
readwriteStorageTex: {
storageTexture: {
format: 'r32float',
access: 'read-write'
}
}
};
class F extends ValidationTest {
createPipelineLayout(
bindingInPipelineLayout,
visibility)
{
return this.device.createPipelineLayout({
bindGroupLayouts: [
this.device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility,
...bindGroupLayoutEntryContents[bindingInPipelineLayout]
}]
})]
});
}
GetBindableResourceShaderDeclaration(bindableResource) {
switch (bindableResource) {
case 'compareSamp':
return 'var tmp : sampler_comparison';
case 'filtSamp':
case 'nonFiltSamp':
return 'var tmp : sampler';
case 'sampledTex':
return 'var tmp : texture_2d<f32>';
case 'sampledTexMS':
return 'var tmp : texture_multisampled_2d<f32>';
case 'storageBuf':
return 'var<storage, read_write> tmp : vec4u';
case 'readonlyStorageBuf':
return 'var<storage, read> tmp : vec4u';
case 'uniformBuf':
return 'var<uniform> tmp : vec4u;';
case 'readonlyStorageTex':
return 'var tmp : texture_storage_2d<r32float, read>';
case 'writeonlyStorageTex':
return 'var tmp : texture_storage_2d<r32float, write>';
case 'readwriteStorageTex':
return 'var tmp : texture_storage_2d<r32float, read_write>';
}
}
}
const BindingResourceCompatibleWithShaderStages = function (
bindingResource,
shaderStages)
{
if ((shaderStages & GPUConst.ShaderStage.VERTEX) > 0) {
switch (bindingResource) {
case 'writeonlyStorageTex':
case 'readwriteStorageTex':
case 'storageBuf':
return false;
default:
break;
}
}
return true;
};
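// Writable resources (storage buffers and write-only/read-write storage textures) cannot be
// visible to the VERTEX stage, hence the filtering above.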
export const g = makeTestGroup(F);
g.test('pipeline_layout_shader_exact_match').
desc(
`
Test that the binding type in the pipeline layout must match the related declaration in shader.
Note that read-write storage textures in the pipeline layout can match write-only storage textures
in the shader.
`
).
params((u) =>
u.
combine('bindingInPipelineLayout', kBindableResources).
combine('bindingInShader', kBindableResources).
beginSubcases().
combine('pipelineLayoutVisibility', kShaderStageCombinations).
combine('shaderStageWithBinding', kShaderStages).
combine('isBindingStaticallyUsed', [true, false]).
unless(
(p) =>
// We don't test using non-filtering sampler in shader because it has the same declaration
// as filtering sampler.
p.bindingInShader === 'nonFiltSamp' ||
!BindingResourceCompatibleWithShaderStages(
p.bindingInPipelineLayout,
p.pipelineLayoutVisibility
) ||
!BindingResourceCompatibleWithShaderStages(p.bindingInShader, p.shaderStageWithBinding)
)
).
fn((t) => {
const {
bindingInPipelineLayout,
bindingInShader,
pipelineLayoutVisibility,
shaderStageWithBinding,
isBindingStaticallyUsed
} = t.params;
const layout = t.createPipelineLayout(bindingInPipelineLayout, pipelineLayoutVisibility);
const bindResourceDeclaration = `@group(0) @binding(0) ${t.GetBindableResourceShaderDeclaration(
bindingInShader
)}`;
const staticallyUseBinding = isBindingStaticallyUsed ? '_ = tmp; ' : '';
const isAsync = false;
let success = true;
if (isBindingStaticallyUsed) {
success = bindingInPipelineLayout === bindingInShader;
// Filtering and non-filtering both have the same shader declaration.
success ||= bindingInPipelineLayout === 'nonFiltSamp' && bindingInShader === 'filtSamp';
// Read-write storage textures in the layout may be matched by write-only storage textures in the shader.
success ||=
bindingInPipelineLayout === 'readwriteStorageTex' &&
bindingInShader === 'writeonlyStorageTex';
// The shader using the resource must be included in the visibility in the layout.
success &&= (pipelineLayoutVisibility & shaderStageWithBinding) > 0;
}
switch (shaderStageWithBinding) {
case GPUConst.ShaderStage.COMPUTE:{
const computeShader = `
${bindResourceDeclaration};
@compute @workgroup_size(1)
fn main() {
${staticallyUseBinding}
}
`;
t.doCreateComputePipelineTest(isAsync, success, {
layout,
compute: {
module: t.device.createShaderModule({
code: computeShader
})
}
});
break;
}
case GPUConst.ShaderStage.VERTEX:{
const vertexShader = `
${bindResourceDeclaration};
@vertex
fn main() -> @builtin(position) vec4f {
${staticallyUseBinding}
return vec4f();
}
`;
t.doCreateRenderPipelineTest(isAsync, success, {
layout,
vertex: {
module: t.device.createShaderModule({
code: vertexShader
})
}
});
break;
}
case GPUConst.ShaderStage.FRAGMENT:{
const fragmentShader = `
${bindResourceDeclaration};
@fragment
fn main() -> @location(0) vec4f {
${staticallyUseBinding}
return vec4f();
}
`;
t.doCreateRenderPipelineTest(isAsync, success, {
layout,
vertex: {
module: t.device.createShaderModule({
code: `
@vertex
fn main() -> @builtin(position) vec4f {
return vec4f();
}`
})
},
fragment: {
module: t.device.createShaderModule({
code: fragmentShader
}),
targets: [
{
format: 'rgba8unorm'
}]
}
});
break;
}
}
});

View file

@ -164,16 +164,17 @@ Tests that using a destroyed texture referenced by a bindGroup set with setBindG
paramsSubcasesOnly((u) =>
u.
combine('destroyed', [false, true]).
combine('encoderType', ['compute pass', 'render pass', 'render bundle'])
combine('encoderType', ['compute pass', 'render pass', 'render bundle']).
combine('bindingType', ['texture', 'storageTexture'])
).
fn((t) => {
const { destroyed, encoderType } = t.params;
const { destroyed, encoderType, bindingType } = t.params;
const { device } = t;
const texture = t.trackForCleanup(
t.device.createTexture({
size: [1, 1, 1],
format: 'rgba8unorm',
usage: GPUTextureUsage.TEXTURE_BINDING
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING
})
);
@ -182,7 +183,9 @@ fn((t) => {
{
binding: 0,
visibility: GPUShaderStage.COMPUTE,
texture: {}
[bindingType]: {
format: texture.format
}
}]
});

View file

@ -26,10 +26,12 @@ class F extends ValidationTest {
{})
{
const {
format = 'rgba8unorm',
dimension = '2d',
width = 16,
height = 16,
arrayLayerCount = 1,
@ -41,6 +43,7 @@ class F extends ValidationTest {
return this.device.createTexture({
size: { width, height, depthOrArrayLayers: arrayLayerCount },
format,
dimension,
mipLevelCount,
sampleCount,
usage
@ -90,6 +93,7 @@ class F extends ValidationTest {
}
export const g = makeTestGroup(F);
const kArrayLayerCount = 10;
g.test('attachments,one_color_attachment').
desc(`Test that a render pass works with only one color attachment.`).
@ -278,6 +282,184 @@ fn((t) => {
t.tryRenderPass(success, { colorAttachments });
});
g.test('color_attachments,depthSlice,definedness').
desc(
`
Test that depthSlice must be undefined for 2d color attachments and defined for 3d color attachments.
- The special value '0xFFFFFFFF' is not treated as 'undefined'.
`
).
params((u) =>
u.
combine('dimension', ['2d', '3d']).
beginSubcases().
combine('depthSlice', [undefined, 0, 0xffffffff])
).
fn((t) => {
const { dimension, depthSlice } = t.params;
const texture = t.createTexture({ dimension });
const colorAttachment = t.getColorAttachment(texture);
if (depthSlice !== undefined) {
colorAttachment.depthSlice = depthSlice;
}
const descriptor = {
colorAttachments: [colorAttachment]
};
const success =
dimension === '2d' && depthSlice === undefined || dimension === '3d' && depthSlice === 0;
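// Note that 0xFFFFFFFF fails for both dimensions: it is not treated as 'undefined' and is out of
// bounds for the 3d texture's single depth slice.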
t.tryRenderPass(success, descriptor);
});
g.test('color_attachments,depthSlice,bound_check').
desc(
`
Test that depthSlice must be less than the depthOrArrayLayers of the 3d texture's subresource at the attachment's mip level.
- Check depth bounds with 3d texture size [16, 1, 10], which has 5 mip levels with depth [10, 5, 2, 1, 1]
for testing more mip level size computation.
- Failed if depthSlice >= the depth of each mip level.
`
).
params((u) =>
u.
combine('mipLevel', [0, 1, 2, 3, 4]).
beginSubcases().
expand('depthSlice', ({ mipLevel }) => {
const depthAtMipLevel = Math.max(kArrayLayerCount >> mipLevel, 1);
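// With kArrayLayerCount = 10 this yields depths 10, 5, 2, 1, 1 at mip levels 0 through 4, matching the description above.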
// Use Set() to exclude duplicates when the depthAtMipLevel is 1 and 2
return [...new Set([0, 1, depthAtMipLevel - 1, depthAtMipLevel])];
})
).
fn((t) => {
const { mipLevel, depthSlice } = t.params;
const texture = t.createTexture({
dimension: '3d',
width: 16,
height: 1,
arrayLayerCount: kArrayLayerCount,
mipLevelCount: mipLevel + 1
});
const viewDescriptor = {
baseMipLevel: mipLevel,
mipLevelCount: 1,
baseArrayLayer: 0,
arrayLayerCount: 1
};
const colorAttachment = t.getColorAttachment(texture, viewDescriptor);
colorAttachment.depthSlice = depthSlice;
const passDescriptor = {
colorAttachments: [colorAttachment]
};
const success = depthSlice < Math.max(kArrayLayerCount >> mipLevel, 1);
t.tryRenderPass(success, passDescriptor);
});
g.test('color_attachments,depthSlice,overlaps,same_miplevel').
desc(
`
Test that the depth slices of 3d color attachments have no overlaps for same texture in a render
pass.
- Succeed if the depth slices are different, or from different textures, or on different render
passes.
- Fail if same depth slice from same texture's same mip level is overwritten in a render pass.
`
).
params((u) =>
u.
combine('sameDepthSlice', [true, false]).
beginSubcases().
combine('sameTexture', [true, false]).
combine('samePass', [true, false])
).
fn((t) => {
const { sameDepthSlice, sameTexture, samePass } = t.params;
const arrayLayerCount = 4;
const texDescriptor = {
dimension: '3d',
arrayLayerCount
};
const texture = t.createTexture(texDescriptor);
const colorAttachments = [];
for (let i = 0; i < arrayLayerCount; i++) {
const colorAttachment = t.getColorAttachment(
sameTexture ? texture : t.createTexture(texDescriptor)
);
colorAttachment.depthSlice = sameDepthSlice ? 0 : i;
colorAttachments.push(colorAttachment);
}
const encoder = t.createEncoder('non-pass');
if (samePass) {
const pass = encoder.encoder.beginRenderPass({ colorAttachments });
pass.end();
} else {
for (let i = 0; i < arrayLayerCount; i++) {
const pass = encoder.encoder.beginRenderPass({ colorAttachments: [colorAttachments[i]] });
pass.end();
}
}
const success = !sameDepthSlice || !sameTexture || !samePass;
encoder.validateFinish(success);
});
g.test('color_attachments,depthSlice,overlaps,diff_miplevel').
desc(
`
Test that the same depth slice from different mip levels of a 3d texture with size [1, 1, N] can
be set in a render pass's color attachments.
`
).
params((u) => u.combine('sameMipLevel', [true, false])).
fn((t) => {
const { sameMipLevel } = t.params;
const mipLevelCount = 4;
const texDescriptor = {
dimension: '3d',
width: 1,
height: 1,
arrayLayerCount: 1 << mipLevelCount,
mipLevelCount
};
const texture = t.createTexture(texDescriptor);
const viewDescriptor = {
baseMipLevel: 0,
mipLevelCount: 1,
baseArrayLayer: 0,
arrayLayerCount: 1
};
const colorAttachments = [];
for (let i = 0; i < mipLevelCount; i++) {
if (!sameMipLevel) {
viewDescriptor.baseMipLevel = i;
}
const colorAttachment = t.getColorAttachment(texture, viewDescriptor);
colorAttachment.depthSlice = 0;
colorAttachments.push(colorAttachment);
}
const encoder = t.createEncoder('non-pass');
const pass = encoder.encoder.beginRenderPass({ colorAttachments });
pass.end();
encoder.validateFinish(!sameMipLevel);
});
g.test('attachments,same_size').
desc(
`
View file
@ -97,8 +97,18 @@ fn((t) => {
});
g.test('location,superset').
desc(`TODO: implement after spec is settled: https://github.com/gpuweb/gpuweb/issues/2038`).
unimplemented();
desc(`Tests that validation should succeed when vertex output is superset of fragment input`).
params((u) => u.combine('isAsync', [false, true])).
fn((t) => {
const { isAsync } = t.params;
const descriptor = t.getDescriptorWithStates(
t.getVertexStateWithOutputs(['@location(0) vout0: f32', '@location(1) vout1: f32']),
t.getFragmentStateWithInputs(['@location(1) fin1: f32'])
);
t.doCreateRenderPipelineTest(isAsync, true, descriptor);
});
g.test('location,subset').
desc(`Tests that validation should fail when vertex output is a subset of fragment input.`).
@ -292,17 +302,20 @@ desc(
params((u) =>
u.combine('isAsync', [false, true]).combineWithParams([
// Number of user-defined input scalar components in test shader = device.limits.maxInterStageShaderComponents + numScalarDelta.
{ numScalarDelta: 0, useExtraBuiltinInputs: false, _success: true },
{ numScalarDelta: 1, useExtraBuiltinInputs: false, _success: false },
{ numScalarDelta: 0, useExtraBuiltinInputs: true, _success: false },
{ numScalarDelta: -3, useExtraBuiltinInputs: true, _success: true },
{ numScalarDelta: -2, useExtraBuiltinInputs: true, _success: false }]
{ numScalarDelta: 0, useExtraBuiltinInputs: false },
{ numScalarDelta: 1, useExtraBuiltinInputs: false },
{ numScalarDelta: 0, useExtraBuiltinInputs: true },
{ numScalarDelta: -3, useExtraBuiltinInputs: true },
{ numScalarDelta: -2, useExtraBuiltinInputs: true }]
)
).
fn((t) => {
const { isAsync, numScalarDelta, useExtraBuiltinInputs, _success } = t.params;
const { isAsync, numScalarDelta, useExtraBuiltinInputs } = t.params;
const numScalarComponents = t.device.limits.maxInterStageShaderComponents + numScalarDelta;
const numExtraComponents = useExtraBuiltinInputs ? t.isCompatibility ? 2 : 3 : 0;
const numUsedComponents = numScalarComponents + numExtraComponents;
const success = numUsedComponents <= t.device.limits.maxInterStageShaderComponents;
const numVec4 = Math.floor(numScalarComponents / 4);
const numTrailingScalars = numScalarComponents % 4;
@ -319,9 +332,11 @@ fn((t) => {
if (useExtraBuiltinInputs) {
inputs.push(
'@builtin(front_facing) front_facing_in: bool',
'@builtin(sample_index) sample_index_in: u32',
'@builtin(sample_mask) sample_mask_in: u32'
);
if (!t.isCompatibility) {
inputs.push('@builtin(sample_index) sample_index_in: u32');
}
}
const descriptor = t.getDescriptorWithStates(
@ -329,5 +344,5 @@ fn((t) => {
t.getFragmentStateWithInputs(inputs, true)
);
t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
t.doCreateRenderPipelineTest(isAsync, success, descriptor);
});
View file
@ -1230,7 +1230,7 @@ fn((t) => {
callDrawOrDispatch
} = t.params;
if (writableUsage === 'readwrite-storage-texture') {
t.requireLanguageFeatureOrSkipTestCase('readonly_and_readwrite_storage_textures');
t.skipIfLanguageFeatureNotSupported('readonly_and_readwrite_storage_textures');
}
const view = t.
View file
@ -290,7 +290,8 @@ combineWithParams([
{ bgLayer: 1, bgLayerCount: 2 }]
).
beginSubcases().
combine('dsReadOnly', [true, false]).
combine('depthReadOnly', [true, false]).
combine('stencilReadOnly', [true, false]).
combine('bgAspect', ['depth-only', 'stencil-only']).
combine('inSamePass', [true, false])
).
@ -302,7 +303,8 @@ fn((t) => {
bgLevelCount,
bgLayer,
bgLayerCount,
dsReadOnly,
depthReadOnly,
stencilReadOnly,
bgAspect,
inSamePass
} = t.params;
@ -333,12 +335,12 @@ fn((t) => {
});
const depthStencilAttachment = {
view: attachmentView,
depthReadOnly: dsReadOnly,
depthLoadOp: dsReadOnly ? undefined : 'load',
depthStoreOp: dsReadOnly ? undefined : 'store',
stencilReadOnly: dsReadOnly,
stencilLoadOp: dsReadOnly ? undefined : 'load',
stencilStoreOp: dsReadOnly ? undefined : 'store'
depthReadOnly,
depthLoadOp: depthReadOnly ? undefined : 'load',
depthStoreOp: depthReadOnly ? undefined : 'store',
stencilReadOnly,
stencilLoadOp: stencilReadOnly ? undefined : 'load',
stencilStoreOp: stencilReadOnly ? undefined : 'store'
};
const encoder = t.device.createCommandEncoder();
@ -379,8 +381,11 @@ fn((t) => {
bgLayer + bgLayerCount - 1
);
const isNotOverlapped = isMipLevelNotOverlapped || isArrayLayerNotOverlapped;
const readonly =
bgAspect === 'stencil-only' && stencilReadOnly ||
bgAspect === 'depth-only' && depthReadOnly;
const success = !inSamePass || isNotOverlapped || dsReadOnly;
const success = !inSamePass || isNotOverlapped || readonly;
t.expectValidationError(() => {
encoder.finish();
}, !success);
@ -424,6 +429,7 @@ unless(
t.bgUsage0 !== 'sampled-texture' && t.bg0Levels.count > 1 ||
t.bgUsage1 !== 'sampled-texture' && t.bg1Levels.count > 1
).
beginSubcases().
combine('inSamePass', [true, false])
).
fn((t) => {
View file
@ -238,7 +238,7 @@ fn((t) => {
textureUsage0 === 'readwrite-storage-texture' ||
textureUsage1 === 'readwrite-storage-texture')
{
t.requireLanguageFeatureOrSkipTestCase('readonly_and_readwrite_storage_textures');
t.skipIfLanguageFeatureNotSupported('readonly_and_readwrite_storage_textures');
}
const texture0 = t.device.createTexture({
View file
@ -704,10 +704,7 @@ fn(async (t) => {
entries: [
{
binding: 0,
resource: t.device.importExternalTexture({
source: source
})
resource: t.device.importExternalTexture({ source })
}]
});
View file
@ -820,4 +820,5 @@ export const kFeatureNames = keysOf(kFeatureNameInfo);
export const kKnownWGSLLanguageFeatures = [
'readonly_and_readwrite_storage_textures',
'packed_4x8_integer_dot_product',
'unrestricted_pointer_parameters'];
'unrestricted_pointer_parameters',
'pointer_composite_access'];
View file
@ -0,0 +1,34 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Tests that, in compat mode, you can not create a bind group layout with unsupported storage texture formats.
`;import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { kCompatModeUnsupportedStorageTextureFormats } from '../../../format_info.js';
import { CompatibilityTest } from '../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('unsupportedStorageTextureFormats').
desc(
`
Tests that, in compat mode, you can not create a bind group layout with unsupported storage texture formats.
`
).
params((u) => u.combine('format', kCompatModeUnsupportedStorageTextureFormats)).
fn((t) => {
const { format } = t.params;
t.expectValidationError(() => {
t.device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.COMPUTE,
storageTexture: {
format
}
}]
});
}, true);
});
View file
@ -3,15 +3,17 @@
**/export const description = `
Tests limitations of copyTextureToTexture in compat mode.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { kCompressedTextureFormats, kTextureFormatInfo } from '../../../../../format_info.js';
import {
kAllTextureFormats,
kCompressedTextureFormats,
kTextureFormatInfo } from
'../../../../../format_info.js';
import { CompatibilityTest } from '../../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('compressed').
desc(
`Tests that you can not call copyTextureToTextures with compressed textures in compat mode.`
).
desc(`Tests that you can not call copyTextureToTexture with compressed textures in compat mode.`).
params((u) => u.combine('format', kCompressedTextureFormats)).
beforeAllSubcases((t) => {
const { format } = t.params;
@ -36,6 +38,50 @@ fn((t) => {
});
t.trackForCleanup(dstTexture);
const encoder = t.device.createCommandEncoder();
encoder.copyTextureToTexture({ texture: srcTexture }, { texture: dstTexture }, [
blockWidth,
blockHeight,
1]
);
t.expectGPUError('validation', () => {
encoder.finish();
});
});
g.test('multisample').
desc(`Test that you can not call copyTextureToTexture with multisample textures in compat mode.`).
params((u) =>
u.
beginSubcases().
combine('format', kAllTextureFormats).
filter(({ format }) => {
const info = kTextureFormatInfo[format];
return info.multisample && !info.feature;
})
).
fn((t) => {
const { format } = t.params;
const { blockWidth, blockHeight } = kTextureFormatInfo[format];
t.skipIfTextureFormatNotSupported(format);
const srcTexture = t.device.createTexture({
size: [blockWidth, blockHeight, 1],
format,
sampleCount: 4,
usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT
});
t.trackForCleanup(srcTexture);
const dstTexture = t.device.createTexture({
size: [blockWidth, blockHeight, 1],
format,
sampleCount: 4,
usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT
});
t.trackForCleanup(dstTexture);
const encoder = t.device.createCommandEncoder();
encoder.copyTextureToTexture({ texture: srcTexture }, { texture: dstTexture }, [
blockWidth,

View file
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Tests that depthBiasClamp must be zero in compat mode.
`;import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
g.test('depthBiasClamp').
desc('Tests that depthBiasClamp must be zero in compat mode.').
params((u) =>
u //
.combine('depthBiasClamp', [undefined, 0, 0.1, 1]).
combine('async', [false, true])
).
fn((t) => {
const { depthBiasClamp, async } = t.params;
const module = t.device.createShaderModule({
code: `
@vertex fn vs() -> @builtin(position) vec4f {
return vec4f(0);
}
@fragment fn fs() -> @location(0) vec4f {
return vec4f(0);
}
`
});
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs'
},
fragment: {
module,
entryPoint: 'fs',
targets: [{ format: 'rgba8unorm' }]
},
depthStencil: {
format: 'depth24plus',
depthWriteEnabled: true,
depthCompare: 'always',
...(depthBiasClamp !== undefined && { depthBiasClamp })
}
};
const success = !depthBiasClamp;
t.doCreateRenderPipelineTest(async, success, pipelineDescriptor);
});
View file
@ -3,6 +3,7 @@
**/export const description = `
Tests limitations of createRenderPipeline related to shader modules in compat mode.
`;import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { kCompatModeUnsupportedStorageTextureFormats } from '../../../../format_info.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
export const g = makeTestGroup(CompatibilityTest);
@ -73,6 +74,65 @@ fn((t) => {
);
});
g.test('sample_index').
desc(
`
Tests that you can not create a render pipeline with a shader module that uses sample_index in compat mode.
- Test that a pipeline with a shader that uses sample_index fails.
- Test that a pipeline that references a module that has a shader that uses sample_index
but the pipeline does not reference that shader succeeds.
`
).
params((u) =>
u.combine('entryPoint', ['fsWithoutSampleIndexUsage', 'fsWithSampleIndexUsage'])
).
fn((t) => {
const { entryPoint } = t.params;
const module = t.device.createShaderModule({
code: `
@vertex fn vs() -> @builtin(position) vec4f {
return vec4f(1);
}
@fragment fn fsWithoutSampleIndexUsage() -> @location(0) vec4f {
return vec4f(0);
}
@fragment fn fsWithSampleIndexUsage(@builtin(sample_index) sampleIndex: u32) -> @location(0) vec4f {
_ = sampleIndex;
return vec4f(0);
}
`
});
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs'
},
fragment: {
module,
entryPoint,
targets: [
{
format: 'rgba8unorm'
}]
},
multisample: {
count: 4
}
};
const isValid = entryPoint === 'fsWithoutSampleIndexUsage';
t.expectGPUError(
'validation',
() => t.device.createRenderPipeline(pipelineDescriptor),
!isValid
);
});
g.test('interpolate').
desc(
`
@ -151,4 +211,71 @@ fn((t) => {
() => t.device.createRenderPipeline(pipelineDescriptor),
!isValid
);
});
g.test('unsupportedStorageTextureFormats,computePipeline').
desc(
`
Tests that you can not create a compute pipeline with unsupported storage texture formats in compat mode.
`
).
params((u) =>
u //
.combine('format', kCompatModeUnsupportedStorageTextureFormats).
combine('async', [false, true])
).
fn((t) => {
const { format, async } = t.params;
const module = t.device.createShaderModule({
code: `
@group(0) @binding(0) var s: texture_storage_2d<${format}, read>;
@compute @workgroup_size(1) fn cs() {
_ = textureLoad(s, vec2u(0));
}
`
});
const pipelineDescriptor = {
layout: 'auto',
compute: {
module,
entryPoint: 'cs'
}
};
t.doCreateComputePipelineTest(async, false, pipelineDescriptor);
});
g.test('unsupportedStorageTextureFormats,renderPipeline').
desc(
`
Tests that you can not create a render pipeline with unsupported storage texture formats in compat mode.
`
).
params((u) =>
u //
.combine('format', kCompatModeUnsupportedStorageTextureFormats).
combine('async', [false, true])
).
fn((t) => {
const { format, async } = t.params;
const module = t.device.createShaderModule({
code: `
@group(0) @binding(0) var s: texture_storage_2d<${format}, read>;
@vertex fn vs() -> @builtin(position) vec4f {
_ = textureLoad(s, vec2u(0));
return vec4f(0);
}
`
});
const pipelineDescriptor = {
layout: 'auto',
vertex: {
module,
entryPoint: 'vs'
}
};
t.doCreateRenderPipelineTest(async, false, pipelineDescriptor);
});
View file
@ -6,7 +6,7 @@ Tests that textureBindingViewDimension must be compatible with texture dimension
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { kTextureDimensions, kTextureViewDimensions } from '../../../../capability_info.js';
import { kColorTextureFormats, kTextureFormatInfo } from '../../../../format_info.js';
import { kColorTextureFormats, kCompatModeUnsupportedStorageTextureFormats, kTextureFormatInfo } from '../../../../format_info.js';
import { getTextureDimensionFromView } from '../../../../util/texture/base.js';
import { CompatibilityTest } from '../../../compatibility_test.js';
@ -152,4 +152,21 @@ fn((t) => {
true
);
}
});
g.test('unsupportedStorageTextureFormats').
desc(`Tests that you can not create unsupported storage texture formats in compat mode.`).
params((u) => u.combine('format', kCompatModeUnsupportedStorageTextureFormats)).
fn((t) => {
const { format } = t.params;
t.expectGPUError(
'validation',
() =>
t.device.createTexture({
size: [1, 1, 1],
format,
usage: GPUTextureUsage.STORAGE_BINDING
}),
true
);
});
View file
@ -1910,8 +1910,12 @@ format)
*
* This function may need to be generalized to use `baseFormat` from `kTextureFormatInfo`.
*/
export function viewCompatible(a, b) {
return a === b || a + '-srgb' === b || b + '-srgb' === a;
export function viewCompatible(
compatibilityMode,
a,
b)
{
return compatibilityMode ? a === b : a === b || a + '-srgb' === b || b + '-srgb' === a;
}
export function getFeaturesForFormats(
@ -1931,16 +1935,19 @@ export function isCompressedTextureFormat(format) {
return format in kCompressedTextureFormatInfo;
}
export const kCompatModeUnsupportedStorageTextureFormats = [
'rg32float',
'rg32sint',
'rg32uint'];
export function isTextureFormatUsableAsStorageFormat(
format,
isCompatibilityMode)
{
if (isCompatibilityMode) {
switch (format) {
case 'rg32float':
case 'rg32sint':
case 'rg32uint':
return false;
if (kCompatModeUnsupportedStorageTextureFormats.indexOf(format) >= 0) {
return false;
}
}
return !!kTextureFormatInfo[format].color?.storage;
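A short illustration of the new compatibilityMode argument to viewCompatible, based only on the definition in the hunk above (in core mode the srgb pairing rule still applies; in compat mode only exact matches are view-compatible):
// Core mode: a format and its -srgb variant remain view-compatible.
viewCompatible(false, 'rgba8unorm', 'rgba8unorm-srgb'); // true
// Compat mode: only an exact format match is view-compatible.
viewCompatible(true, 'rgba8unorm', 'rgba8unorm-srgb');  // false
viewCompatible(true, 'rgba8unorm', 'rgba8unorm');       // true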
View file
@ -60,7 +60,7 @@ import {
textureContentIsOKByT2B } from
'./util/texture/texture_ok.js';
import { createTextureFromTexelView, createTextureFromTexelViews } from './util/texture.js';
import { reifyOrigin3D } from './util/unions.js';
import { reifyExtent3D, reifyOrigin3D } from './util/unions.js';
const devicePool = new DevicePool();
@ -459,21 +459,25 @@ export class GPUTestBase extends Fixture {
}
/** Skips this test case if the `langFeature` is *not* supported. */
requireLanguageFeatureOrSkipTestCase(langFeature) {
const lf = getGPU(this.rec).wgslLanguageFeatures;
if (lf === undefined || !lf.has(langFeature)) {
skipIfLanguageFeatureNotSupported(langFeature) {
if (!this.hasLanguageFeature(langFeature)) {
this.skip(`WGSL language feature '${langFeature}' is not supported`);
}
}
/** Skips this test case if the `langFeature` is supported. */
skipIfLanguageFeatureSupported(langFeature) {
const lf = getGPU(this.rec).wgslLanguageFeatures;
if (lf !== undefined && lf.has(langFeature)) {
if (this.hasLanguageFeature(langFeature)) {
this.skip(`WGSL language feature '${langFeature}' is supported`);
}
}
/** returns true iff the `langFeature` is supported */
hasLanguageFeature(langFeature) {
const lf = getGPU(this.rec).wgslLanguageFeatures;
return lf !== undefined && lf.has(langFeature);
}
/**
* Expect a GPUBuffer's contents to pass the provided check.
*
@ -784,7 +788,8 @@ export class GPUTestBase extends Fixture {
slice = 0,
layout,
generateWarningOnly = false,
checkElementsBetweenFn = (act, [a, b]) => checkElementsBetween(act, [(i) => a[i], (i) => b[i]])
checkElementsBetweenFn = (act, [a, b]) =>
checkElementsBetween(act, [(i) => a[i], (i) => b[i]])
@ -811,24 +816,32 @@ export class GPUTestBase extends Fixture {
/**
* Emulate a texture to buffer copy by using a compute shader
* to load texture value of a single pixel and write to a storage buffer.
* For sample count == 1, the buffer contains only one value of the sample.
* For sample count > 1, the buffer contains (N = sampleCount) values sorted
* to load texture values of a subregion of a 2d texture and write to a storage buffer.
* For sample count == 1, the buffer contains extent[0] * extent[1] values of the sample.
* For sample count > 1, the buffer contains extent[0] * extent[1] * (N = sampleCount) values sorted
* in the order of their sample index [0, sampleCount - 1]
*
* This can be useful when texture-to-buffer copy is not available for the texture format
* (e.g. depth24plus), or when the texture is multisampled.
*
* MAINTENANCE_TODO: extend to read multiple pixels with given origin and size.
* MAINTENANCE_TODO: extend texture dimension to 1d and 3d.
*
* @returns storage buffer containing the copied value from the texture.
*/
copySinglePixelTextureToBufferUsingComputePass(
copy2DTextureToBufferUsingComputePass(
type,
componentCount,
textureView,
sampleCount)
sampleCount = 1,
extent_ = [1, 1, 1],
origin_ = [0, 0, 0])
{
const origin = reifyOrigin3D(origin_);
const extent = reifyExtent3D(extent_);
const width = extent.width;
const height = extent.height;
const kWorkgroupSizeX = 8;
const kWorkgroupSizeY = 8;
const textureSrcCode =
sampleCount === 1 ?
`@group(0) @binding(0) var src: texture_2d<${type}>;` :
@ -841,13 +854,24 @@ export class GPUTestBase extends Fixture {
${textureSrcCode}
@group(0) @binding(1) var<storage, read_write> dst : Buffer;
@compute @workgroup_size(1) fn main() {
var coord = vec2<i32>(0, 0);
for (var sampleIndex = 0; sampleIndex < ${sampleCount};
struct Params {
origin: vec2u,
extent: vec2u,
};
@group(0) @binding(2) var<uniform> params : Params;
@compute @workgroup_size(${kWorkgroupSizeX}, ${kWorkgroupSizeY}, 1) fn main(@builtin(global_invocation_id) id : vec3u) {
let boundary = params.origin + params.extent;
let coord = params.origin + id.xy;
if (any(coord >= boundary)) {
return;
}
let offset = (id.x + id.y * params.extent.x) * ${componentCount} * ${sampleCount};
for (var sampleIndex = 0u; sampleIndex < ${sampleCount};
sampleIndex = sampleIndex + 1) {
let o = sampleIndex * ${componentCount};
let v = textureLoad(src, coord, sampleIndex);
for (var component = 0; component < ${componentCount}; component = component + 1) {
let o = offset + sampleIndex * ${componentCount};
let v = textureLoad(src, coord.xy, sampleIndex);
for (var component = 0u; component < ${componentCount}; component = component + 1) {
dst.data[o + component] = v[component];
}
}
@ -864,11 +888,16 @@ export class GPUTestBase extends Fixture {
});
const storageBuffer = this.device.createBuffer({
size: sampleCount * type.size * componentCount,
size: sampleCount * type.size * componentCount * width * height,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC
});
this.trackForCleanup(storageBuffer);
const uniformBuffer = this.makeBufferWithContents(
new Uint32Array([origin.x, origin.y, width, height]),
GPUBufferUsage.UNIFORM
);
const uniformBindGroup = this.device.createBindGroup({
layout: computePipeline.getBindGroupLayout(0),
entries: [
@ -881,6 +910,12 @@ export class GPUTestBase extends Fixture {
resource: {
buffer: storageBuffer
}
},
{
binding: 2,
resource: {
buffer: uniformBuffer
}
}]
});
@ -889,7 +924,11 @@ export class GPUTestBase extends Fixture {
const pass = encoder.beginComputePass();
pass.setPipeline(computePipeline);
pass.setBindGroup(0, uniformBindGroup);
pass.dispatchWorkgroups(1);
pass.dispatchWorkgroups(
Math.floor((width + kWorkgroupSizeX - 1) / kWorkgroupSizeX),
Math.floor((height + kWorkgroupSizeY - 1) / kWorkgroupSizeY),
1
);
pass.end();
this.device.queue.submit([encoder.finish()]);
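For reference, given the shader layout above, the storage buffer returned by copy2DTextureToBufferUsingComputePass can be indexed as sketched below (a sketch; x, y, s, c and width are assumed names for the pixel offset from origin, the sample index, the component index, and extent[0]):
// Index of component c of sample s at pixel (x, y), relative to origin:
//   index = ((x + y * width) * sampleCount + s) * componentCount + c
// e.g. width = 4, sampleCount = 4, componentCount = 1 (depth24plus):
//   sample 2 of pixel (1, 0) is at ((1 + 0 * 4) * 4 + 2) * 1 = 6.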
View file
@ -248,6 +248,15 @@ export const listing = [
"single_buffer"
]
},
{
"file": [
"api",
"operation",
"memory_sync",
"texture",
"readonly_depth_stencil"
]
},
{
"file": [
"api",
@ -495,6 +504,14 @@ export const listing = [
"compilation_info"
]
},
{
"file": [
"api",
"operation",
"storage_texture",
"read_write"
]
},
{
"file": [
"api",
@ -1585,6 +1602,14 @@ export const listing = [
"createBindGroup"
]
},
{
"file": [
"compat",
"api",
"validation",
"createBindGroupLayout"
]
},
{
"file": [
"compat",
@ -1615,6 +1640,15 @@ export const listing = [
"pipeline_bind_group_compat"
]
},
{
"file": [
"compat",
"api",
"validation",
"render_pipeline",
"depth_stencil_state"
]
},
{
"file": [
"compat",
@ -2391,6 +2425,26 @@ export const listing = [
"dot"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"dot4I8Packed"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"dot4U8Packed"
]
},
{
"file": [
"shader",
@ -2741,6 +2795,46 @@ export const listing = [
"pack4x8unorm"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"pack4xI8"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"pack4xI8Clamp"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"pack4xU8"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"pack4xU8Clamp"
]
},
{
"file": [
"shader",
@ -3131,6 +3225,26 @@ export const listing = [
"unpack4x8unorm"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"unpack4xI8"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"builtin",
"unpack4xU8"
]
},
{
"file": [
"shader",
@ -3141,6 +3255,16 @@ export const listing = [
"workgroupBarrier"
]
},
{
"file": [
"shader",
"execution",
"expression",
"call",
"user",
"ptr_params"
]
},
{
"file": [
"shader",
@ -3159,6 +3283,15 @@ export const listing = [
"af_assignment"
]
},
{
"file": [
"shader",
"execution",
"expression",
"unary",
"ai_assignment"
]
},
{
"file": [
"shader",
@ -3240,6 +3373,15 @@ export const listing = [
"i32_conversion"
]
},
{
"file": [
"shader",
"execution",
"expression",
"unary",
"indirection"
]
},
{
"file": [
"shader",
@ -3345,6 +3487,13 @@ export const listing = [
"while"
]
},
{
"file": [
"shader",
"execution",
"memory_layout"
]
},
{
"file": [
"shader",
@ -3430,6 +3579,14 @@ export const listing = [
"shared_structs"
]
},
{
"file": [
"shader",
"execution",
"shader_io",
"workgroup_size"
]
},
{
"file": [
"shader",
@ -3437,6 +3594,21 @@ export const listing = [
"shadow"
]
},
{
"file": [
"shader",
"execution",
"stage"
]
},
{
"file": [
"shader",
"execution",
"statement",
"compound"
]
},
{
"file": [
"shader",
@ -3474,6 +3646,14 @@ export const listing = [
"const_assert"
]
},
{
"file": [
"shader",
"validation",
"decl",
"compound_statement"
]
},
{
"file": [
"shader",
@ -3498,6 +3678,14 @@ export const listing = [
"ptr_spelling"
]
},
{
"file": [
"shader",
"validation",
"decl",
"var"
]
},
{
"file": [
"shader",
@ -3554,6 +3742,16 @@ export const listing = [
"acosh"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"arrayLength"
]
},
{
"file": [
"shader",
@ -3614,6 +3812,16 @@ export const listing = [
"atomics"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"barriers"
]
},
{
"file": [
"shader",
@ -3714,6 +3922,16 @@ export const listing = [
"exp2"
]
},
{
"file": [
"shader",
"validation",
"expression",
"call",
"builtin",
"floor"
]
},
{
"file": [
"shader",
@ -3904,6 +4122,14 @@ export const listing = [
"unpack4xU8"
]
},
{
"file": [
"shader",
"validation",
"extension",
"pointer_composite_access"
]
},
{
"file": [
"shader",
@ -3960,6 +4186,14 @@ export const listing = [
"break"
]
},
{
"file": [
"shader",
"validation",
"parse",
"break_if"
]
},
{
"file": [
"shader",
@ -3976,6 +4210,14 @@ export const listing = [
"comments"
]
},
{
"file": [
"shader",
"validation",
"parse",
"compound"
]
},
{
"file": [
"shader",
@ -3992,6 +4234,14 @@ export const listing = [
"const_assert"
]
},
{
"file": [
"shader",
"validation",
"parse",
"continuing"
]
},
{
"file": [
"shader",
@ -4048,6 +4298,14 @@ export const listing = [
"pipeline_stage"
]
},
{
"file": [
"shader",
"validation",
"parse",
"requires"
]
},
{
"file": [
"shader",
@ -4200,6 +4458,13 @@ export const listing = [
"uniformity"
]
},
{
"file": [
"util",
"texture",
"color_space_conversions"
]
},
{
"file": [
"util",
View file
@ -339,6 +339,11 @@ fn(async (t) => {
const numInvocations = t.params.workgroupSize;
const scalarType = t.params.scalarType;
t.skipIf(
numInvocations > t.device.limits.maxComputeWorkgroupSizeX,
`${numInvocations} > maxComputeWorkgroupSizeX(${t.device.limits.maxComputeWorkgroupSizeX})`
);
// Number of times each workgroup attempts to exchange the same value to the same memory address
const numWrites = 4;
@ -556,6 +561,11 @@ fn(async (t) => {
const numInvocations = t.params.workgroupSize;
const scalarType = t.params.scalarType;
t.skipIf(
numInvocations > t.device.limits.maxComputeWorkgroupSizeX,
`${numInvocations} > maxComputeWorkgroupSizeX(${t.device.limits.maxComputeWorkgroupSizeX})`
);
// Number of times each workgroup attempts to exchange the same value to the same memory address
const numWrites = 4;
View file
@ -25,15 +25,14 @@ export const kMapId = {
}
};
export function typedArrayCtor(scalarType) {
export function typedArrayCtor(
scalarType)
{
switch (scalarType) {
case 'u32':
return Uint32Array;
case 'i32':
return Int32Array;
default:
assert(false, 'Atomic variables can only be u32 or i32');
return Uint8Array;
}
}
View file
@ -1,7 +1,16 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { assert } from '../../../../../../common/util/util.js';import { alwaysPass, anyOf } from '../../../../../util/compare.js';import { kBit, kValue } from '../../../../../util/constants.js';
import { Vector, f16, f32, i32, toVector, u32 } from '../../../../../util/conversion.js';
import {
Vector,
f16,
f32,
i32,
toVector,
u32,
abstractFloat } from
'../../../../../util/conversion.js';
import { FP, FPInterval } from '../../../../../util/floating_point.js';
import {
cartesianProduct,
@ -690,6 +699,11 @@ export const d = makeCaseCache('bitcast', {
input: f32(e),
expected: bitcastF32ToVec2F16Comparator(e)
})),
af_to_vec2_f16: () =>
f32FiniteRangeForF16Vec2Finite.map((e) => ({
input: abstractFloat(e),
expected: bitcastF32ToVec2F16Comparator(e)
})),
// vec2<i32>, vec2<u32>, vec2<f32> to vec4<f16>
vec2_i32_to_vec4_f16_inf_nan: () =>
@ -722,6 +736,11 @@ export const d = makeCaseCache('bitcast', {
input: toVector(e, f32),
expected: bitcastVec2F32ToVec4F16Comparator(e)
})),
vec2_af_to_vec4_f16: () =>
slidingSlice(f32FiniteRangeForF16Vec2Finite, 2).map((e) => ({
input: toVector(e, abstractFloat),
expected: bitcastVec2F32ToVec4F16Comparator(e)
})),
// vec2<f16> to i32, u32, f32
vec2_f16_to_u32: () =>
View file
@ -13,6 +13,9 @@ S is i32, u32, f32
T is i32, u32, f32, and T is not S
Reinterpretation of bits. Beware non-normal f32 values.
@const @must_use fn bitcast<u32>(e : AbstractInt) -> T
@const @must_use fn bitcast<vecN<u32>>(e : vecN<AbstractInt>) -> T
@const @must_use fn bitcast<T>(e: vec2<f16> ) -> T
@const @must_use fn bitcast<vec2<T>>(e: vec4<f16> ) -> vec2<T>
@const @must_use fn bitcast<vec2<f16>>(e: T ) -> vec2<f16>
@ -21,8 +24,24 @@ T is i32, u32, f32
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeF16, TypeF32, TypeI32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { anyOf } from '../../../../../util/compare.js';
import {
TypeF16,
TypeF32,
TypeI32,
TypeU32,
TypeVec,
TypeAbstractFloat,
f32,
u32,
i32,
abstractFloat,
uint32ToFloat32,
u32Bits } from
'../../../../../util/conversion.js';
import { FP } from '../../../../../util/floating_point.js';
import { scalarF32Range } from '../../../../../util/math.js';
import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
import { d } from './bitcast.cache.js';
import { builtinWithPredeclaration } from './builtin.js';
@ -464,4 +483,217 @@ fn(async (t) => {
t.params,
cases
);
});
});
// Abstract Float
g.test('af_to_f32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract float to f32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
fn(async (t) => {
const cases = scalarF32Range().map((u) => {
const res = FP['f32'].correctlyRounded(u).map((f) => {
return f32(f);
});
return {
input: abstractFloat(u),
expected: anyOf(...res)
};
});
await run(t, bitcastBuilder('f32', t.params), [TypeAbstractFloat], TypeF32, t.params, cases);
});
g.test('af_to_i32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract float to i32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
fn(async (t) => {
const values = [
0,
1,
10,
256,
u32Bits(0b11111111011111111111111111111111).value,
u32Bits(0b11111111010000000000000000000000).value,
u32Bits(0b11111110110000000000000000000000).value,
u32Bits(0b11111101110000000000000000000000).value,
u32Bits(0b11111011110000000000000000000000).value,
u32Bits(0b11110111110000000000000000000000).value,
u32Bits(0b11101111110000000000000000000000).value,
u32Bits(0b11011111110000000000000000000000).value,
u32Bits(0b10111111110000000000000000000000).value,
u32Bits(0b01111111011111111111111111111111).value,
u32Bits(0b01111111010000000000000000000000).value,
u32Bits(0b01111110110000000000000000000000).value,
u32Bits(0b01111101110000000000000000000000).value,
u32Bits(0b01111011110000000000000000000000).value,
u32Bits(0b01110111110000000000000000000000).value,
u32Bits(0b01101111110000000000000000000000).value,
u32Bits(0b01011111110000000000000000000000).value,
u32Bits(0b00111111110000000000000000000000).value];
const cases = values.map((u) => {
return {
input: abstractFloat(uint32ToFloat32(u)),
expected: i32(u)
};
});
await run(t, bitcastBuilder('i32', t.params), [TypeAbstractFloat], TypeI32, t.params, cases);
});
g.test('af_to_u32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract float to u32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
fn(async (t) => {
const values = [
0,
1,
10,
256,
u32Bits(0b11111111011111111111111111111111).value,
u32Bits(0b11111111010000000000000000000000).value,
u32Bits(0b11111110110000000000000000000000).value,
u32Bits(0b11111101110000000000000000000000).value,
u32Bits(0b11111011110000000000000000000000).value,
u32Bits(0b11110111110000000000000000000000).value,
u32Bits(0b11101111110000000000000000000000).value,
u32Bits(0b11011111110000000000000000000000).value,
u32Bits(0b10111111110000000000000000000000).value,
u32Bits(0b01111111011111111111111111111111).value,
u32Bits(0b01111111010000000000000000000000).value,
u32Bits(0b01111110110000000000000000000000).value,
u32Bits(0b01111101110000000000000000000000).value,
u32Bits(0b01111011110000000000000000000000).value,
u32Bits(0b01110111110000000000000000000000).value,
u32Bits(0b01101111110000000000000000000000).value,
u32Bits(0b01011111110000000000000000000000).value,
u32Bits(0b00111111110000000000000000000000).value];
const cases = values.map((u) => {
return {
input: abstractFloat(uint32ToFloat32(u)),
expected: u32(u)
};
});
await run(t, bitcastBuilder('u32', t.params), [TypeAbstractFloat], TypeU32, t.params, cases);
});
g.test('af_to_vec2f16').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract float to vec2 of f16 tests`).
beforeAllSubcases((t) => {
t.selectDeviceOrSkipTestCase('shader-f16');
}).
params((u) => u.combine('inputSource', onlyConstInputSource)).
fn(async (t) => {
const cases = await d.get('af_to_vec2_f16');
await run(
t,
bitcastBuilder('vec2<f16>', t.params),
[TypeAbstractFloat],
TypeVec(2, TypeF16),
t.params,
cases
);
});
g.test('vec2af_to_vec4f16').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast vec2 of abstract float to vec4 of f16 tests`).
beforeAllSubcases((t) => {
t.selectDeviceOrSkipTestCase('shader-f16');
}).
params((u) => u.combine('inputSource', onlyConstInputSource)).
fn(async (t) => {
const cases = await d.get('vec2_af_to_vec4_f16');
await run(
t,
bitcastBuilder('vec4<f16>', t.params),
[TypeVec(2, TypeAbstractFloat)],
TypeVec(4, TypeF16),
t.params,
cases
);
});
// Abstract Int
// bitcast<i32>(12)
// - cases: scalarI32Range
g.test('ai_to_i32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract int to i32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
unimplemented();
// bitcast<u32>(12)
// - cases: scalarU32Range
g.test('ai_to_u32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract int to u32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
unimplemented();
// bitcast<f32>(12)
// - cases: scalarF32Range
g.test('ai_to_f32').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract int to f32 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
unimplemented();
// bitcast<vec2<f16>>(12)
// - cases: scalarF16Range
g.test('ai_to_vec2f16').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast abstract int to vec2f16 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
unimplemented();
// bitcast<vec4<f16>>(vec2(12, 12))
// - cases: sparseVectorF16Range
g.test('vec2ai_to_vec4f16').
specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin').
desc(`bitcast vec2ai to vec4f16 tests`).
params((u) =>
u.
combine('inputSource', onlyConstInputSource).
combine('vectorize', [undefined, 2, 3, 4])
).
unimplemented();
View file
@ -0,0 +1,74 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'dot4I8Packed' builtin function
@const fn dot4I8Packed(e1: u32 ,e2: u32) -> i32
e1 and e2 are interpreted as vectors with four 8-bit signed integer components. Return the signed
integer dot product of these two vectors. Each component is sign-extended to i32 before performing
the multiply, and then the add operations are done in WGSL i32 with wrapping behaviour.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeI32, TypeU32, i32, u32 } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#dot4I8Packed-builtin').
desc(
`
@const fn dot4I8Packed(e1: u32, e2: u32) -> i32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const dot4I8Packed = (e1, e2) => {
let result = 0;
for (let i = 0; i < 4; ++i) {
let e1_i = e1 >> i * 8 & 0xff;
if (e1_i >= 128) {
e1_i -= 256;
}
let e2_i = e2 >> i * 8 & 0xff;
if (e2_i >= 128) {
e2_i -= 256;
}
result += e1_i * e2_i;
}
return result;
};
const testInputs = [
// dot({0, 0, 0, 0}, {0, 0, 0, 0})
[0, 0],
// dot({127, 127, 127, 127}, {127, 127, 127, 127})
[0x7f7f7f7f, 0x7f7f7f7f],
// dot({-128, -128, -128, -128}, {-128, -128, -128, -128})
[0x80808080, 0x80808080],
// dot({127, 127, 127, 127}, {-128, -128, -128, -128})
[0x7f7f7f7f, 0x80808080],
// dot({1, 2, 3, 4}, {5, 6, 7, 8})
[0x01020304, 0x05060708],
// dot({1, 2, 3, 4}, {-1, -2, -3, -4})
[0x01020304, 0xfffefdfc],
// dot({-5, -6, -7, -8}, {5, 6, 7, 8})
[0xfbfaf9f8, 0x05060708],
// dot({-9, -10, -11, -12}, {-13, -14, -15, -16})
[0xf7f6f5f4, 0xf3f2f1f0]];
const makeCase = (x, y) => {
return { input: [u32(x), u32(y)], expected: i32(dot4I8Packed(x, y)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(...v)];
});
await run(t, builtin('dot4I8Packed'), [TypeU32, TypeU32], TypeI32, cfg, cases);
});
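A worked example of the packed signed dot product computed by the reference implementation above, using one of the test's own inputs:
// dot4I8Packed(0x01020304, 0x05060708)
// component i is byte i of each operand, sign-extended to i32:
//   e1 -> (4, 3, 2, 1), e2 -> (8, 7, 6, 5)
// result = 4*8 + 3*7 + 2*6 + 1*5 = 70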
View file
@ -0,0 +1,59 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'dot4U8Packed' builtin function
@const fn dot4U8Packed(e1: u32 ,e2: u32) -> u32
e1 and e2 are interpreted as vectors with four 8-bit unsigned integer components. Return the
unsigned integer dot product of these two vectors.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeU32, u32 } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#dot4U8Packed-builtin').
desc(
`
@const fn dot4U8Packed(e1: u32, e2: u32) -> u32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const dot4U8Packed = (e1, e2) => {
let result = 0;
for (let i = 0; i < 4; ++i) {
const e1_i = e1 >> i * 8 & 0xff;
const e2_i = e2 >> i * 8 & 0xff;
result += e1_i * e2_i;
}
return result;
};
const testInputs = [
// dot({0, 0, 0, 0}, {0, 0, 0, 0})
[0, 0],
// dot({255u, 255u, 255u, 255u}, {255u, 255u, 255u, 255u})
[0xffffffff, 0xffffffff],
// dot({1u, 2u, 3u, 4u}, {5u, 6u, 7u, 8u})
[0x01020304, 0x05060708],
// dot({120u, 90u, 60u, 30u}, {50u, 100u, 150u, 200u})
[0x785a3c1e, 0x326496c8]];
const makeCase = (x, y) => {
return { input: [u32(x), u32(y)], expected: u32(dot4U8Packed(x, y)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(...v)];
});
await run(t, builtin('dot4U8Packed'), [TypeU32, TypeU32], TypeU32, cfg, cases);
});
View file
@ -0,0 +1,69 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'pack4xI8' builtin function
@const fn pack4xI8(e: vec4<i32>) -> u32
Pack the lower 8 bits of each component of e into a u32 value and drop all the unused bits.
Component e[i] of the input is mapped to bits (8 * i) through (8 * i + 7) of the result.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeI32, TypeU32, TypeVec, u32, toVector, i32 } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#pack4xI8-builtin').
desc(
`
@const fn pack4xI8(e: vec4<i32>) -> u32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const pack4xI8 = (vals) => {
const result = new Uint32Array(1);
for (let i = 0; i < 4; ++i) {
result[0] |= (vals[i] & 0xff) << i * 8;
}
return result[0];
};
const testInputs = [
[0, 0, 0, 0],
[1, 2, 3, 4],
[-1, 2, 3, 4],
[1, -2, 3, 4],
[1, 2, -3, 4],
[1, 2, 3, -4],
[-1, -2, 3, 4],
[-1, 2, -3, 4],
[-1, 2, 3, -4],
[1, -2, -3, 4],
[1, -2, 3, -4],
[1, 2, -3, -4],
[-1, -2, -3, 4],
[-1, -2, 3, -4],
[-1, 2, -3, -4],
[1, -2, -3, -4],
[-1, -2, -3, -4],
[127, 128, -128, -129],
[128, 128, -128, -128],
[32767, 32768, -32768, -32769]];
const makeCase = (vals) => {
return { input: [toVector(vals, i32)], expected: u32(pack4xI8(vals)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('pack4xI8'), [TypeVec(4, TypeI32)], TypeU32, cfg, cases);
});
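A worked example of the bit mapping described above, taken from the test's own inputs:
// pack4xI8([-1, 2, 3, 4])
//   e[0] = -1 -> 0xff -> bits  0..7
//   e[1] =  2 -> 0x02 -> bits  8..15
//   e[2] =  3 -> 0x03 -> bits 16..23
//   e[3] =  4 -> 0x04 -> bits 24..31
// result = 0x040302ff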
View file
@ -0,0 +1,73 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'pack4xI8Clamp' builtin function
@const fn pack4xI8Clamp(e: vec4<i32>) -> u32
Clamp each component of e in the range [-128, 127] and then pack the lower 8 bits of each component
into a u32 value. Component e[i] of the input is mapped to bits (8 * i) through (8 * i + 7) of the
result.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeI32, TypeU32, TypeVec, u32, toVector, i32 } from '../../../../../util/conversion.js';
import { clamp } from '../../../../../util/math.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#pack4xI8Clamp-builtin').
desc(
`
@const fn pack4xI8Clamp(e: vec4<i32>) -> u32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const pack4xI8Clamp = (vals) => {
const result = new Uint32Array(1);
for (let i = 0; i < 4; ++i) {
const clampedValue = clamp(vals[i], { min: -128, max: 127 });
result[0] |= (clampedValue & 0xff) << i * 8;
}
return result[0];
};
const testInputs = [
[0, 0, 0, 0],
[1, 2, 3, 4],
[-1, 2, 3, 4],
[1, -2, 3, 4],
[1, 2, -3, 4],
[1, 2, 3, -4],
[-1, -2, 3, 4],
[-1, 2, -3, 4],
[-1, 2, 3, -4],
[1, -2, -3, 4],
[1, -2, 3, -4],
[1, 2, -3, -4],
[-1, -2, -3, 4],
[-1, -2, 3, -4],
[-1, 2, -3, -4],
[1, -2, -3, -4],
[-1, -2, -3, -4],
[126, 127, 128, 129],
[-130, -129, -128, -127],
[127, 128, -128, -129],
[32767, 32768, -32768, -32769]];
const makeCase = (vals) => {
return { input: [toVector(vals, i32)], expected: u32(pack4xI8Clamp(vals)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('pack4xI8Clamp'), [TypeVec(4, TypeI32)], TypeU32, cfg, cases);
});
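And with clamping, again using one of the inputs above:
// pack4xI8Clamp([127, 128, -128, -129])
//   clamp to [-128, 127] -> [127, 127, -128, -128]
//   lower 8 bits         -> [0x7f, 0x7f, 0x80, 0x80]
// result = 0x80807f7f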
View file
@ -0,0 +1,54 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'pack4xU8' builtin function
@const fn pack4xU8(e: vec4<u32>) -> u32
Pack the lower 8 bits of each component of e into a u32 value and drop all the unused bits.
Component e[i] of the input is mapped to bits (8 * i) through (8 * i + 7) of the result.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeU32, TypeVec, u32, toVector } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#pack4xU8-builtin').
desc(
`
@const fn pack4xU8(e: vec4<u32>) -> u32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const pack4xU8 = (vals) => {
const result = new Uint32Array(1);
for (let i = 0; i < 4; ++i) {
result[0] |= (vals[i] & 0xff) << i * 8;
}
return result[0];
};
const testInputs = [
[0, 0, 0, 0],
[1, 2, 3, 4],
[255, 255, 255, 255],
[254, 255, 256, 257],
[65535, 65536, 255, 254]];
const makeCase = (vals) => {
return { input: [toVector(vals, u32)], expected: u32(pack4xU8(vals)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('pack4xU8'), [TypeVec(4, TypeU32)], TypeU32, cfg, cases);
});
View file
@ -0,0 +1,57 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'pack4xU8Clamp' builtin function
@const fn pack4xU8Clamp(e: vec4<u32>) -> u32
Clamp each component of e in the range of [0, 255] and then pack the lower 8 bits of each component
into a u32 value. Component e[i] of the input is mapped to bits (8 * i) through (8 * i + 7) of the
result.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeU32, TypeVec, u32, toVector } from '../../../../../util/conversion.js';
import { clamp } from '../../../../../util/math.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#pack4xU8Clamp-builtin').
desc(
`
@const fn pack4xU8Clamp(e: vec4<u32>) -> u32
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const pack4xU8Clamp = (vals) => {
const result = new Uint32Array(1);
for (let i = 0; i < 4; ++i) {
const clampedValue = clamp(vals[i], { min: 0, max: 255 });
result[0] |= clampedValue << i * 8;
}
return result[0];
};
const testInputs = [
[0, 0, 0, 0],
[1, 2, 3, 4],
[255, 255, 255, 255],
[254, 255, 256, 257],
[65535, 65536, 255, 254]];
const makeCase = (vals) => {
return { input: [toVector(vals, u32)], expected: u32(pack4xU8Clamp(vals)) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('pack4xU8Clamp'), [TypeVec(4, TypeU32)], TypeU32, cfg, cases);
});
View file
@ -0,0 +1,56 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'unpack4xI8' builtin function
@const fn unpack4xI8(e: u32) -> vec4<i32>
e is interpreted as a vector with four 8-bit signed integer components. Unpack e into a vec4<i32>
with sign extension.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeI32, TypeU32, TypeVec, u32, toVector, i32 } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#unpack4xI8-builtin').
desc(
`
@const fn unpack4xI8(e: u32) -> vec4<i32>
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const unpack4xI8 = (e) => {
const result = [0, 0, 0, 0];
for (let i = 0; i < 4; ++i) {
let intValue = e >> 8 * i & 0xff;
if (intValue > 127) {
intValue -= 256;
}
result[i] = intValue;
}
return result;
};
const testInputs = [
0, 0x01020304, 0xfcfdfeff, 0x040302ff, 0x0403fe01, 0x04fd0201, 0xfc030201, 0xfcfdfe01,
0xfcfd02ff, 0xfc03feff, 0x04fdfeff, 0x0403feff, 0x04fd02ff, 0xfc0302ff, 0x04fdfe01,
0xfc03fe01, 0xfcfd0201, 0x80817f7e];
const makeCase = (e) => {
return { input: [u32(e)], expected: toVector(unpack4xI8(e), i32) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('unpack4xI8'), [TypeU32], TypeVec(4, TypeI32), cfg, cases);
});
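The inverse direction, showing the sign extension unpack4xI8 performs on one of the inputs above:
// unpack4xI8(0xfcfdfeff)
//   bits  0..7  = 0xff -> -1
//   bits  8..15 = 0xfe -> -2
//   bits 16..23 = 0xfd -> -3
//   bits 24..31 = 0xfc -> -4
// result = vec4<i32>(-1, -2, -3, -4)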
View file
@ -0,0 +1,48 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution tests for the 'unpack4xU8' builtin function
@const fn unpack4xU8(e: u32) -> vec4<u32>
e is interpreted as a vector with four 8-bit unsigned integer components. Unpack e into a vec4<u32>
with zero extension.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
import { TypeU32, TypeVec, u32, toVector } from '../../../../../util/conversion.js';
import { allInputSources, run } from '../../expression.js';
import { builtin } from './builtin.js';
export const g = makeTestGroup(GPUTest);
g.test('basic').
specURL('https://www.w3.org/TR/WGSL/#unpack4xU8-builtin').
desc(
`
@const fn unpack4xU8(e: u32) -> vec4<u32>
`
).
params((u) => u.combine('inputSource', allInputSources)).
fn(async (t) => {
const cfg = t.params;
const unpack4xU8 = (e) => {
const result = [0, 0, 0, 0];
for (let i = 0; i < 4; ++i) {
result[i] = e >> 8 * i & 0xff;
}
return result;
};
const testInputs = [0, 0x08060402, 0xffffffff, 0xfefdfcfb];
const makeCase = (e) => {
return { input: [u32(e)], expected: toVector(unpack4xU8(e), u32) };
};
const cases = testInputs.flatMap((v) => {
return [makeCase(v)];
});
await run(t, builtin('unpack4xU8'), [TypeU32], TypeVec(4, TypeU32), cfg, cases);
});
View file
@ -0,0 +1,705 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
User function call tests for pointer parameters.
`;import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
function wgslTypeDecl(kind) {
switch (kind) {
case 'vec4i':
return `
alias T = vec4i;
`;
case 'array':
return `
alias T = array<vec4f, 3>;
`;
case 'struct':
return `
struct S {
a : i32,
b : u32,
c : i32,
d : u32,
}
alias T = S;
`;
}
}
function valuesForType(kind) {
switch (kind) {
case 'vec4i':
return new Uint32Array([1, 2, 3, 4]);
case 'array':
return new Float32Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
case 'struct':
return new Uint32Array([1, 2, 3, 4]);
}
}
function run(
t,
wgsl,
inputUsage,
input,
expected)
{
const pipeline = t.device.createComputePipeline({
layout: 'auto',
compute: {
module: t.device.createShaderModule({ code: wgsl }),
entryPoint: 'main'
}
});
const inputBuffer = t.makeBufferWithContents(
input,
inputUsage === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE
);
const outputBuffer = t.device.createBuffer({
size: expected.buffer.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
});
const bindGroup = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: inputBuffer } },
{ binding: 1, resource: { buffer: outputBuffer } }]
});
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
pass.end();
t.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(outputBuffer, expected);
}
g.test('read_full_object').
desc('Test a pointer parameter can be read by a callee function').
params((u) =>
u.
combine('address_space', ['function', 'private', 'workgroup', 'storage', 'uniform']).
combine('call_indirection', [0, 1, 2]).
combine('type', ['vec4i', 'array', 'struct'])
).
fn((t) => {
switch (t.params.address_space) {
case 'workgroup':
case 'storage':
case 'uniform':
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
}
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var F : T = input;
f0(&F);
}
`,
private: `
var<private> P : T;
@compute @workgroup_size(1)
fn main() {
P = input;
f0(&P);
}
`,
workgroup: `
var<workgroup> W : T;
@compute @workgroup_size(1)
fn main() {
W = input;
f0(&W);
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
f0(&input);
}
`,
uniform: `
@compute @workgroup_size(1)
fn main() {
f0(&input);
}
`
}[t.params.address_space];
let call_chain = '';
for (let i = 0; i < t.params.call_indirection; i++) {
call_chain += `
fn f${i}(p : ptr<${t.params.address_space}, T>) {
f${i + 1}(p);
}
`;
}
const inputVar =
t.params.address_space === 'uniform' ?
`@binding(0) @group(0) var<uniform> input : T;` :
`@binding(0) @group(0) var<storage, read> input : T;`;
const wgsl = `
${wgslTypeDecl(t.params.type)}
${inputVar}
@binding(1) @group(0) var<storage, read_write> output : T;
fn f${t.params.call_indirection}(p : ptr<${t.params.address_space}, T>) {
output = *p;
}
${call_chain}
${main}
`;
const values = valuesForType(t.params.type);
run(t, wgsl, t.params.address_space === 'uniform' ? 'uniform' : 'storage', values, values);
});
g.test('read_ptr_to_member').
desc('Test a pointer parameter to a member of a structure can be read by a callee function').
params((u) =>
u.combine('address_space', ['function', 'private', 'workgroup', 'storage', 'uniform'])
).
fn((t) => {
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var v : S = input;
output = f0(&v);
}
`,
private: `
var<private> P : S;
@compute @workgroup_size(1)
fn main() {
P = input;
output = f0(&P);
}
`,
workgroup: `
var<workgroup> W : S;
@compute @workgroup_size(1)
fn main() {
W = input;
output = f0(&W);
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
output = f0(&input);
}
`,
uniform: `
@compute @workgroup_size(1)
fn main() {
output = f0(&input);
}
`
}[t.params.address_space];
const inputVar =
t.params.address_space === 'uniform' ?
`@binding(0) @group(0) var<uniform> input : S;` :
`@binding(0) @group(0) var<storage, read> input : S;`;
const wgsl = `
struct S {
a : vec4i,
b : T,
c : vec4i,
}
struct T {
a : vec4i,
b : vec4i,
}
${inputVar}
@binding(1) @group(0) var<storage, read_write> output : T;
fn f2(p : ptr<${t.params.address_space}, T>) -> T {
return *p;
}
fn f1(p : ptr<${t.params.address_space}, S>) -> T {
return f2(&(*p).b);
}
fn f0(p : ptr<${t.params.address_space}, S>) -> T {
return f1(p);
}
${main}
`;
const input = new Uint32Array([
/* S.a */1, 2, 3, 4,
/* S.b.a */5, 6, 7, 8,
/* S.b.b */9, 10, 11, 12,
/* S.c */13, 14, 15, 16]
);
const expected = new Uint32Array([
/* S.b.a */5, 6, 7, 8,
/* S.b.b */9, 10, 11, 12]
);
run(t, wgsl, t.params.address_space === 'uniform' ? 'uniform' : 'storage', input, expected);
});
g.test('read_ptr_to_element').
desc('Test a pointer parameter to an element of an array can be read by a callee function').
params((u) =>
u.combine('address_space', ['function', 'private', 'workgroup', 'storage', 'uniform'])
).
fn((t) => {
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var v : T = input;
output = f0(&v);
}
`,
private: `
var<private> P : T;
@compute @workgroup_size(1)
fn main() {
P = input;
output = f0(&P);
}
`,
workgroup: `
var<workgroup> W : T;
@compute @workgroup_size(1)
fn main() {
W = input;
output = f0(&W);
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
output = f0(&input);
}
`,
uniform: `
@compute @workgroup_size(1)
fn main() {
output = f0(&input);
}
`
}[t.params.address_space];
const inputVar =
t.params.address_space === 'uniform' ?
`@binding(0) @group(0) var<uniform> input : T;` :
`@binding(0) @group(0) var<storage, read> input : T;`;
const wgsl = `
alias T3 = vec4i;
alias T2 = array<T3, 2>;
alias T1 = array<T2, 3>;
alias T = array<T1, 2>;
${inputVar}
@binding(1) @group(0) var<storage, read_write> output : T3;
fn f2(p : ptr<${t.params.address_space}, T2>) -> T3 {
return (*p)[1];
}
fn f1(p : ptr<${t.params.address_space}, T1>) -> T3 {
return f2(&(*p)[0]) + f2(&(*p)[2]);
}
fn f0(p : ptr<${t.params.address_space}, T>) -> T3 {
return f1(&(*p)[0]);
}
${main}
`;
const input = new Uint32Array([
/* [0][0][0] */1, 2, 3, 4,
/* [0][0][1] */5, 6, 7, 8,
/* [0][1][0] */9, 10, 11, 12,
/* [0][1][1] */13, 14, 15, 16,
/* [0][2][0] */17, 18, 19, 20,
/* [0][2][1] */21, 22, 23, 24,
/* [1][0][0] */25, 26, 27, 28,
/* [1][0][1] */29, 30, 31, 32,
/* [1][1][0] */33, 34, 35, 36,
/* [1][1][1] */37, 38, 39, 40,
/* [1][2][0] */41, 42, 43, 44,
/* [1][2][1] */45, 46, 47, 48]
);
const expected = new Uint32Array([/* [0][0][1] + [0][2][1] */5 + 21, 6 + 22, 7 + 23, 8 + 24]);
run(t, wgsl, t.params.address_space === 'uniform' ? 'uniform' : 'storage', input, expected);
});
g.test('write_full_object').
desc('Test a pointer parameter can be written to by a callee function').
params((u) =>
u.
combine('address_space', ['function', 'private', 'workgroup', 'storage']).
combine('call_indirection', [0, 1, 2]).
combine('type', ['vec4i', 'array', 'struct'])
).
fn((t) => {
switch (t.params.address_space) {
case 'workgroup':
case 'storage':
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
}
const ptr =
t.params.address_space === 'storage' ?
`ptr<storage, T, read_write>` :
`ptr<${t.params.address_space}, T>`;
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var F : T;
f0(&F);
output = F;
}
`,
private: `
var<private> P : T;
@compute @workgroup_size(1)
fn main() {
f0(&P);
output = P;
}
`,
workgroup: `
var<workgroup> W : T;
@compute @workgroup_size(1)
fn main() {
f0(&W);
output = W;
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
f0(&output);
}
`
}[t.params.address_space];
let call_chain = '';
for (let i = 0; i < t.params.call_indirection; i++) {
call_chain += `
fn f${i}(p : ${ptr}) {
f${i + 1}(p);
}
`;
}
const wgsl = `
${wgslTypeDecl(t.params.type)}
@binding(0) @group(0) var<uniform> input : T;
@binding(1) @group(0) var<storage, read_write> output : T;
fn f${t.params.call_indirection}(p : ${ptr}) {
*p = input;
}
${call_chain}
${main}
`;
const values = valuesForType(t.params.type);
run(t, wgsl, 'uniform', values, values);
});
g.test('write_ptr_to_member').
desc(
'Test a pointer parameter to a member of a structure can be written to by a callee function'
).
params((u) => u.combine('address_space', ['function', 'private', 'workgroup', 'storage'])).
fn((t) => {
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var v : S;
f0(&v);
output = v;
}
`,
private: `
var<private> P : S;
@compute @workgroup_size(1)
fn main() {
f0(&P);
output = P;
}
`,
workgroup: `
var<workgroup> W : S;
@compute @workgroup_size(1)
fn main() {
f0(&W);
output = W;
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
f1(&output);
}
`
}[t.params.address_space];
const ptr = (ty) =>
t.params.address_space === 'storage' ?
`ptr<storage, ${ty}, read_write>` :
`ptr<${t.params.address_space}, ${ty}>`;
const wgsl = `
struct S {
a : vec4i,
b : T,
c : vec4i,
}
struct T {
a : vec4i,
b : vec4i,
}
@binding(0) @group(0) var<storage> input : T;
@binding(1) @group(0) var<storage, read_write> output : S;
fn f2(p : ${ptr('T')}) {
*p = input;
}
fn f1(p : ${ptr('S')}) {
f2(&(*p).b);
}
fn f0(p : ${ptr('S')}) {
f1(p);
}
${main}
`;
const input = new Uint32Array([
/* S.b.a */5, 6, 7, 8,
/* S.b.b */9, 10, 11, 12]
);
const expected = new Uint32Array([
/* S.a */0, 0, 0, 0,
/* S.b.a */5, 6, 7, 8,
/* S.b.b */9, 10, 11, 12,
/* S.c */0, 0, 0, 0]
);
run(t, wgsl, 'storage', input, expected);
});
g.test('write_ptr_to_element').
desc('Test a pointer parameter to an element of an array can be written to by a callee function').
params((u) => u.combine('address_space', ['function', 'private', 'workgroup', 'storage'])).
fn((t) => {
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
const main = {
function: `
@compute @workgroup_size(1)
fn main() {
var v : T;
f0(&v);
output = v;
}
`,
private: `
var<private> P : T;
@compute @workgroup_size(1)
fn main() {
f0(&P);
output = P;
}
`,
workgroup: `
var<workgroup> W : T;
@compute @workgroup_size(1)
fn main() {
f0(&W);
output = W;
}
`,
storage: `
@compute @workgroup_size(1)
fn main() {
f0(&output);
}
`
}[t.params.address_space];
const ptr = (ty) =>
t.params.address_space === 'storage' ?
`ptr<storage, ${ty}, read_write>` :
`ptr<${t.params.address_space}, ${ty}>`;
const wgsl = `
alias T3 = vec4i;
alias T2 = array<T3, 2>;
alias T1 = array<T2, 3>;
alias T = array<T1, 2>;
@binding(0) @group(0) var<storage, read> input : T3;
@binding(1) @group(0) var<storage, read_write> output : T;
fn f2(p : ${ptr('T2')}) {
(*p)[1] = input;
}
fn f1(p : ${ptr('T1')}) {
f2(&(*p)[0]);
f2(&(*p)[2]);
}
fn f0(p : ${ptr('T')}) {
f1(&(*p)[0]);
}
${main}
`;
const input = new Uint32Array([1, 2, 3, 4]);
const expected = new Uint32Array([
/* [0][0][0] */0, 0, 0, 0,
/* [0][0][1] */1, 2, 3, 4,
/* [0][1][0] */0, 0, 0, 0,
/* [0][1][1] */0, 0, 0, 0,
/* [0][2][0] */0, 0, 0, 0,
/* [0][2][1] */1, 2, 3, 4,
/* [1][0][0] */0, 0, 0, 0,
/* [1][0][1] */0, 0, 0, 0,
/* [1][1][0] */0, 0, 0, 0,
/* [1][1][1] */0, 0, 0, 0,
/* [1][2][0] */0, 0, 0, 0,
/* [1][2][1] */0, 0, 0, 0]
);
run(t, wgsl, 'storage', input, expected);
});
g.test('mixed_ptr_parameters').
desc('Test that functions can accept multiple, mixed pointer parameters').
fn((t) => {
t.skipIfLanguageFeatureNotSupported('unrestricted_pointer_parameters');
const wgsl = `
@binding(0) @group(0) var<uniform> input : array<vec4i, 4>;
@binding(1) @group(0) var<storage, read_write> output : array<vec4i, 4>;
fn sum(f : ptr<function, i32>,
w : ptr<workgroup, atomic<i32>>,
p : ptr<private, i32>,
u : ptr<uniform, vec4i>) -> vec4i {
return vec4(*f + atomicLoad(w) + *p) + *u;
}
struct S {
i : i32,
}
var<private> P0 = S(0);
var<private> P1 = S(10);
var<private> P2 = 20;
var<private> P3 = 30;
struct T {
i : atomic<i32>,
}
var<workgroup> W0 : T;
var<workgroup> W1 : atomic<i32>;
var<workgroup> W2 : T;
var<workgroup> W3 : atomic<i32>;
@compute @workgroup_size(1)
fn main() {
atomicStore(&W0.i, 0);
atomicStore(&W1, 100);
atomicStore(&W2.i, 200);
atomicStore(&W3, 300);
var F = array(0, 1000, 2000, 3000);
output[0] = sum(&F[2], &W3, &P1.i, &input[0]); // vec4(2310) + vec4(1, 2, 3, 4)
output[1] = sum(&F[1], &W2.i, &P0.i, &input[1]); // vec4(1200) + vec4(4, 3, 2, 1)
output[2] = sum(&F[3], &W0.i, &P3, &input[2]); // vec4(3030) + vec4(2, 4, 1, 3)
output[3] = sum(&F[2], &W1, &P2, &input[3]); // vec4(2120) + vec4(4, 1, 2, 3)
}
`;
const input = new Uint32Array([
/* [0] */1, 2, 3, 4,
/* [1] */4, 3, 2, 1,
/* [2] */2, 4, 1, 3,
/* [3] */4, 1, 2, 3]
);
const expected = new Uint32Array([
/* [0] */2311, 2312, 2313, 2314,
/* [1] */1204, 1203, 1202, 1201,
/* [2] */3032, 3034, 3031, 3033,
/* [3] */2124, 2121, 2122, 2123]
);
run(t, wgsl, 'uniform', input, expected);
});

View file

@ -32,6 +32,9 @@ export const allInputSources = ['const', 'uniform', 'storage_r', 'storage_rw'];
/** Just constant input source */
export const onlyConstInputSource = ['const'];
/** All input sources except const */
export const allButConstInputSource = ['uniform', 'storage_r', 'storage_rw'];
/** Configuration for running an expression test */
@ -47,9 +50,9 @@ export const onlyConstInputSource = ['const'];
// Helper for returning the stride for a given Type
function valueStride(ty) {
// AbstractFloats are passed out of the shader via a struct of 2x u32s and
// unpacking containers as arrays
if (scalarTypeOf(ty).kind === 'abstract-float') {
// AbstractFloats and AbstractInts are passed out of the shader via structs of
// 2x u32s and unpacking containers as arrays
if (scalarTypeOf(ty).kind === 'abstract-float' || scalarTypeOf(ty).kind === 'abstract-int') {
if (ty instanceof ScalarType) {
return 16;
}
@ -139,7 +142,7 @@ function valueStride(ty) {
return 16;
}
// Helper for summing up all of the stride values for an array of Types
// Helper for summing up all the stride values for an array of Types
function valueStrides(tys) {
return tys.map(valueStride).reduce((sum, c) => sum + c);
}
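For a sense of the sizes involved, abstract scalars are padded out to 16 bytes above, so summing strides is straightforward. A worked example follows (hypothetical annotation, not part of the change; TypeAbstractInt is assumed to come from util/conversion.js, as in the specs further down):
// Illustration only:
//   valueStride(TypeAbstractInt)                     === 16
//   valueStrides([TypeAbstractInt, TypeAbstractInt]) === 32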
@ -148,6 +151,7 @@ function valueStrides(tys) {
function storageType(ty) {
if (ty instanceof ScalarType) {
assert(ty.kind !== 'f64', `No storage type defined for 'f64' values`);
assert(ty.kind !== 'abstract-int', `Custom handling is implemented for 'abstract-int' values`);
assert(
ty.kind !== 'abstract-float',
`Custom handling is implemented for 'abstract-float' values`
@ -165,16 +169,21 @@ function storageType(ty) {
// Helper for converting a value of the type 'ty' from the storage type.
function fromStorage(ty, expr) {
if (ty instanceof ScalarType) {
assert(ty.kind !== 'abstract-float', `AbstractFloat values should not be in input storage`);
assert(ty.kind !== 'abstract-int', `'abstract-int' values should not be in input storage`);
assert(ty.kind !== 'abstract-float', `'abstract-float' values should not be in input storage`);
assert(ty.kind !== 'f64', `'No storage type defined for 'f64' values`);
if (ty.kind === 'bool') {
return `${expr} != 0u`;
}
}
if (ty instanceof VectorType) {
assert(
ty.elementType.kind !== 'abstract-int',
`'abstract-int' values cannot appear in input storage`
);
assert(
ty.elementType.kind !== 'abstract-float',
`AbstractFloat values cannot appear in input storage`
`'abstract-float' values cannot appear in input storage`
);
assert(ty.elementType.kind !== 'f64', `'No storage type defined for 'f64' values`);
if (ty.elementType.kind === 'bool') {
@ -187,9 +196,13 @@ function fromStorage(ty, expr) {
// Helper for converting a value of the type 'ty' to the storage type.
function toStorage(ty, expr) {
if (ty instanceof ScalarType) {
assert(
ty.kind !== 'abstract-int',
`'abstract-int' values have custom code for writing to storage`
);
assert(
ty.kind !== 'abstract-float',
`AbstractFloat values have custom code for writing to storage`
`'abstract-float' values have custom code for writing to storage`
);
assert(ty.kind !== 'f64', `No storage type defined for 'f64' values`);
if (ty.kind === 'bool') {
@ -197,9 +210,13 @@ function toStorage(ty, expr) {
}
}
if (ty instanceof VectorType) {
assert(
ty.elementType.kind !== 'abstract-int',
`'abstract-int' values have custom code for writing to storage`
);
assert(
ty.elementType.kind !== 'abstract-float',
`AbstractFloat values have custom code for writing to storage`
`'abstract-float' values have custom code for writing to storage`
);
assert(ty.elementType.kind !== 'f64', `'No storage type defined for 'f64' values`);
if (ty.elementType.kind === 'bool') {
@ -460,7 +477,10 @@ function map(v, fn) {
*/
function wgslOutputs(resultType, count) {
let output_struct = undefined;
if (scalarTypeOf(resultType).kind !== 'abstract-float') {
if (
scalarTypeOf(resultType).kind !== 'abstract-float' &&
scalarTypeOf(resultType).kind !== 'abstract-int')
{
output_struct = `
struct Output {
@size(${valueStride(resultType)}) value : ${storageType(resultType)}
@ -568,9 +588,13 @@ resultType,
cases,
inputSource)
{
assert(
scalarTypeOf(resultType).kind !== 'abstract-int',
`abstractIntShaderBuilder should be used when result type is 'abstract-int'`
);
assert(
scalarTypeOf(resultType).kind !== 'abstract-float',
`abstractFloatShaderBuilder should be used when result type is 'abstract-float`
`abstractFloatShaderBuilder should be used when result type is 'abstract-float'`
);
if (inputSource === 'const') {
//////////////////////////////////////////////////////////////////////////
@ -925,7 +949,7 @@ export function abstractFloatShaderBuilder(expressionBuilder) {
cases,
inputSource) =>
{
assert(inputSource === 'const', 'AbstractFloat results are only defined for const-eval');
assert(inputSource === 'const', `'abstract-float' results are only defined for const-eval`);
assert(
scalarTypeOf(resultType).kind === 'abstract-float',
`Expected resultType of 'abstract-float', received '${scalarTypeOf(resultType).kind}' instead`
@ -950,6 +974,90 @@ ${body}
};
}
/**
* @returns a string that extracts the value of an AbstractInt into an output
* destination
* @param expr expression for an AbstractInt value; if working with vectors,
* this string needs to include indexing into the container.
* @param case_idx index in the case output array to assign the result
* @param accessor string representing how to access the AbstractInt that needs
* to be operated on.
* For scalars this should be left as ''.
* For vectors this will be an indexing operation,
* i.e. '[i]'
*/
function abstractIntSnippet(expr, case_idx, accessor = '') {
// AbstractInts are i64s under the hood. WebGPU does not support
// putting i64s in buffers, or any 64-bit simple types, so the result needs to
// be split up into u32 bitfields
//
// Since there is no 64-bit data type that can be used as an element for a
// vector or a matrix in WGSL, the testing framework needs to pass the u32s
// via a struct with two u32s, and deconstruct vectors into arrays.
//
// This is complicated by the fact that user defined functions cannot
// take/return AbstractInts, and AbstractInts cannot be stored in
// variables, so the code cannot just inject a simple utility function
// at the top of the shader; instead, this snippet needs to be inlined
// everywhere the test needs to return an AbstractInt.
return ` {
outputs[${case_idx}].value${accessor}.high = bitcast<u32>(i32(${expr}${accessor} >> 32)) & 0xFFFFFFFF;
const low_sign = (${expr}${accessor} & (1 << 31));
outputs[${case_idx}].value${accessor}.low = bitcast<u32>((${expr}${accessor} & 0x7FFFFFFF)) | low_sign;
}`;
}
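To make the inlining concrete, here is roughly what the snippet expands to for a single scalar case (illustrative annotation only; `foo()` is a stand-in expression and 0 is the case index):
// abstractIntSnippet('foo()', 0) yields a WGSL block along these lines:
//   {
//     outputs[0].value.high = bitcast<u32>(i32(foo() >> 32)) & 0xFFFFFFFF;
//     const low_sign = (foo() & (1 << 31));
//     outputs[0].value.low = bitcast<u32>((foo() & 0x7FFFFFFF)) | low_sign;
//   }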
/** @returns a string for a specific case that has an AbstractInt result */
function abstractIntCaseBody(expr, resultType, i) {
if (resultType instanceof ScalarType) {
return abstractIntSnippet(expr, i);
}
if (resultType instanceof VectorType) {
return [...Array(resultType.width).keys()].
map((idx) => abstractIntSnippet(expr, i, `[${idx}]`)).
join(' \n');
}
unreachable(`Results of type '${resultType}' not yet implemented`);
}
/**
* @returns a ShaderBuilder that builds a test shader that hands back AbstractInt results.
* @param expressionBuilder an expression builder that will return AbstractInts
*/
export function abstractIntShaderBuilder(expressionBuilder) {
return (
parameterTypes,
resultType,
cases,
inputSource) =>
{
assert(inputSource === 'const', `'abstract-int' results are only defined for const-eval`);
assert(
scalarTypeOf(resultType).kind === 'abstract-int',
`Expected resultType of 'abstract-int', received '${scalarTypeOf(resultType).kind}' instead`
);
const body = cases.
map((c, i) => {
const expr = `${expressionBuilder(map(c.input, (v) => v.wgsl()))}`;
return abstractIntCaseBody(expr, resultType, i);
}).
join('\n ');
return `
${wgslHeader(parameterTypes, resultType)}
${wgslOutputs(resultType, cases.length)}
@compute @workgroup_size(1)
fn main() {
${body}
}`;
};
}
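As a usage sketch (an annotation, not part of the change; it assumes `run`, `TypeAbstractInt`, and a `cases` array are in scope, exactly as in the ai_assignment spec further down), a const-eval test hands this builder to `run` in place of the default one:
// Sketch only — mirrors how ai_assignment.spec.js drives this builder:
//   const identity = abstractIntShaderBuilder((value) => `${value}`);
//   await run(t, identity, [TypeAbstractInt], TypeAbstractInt, t.params, cases, 1);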
/**
* Constructs and returns a GPUComputePipeline and GPUBindGroup for running a
* batch of test cases. If a pre-created pipeline can be found in

View file

@ -0,0 +1,21 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/import { abstractInt, i32, u32 } from '../../../../util/conversion.js';import { fullI32Range, fullI64Range, fullU32Range } from '../../../../util/math.js';import { makeCaseCache } from '../case_cache.js';
export const d = makeCaseCache('unary/ai_assignment', {
abstract: () => {
return fullI64Range().map((n) => {
return { input: abstractInt(n), expected: abstractInt(n) };
});
},
i32: () => {
return fullI32Range().map((n) => {
return { input: abstractInt(BigInt(n)), expected: i32(n) };
});
},
u32: () => {
return fullU32Range().map((n) => {
return { input: abstractInt(BigInt(n)), expected: u32(n) };
});
}
});

View file

@ -0,0 +1,65 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution Tests for assignment of AbstractInts
`;import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
import { TypeAbstractInt, TypeI32, TypeU32 } from '../../../../util/conversion.js';
import {
abstractIntShaderBuilder,
basicExpressionBuilder,
onlyConstInputSource,
run } from
'../expression.js';
import { d } from './ai_assignment.cache.js';
function concrete_assignment() {
return basicExpressionBuilder((value) => `${value}`);
}
function abstract_assignment() {
return abstractIntShaderBuilder((value) => `${value}`);
}
export const g = makeTestGroup(GPUTest);
g.test('abstract').
specURL('https://www.w3.org/TR/WGSL/#abstract-types').
desc(
`
testing that extracting abstract ints works
`
).
params((u) => u.combine('inputSource', onlyConstInputSource)).
fn(async (t) => {
const cases = await d.get('abstract');
await run(t, abstract_assignment(), [TypeAbstractInt], TypeAbstractInt, t.params, cases, 1);
});
g.test('i32').
specURL('https://www.w3.org/TR/WGSL/#i32-builtin').
desc(
`
concretizing to i32
`
).
params((u) => u.combine('inputSource', onlyConstInputSource)).
fn(async (t) => {
const cases = await d.get('i32');
await run(t, concrete_assignment(), [TypeAbstractInt], TypeI32, t.params, cases);
});
g.test('u32').
specURL('https://www.w3.org/TR/WGSL/#u32-builtin').
desc(
`
concretizing to u32
`
).
params((u) => u.combine('inputSource', onlyConstInputSource)).
fn(async (t) => {
const cases = await d.get('u32');
await run(t, concrete_assignment(), [TypeAbstractInt], TypeU32, t.params, cases);
});

View file

@ -0,0 +1,156 @@
/**
* AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts
**/export const description = `
Execution Tests for unary indirection (dereference)
`;import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { keysOf } from '../../../../../common/util/data_tables.js';
import { GPUTest } from '../../../../gpu_test.js';
import { scalarType } from '../../../../util/conversion.js';
import { sparseScalarF32Range } from '../../../../util/math.js';
import {
allButConstInputSource,
basicExpressionWithPredeclarationBuilder,
run } from
'../expression.js';
export const g = makeTestGroup(GPUTest);
// All the ways to deref an expression
const kDerefCases = {
deref_address_of_identifier: {
wgsl: '(*(&a))',
requires_pointer_composite_access: false
},
deref_pointer: {
wgsl: '(*p)',
requires_pointer_composite_access: false
},
address_of_identifier: {
wgsl: '(&a)',
requires_pointer_composite_access: true
},
pointer: {
wgsl: 'p',
requires_pointer_composite_access: true
}
};
g.test('deref').
specURL('https://www.w3.org/TR/WGSL/#indirection').
desc(
`
Expression: *e
Pointer expression dereference.
`
).
params((u) =>
u.
combine('inputSource', allButConstInputSource).
combine('vectorize', [undefined, 2, 3, 4]).
combine('scalarType', ['u32', 'i32', 'f32']).
combine('derefType', keysOf(kDerefCases)).
filter((p) => !kDerefCases[p.derefType].requires_pointer_composite_access)
).
fn(async (t) => {
const ty = scalarType(t.params.scalarType);
const cases = sparseScalarF32Range().map((e) => {
return { input: ty.create(e), expected: ty.create(e) };
});
const elemType = ty.kind;
const type = t.params.vectorize ? `vec${t.params.vectorize}<${elemType}>` : elemType;
const shaderBuilder = basicExpressionWithPredeclarationBuilder(
(value) => `get_dereferenced_value(${value})`,
`fn get_dereferenced_value(value: ${type}) -> ${type} {
var a = value;
let p = &a;
return ${kDerefCases[t.params.derefType].wgsl};
}`
);
await run(t, shaderBuilder, [ty], ty, t.params, cases);
});
g.test('deref_index').
specURL('https://www.w3.org/TR/WGSL/#logical-expr').
desc(
`
Expression: (*e)[index]
Pointer expression dereference as lhs of index accessor expression
`
).
params((u) =>
u.
combine('inputSource', allButConstInputSource).
combine('vectorize', [undefined, 2, 3, 4]).
combine('scalarType', ['i32', 'f32']).
combine('derefType', keysOf(kDerefCases))
).
fn(async (t) => {
if (
kDerefCases[t.params.derefType].requires_pointer_composite_access &&
!t.hasLanguageFeature('pointer_composite_access'))
{
return;
}
const ty = scalarType(t.params.scalarType);
const cases = sparseScalarF32Range().map((e) => {
return { input: ty.create(e), expected: ty.create(e) };
});
const elemType = ty.kind;
const type = t.params.vectorize ? `vec${t.params.vectorize}<${elemType}>` : elemType;
const shaderBuilder = basicExpressionWithPredeclarationBuilder(
(value) => `get_dereferenced_value(${value})`,
`fn get_dereferenced_value(value: ${type}) -> ${type} {
var a = array<${type}, 1>(value);
let p = &a;
return ${kDerefCases[t.params.derefType].wgsl}[0];
}`
);
await run(t, shaderBuilder, [ty], ty, t.params, cases);
});
g.test('deref_member').
specURL('https://www.w3.org/TR/WGSL/#logical-expr').
desc(
`
Expression: (*e).member
Pointer expression dereference as lhs of member accessor expression
`
).
params((u) =>
u.
combine('inputSource', allButConstInputSource).
combine('vectorize', [undefined, 2, 3, 4]).
combine('scalarType', ['i32', 'f32']).
combine('derefType', keysOf(kDerefCases))
).
fn(async (t) => {
if (
kDerefCases[t.params.derefType].requires_pointer_composite_access &&
!t.hasLanguageFeature('pointer_composite_access'))
{
return;
}
const ty = scalarType(t.params.scalarType);
const cases = sparseScalarF32Range().map((e) => {
return { input: ty.create(e), expected: ty.create(e) };
});
const elemType = ty.kind;
const type = t.params.vectorize ? `vec${t.params.vectorize}<${elemType}>` : elemType;
const shaderBuilder = basicExpressionWithPredeclarationBuilder(
(value) => `get_dereferenced_value(${value})`,
`struct S {
m : ${type}
}
fn get_dereferenced_value(value: ${type}) -> ${type} {
var a = S(value);
let p = &a;
return ${kDerefCases[t.params.derefType].wgsl}.m;
}`
);
await run(t, shaderBuilder, [ty], ty, t.params, cases);
});

Some files were not shown because too many files have changed in this diff.