webgpu: Implement proper async pipeline creation and GPUPipelineError (#32636)

* Add GPUPipelineError

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

* Proper GetBindGroupLayout

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

* Proper Create*PipelineAsync

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

* Expectations

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

* fixups

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

* more good expectations

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>

---------

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>
Samson 2024-08-08 13:48:43 +02:00 committed by GitHub
parent 08eb4faf4d
commit b8cf0cf9af
11 changed files with 465 additions and 790 deletions
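
For orientation before the per-file diffs: the heart of the change is that pipeline-creation requests now carry an optional response channel. The synchronous createComputePipeline/createRenderPipeline paths leave it empty and keep reporting errors through the device, while the *Async variants pass a sender and resolve or reject their promise (with a GPUPipelineError) once the WGPU thread answers. A minimal, self-contained sketch of that pattern, using std::sync::mpsc in place of Servo's ipc-channel types and with illustrative names rather than the real Servo API:

use std::sync::mpsc::{channel, Sender};

// Simplified stand-ins for the real WebGPURequest/WebGPUResponse types.
enum Request {
    CreateComputePipeline {
        // None for the sync path, Some(sender) for createComputePipelineAsync.
        async_sender: Option<Sender<Response>>,
    },
}

enum Response {
    ComputePipeline(Result<String /* label */, String /* error message */>),
}

// What the WGPU thread does with the request (sketch).
fn handle(request: Request, creation_result: Result<String, String>) {
    match request {
        Request::CreateComputePipeline { async_sender } => {
            if let Some(sender) = async_sender {
                // Async path: report success or failure back to the DOM side,
                // which resolves or rejects the promise with a GPUPipelineError.
                let _ = sender.send(Response::ComputePipeline(creation_result));
            } else {
                // Sync path: errors go through the device error scopes instead.
            }
        },
    }
}

fn main() {
    let (tx, rx) = channel();
    handle(
        Request::CreateComputePipeline { async_sender: Some(tx) },
        Ok("my-pipeline".to_string()),
    );
    if let Ok(Response::ComputePipeline(result)) = rx.recv() {
        println!("async pipeline creation result: {result:?}");
    }
}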


@@ -132,11 +132,11 @@ impl DOMException {
         )
     }
 
-    fn new_inherited(message_: DOMString, name_: DOMString) -> DOMException {
+    pub fn new_inherited(message: DOMString, name: DOMString) -> DOMException {
         DOMException {
             reflector_: Reflector::new(),
-            message: message_,
-            name: name_,
+            message,
+            name,
         }
     }


@@ -2,14 +2,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
 
-use std::string::String;
-
 use dom_struct::dom_struct;
 use webgpu::{WebGPU, WebGPUBindGroupLayout, WebGPUComputePipeline, WebGPURequest};
 
 use crate::dom::bindings::cell::DomRefCell;
 use crate::dom::bindings::codegen::Bindings::WebGPUBinding::GPUComputePipelineMethods;
-use crate::dom::bindings::error::{Error, Fallible};
+use crate::dom::bindings::error::Fallible;
 use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
 use crate::dom::bindings::root::{Dom, DomRoot};
 use crate::dom::bindings::str::USVString;
@@ -26,43 +24,34 @@ pub struct GPUComputePipeline {
     label: DomRefCell<USVString>,
     #[no_trace]
     compute_pipeline: WebGPUComputePipeline,
-    #[no_trace]
-    bind_group_layouts: Vec<WebGPUBindGroupLayout>,
     device: Dom<GPUDevice>,
 }
 
 impl GPUComputePipeline {
     fn new_inherited(
-        channel: WebGPU,
         compute_pipeline: WebGPUComputePipeline,
         label: USVString,
-        bgls: Vec<WebGPUBindGroupLayout>,
        device: &GPUDevice,
     ) -> Self {
         Self {
             reflector_: Reflector::new(),
-            channel,
+            channel: device.channel(),
             label: DomRefCell::new(label),
             compute_pipeline,
-            bind_group_layouts: bgls,
             device: Dom::from_ref(device),
         }
     }
 
     pub fn new(
         global: &GlobalScope,
-        channel: WebGPU,
         compute_pipeline: WebGPUComputePipeline,
         label: USVString,
-        bgls: Vec<WebGPUBindGroupLayout>,
         device: &GPUDevice,
     ) -> DomRoot<Self> {
         reflect_dom_object(
             Box::new(GPUComputePipeline::new_inherited(
-                channel,
                 compute_pipeline,
                 label,
-                bgls,
                 device,
             )),
             global,
@@ -89,13 +78,28 @@ impl GPUComputePipelineMethods for GPUComputePipeline {
     /// <https://gpuweb.github.io/gpuweb/#dom-gpupipelinebase-getbindgrouplayout>
     fn GetBindGroupLayout(&self, index: u32) -> Fallible<DomRoot<GPUBindGroupLayout>> {
-        if index > self.bind_group_layouts.len() as u32 {
-            return Err(Error::Range(String::from("Index out of bounds")));
+        let id = self
+            .global()
+            .wgpu_id_hub()
+            .create_bind_group_layout_id(self.compute_pipeline.0.backend());
+
+        if let Err(e) = self
+            .channel
+            .0
+            .send(WebGPURequest::ComputeGetBindGroupLayout {
+                device_id: self.device.id().0,
+                pipeline_id: self.compute_pipeline.0,
+                index,
+                id,
+            })
+        {
+            warn!("Failed to send WebGPURequest::ComputeGetBindGroupLayout {e:?}");
         }
+
         Ok(GPUBindGroupLayout::new(
             &self.global(),
             self.channel.clone(),
-            self.bind_group_layouts[index as usize],
+            WebGPUBindGroupLayout(id),
             USVString::default(),
         ))
     }
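
The new GetBindGroupLayout keeps the method synchronous by reserving a BindGroupLayoutId up front and letting the WGPU thread fill it in later; an invalid index now surfaces through the device's error handling instead of an immediate Range error. A rough sketch of that pre-allocate-then-send shape, with simplified types rather than Servo's real ones:

use std::sync::mpsc::{channel, Sender};

// Illustrative stand-ins: an id allocated up front and a one-way request.
type BindGroupLayoutId = u64;

enum Request {
    ComputeGetBindGroupLayout { index: u32, id: BindGroupLayoutId },
}

fn get_bind_group_layout(
    next_id: &mut BindGroupLayoutId,
    channel: &Sender<Request>,
    index: u32,
) -> BindGroupLayoutId {
    // 1. Reserve an id immediately; the caller can wrap and use it right away.
    *next_id += 1;
    let id = *next_id;
    // 2. Ask the GPU thread to back that id with the real layout; errors
    //    (e.g. index out of range) are reported asynchronously, not here.
    if channel
        .send(Request::ComputeGetBindGroupLayout { index, id })
        .is_err()
    {
        eprintln!("Failed to send ComputeGetBindGroupLayout");
    }
    id
}

fn main() {
    let (tx, rx) = channel();
    let mut next_id = 0;
    let id = get_bind_group_layout(&mut next_id, &tx, 0);
    println!("DOM side already holds layout id {id}");
    if let Ok(Request::ComputeGetBindGroupLayout { index, id }) = rx.recv() {
        println!("GPU thread now backs id {id} for bind group index {index}");
    }
}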


@@ -14,15 +14,21 @@ use std::sync::{Arc, Mutex};
 use dom_struct::dom_struct;
 use js::jsapi::{Heap, JSObject};
 use webgpu::wgc::id::{BindGroupLayoutId, PipelineLayoutId};
+use webgpu::wgc::pipeline::RenderPipelineDescriptor;
 use webgpu::wgc::{
     binding_model as wgpu_bind, command as wgpu_com, pipeline as wgpu_pipe, resource as wgpu_res,
 };
-use webgpu::{self, wgt, PopError, WebGPU, WebGPURequest, WebGPUResponse};
+use webgpu::{
+    self, wgt, PopError, WebGPU, WebGPUComputePipeline, WebGPURenderPipeline, WebGPURequest,
+    WebGPUResponse,
+};
 
+use super::bindings::codegen::Bindings::WebGPUBinding::GPUPipelineErrorReason;
 use super::bindings::codegen::UnionTypes::GPUPipelineLayoutOrGPUAutoLayoutMode;
 use super::bindings::error::Fallible;
 use super::gpu::AsyncWGPUListener;
 use super::gpudevicelostinfo::GPUDeviceLostInfo;
+use super::gpupipelineerror::GPUPipelineError;
 use super::gpusupportedlimits::GPUSupportedLimits;
 use super::types::GPUError;
 use crate::dom::bindings::cell::DomRefCell;
@@ -214,6 +220,118 @@ impl GPUDevice {
         }
     }
 
+    fn parse_render_pipeline(
+        &self,
+        descriptor: &GPURenderPipelineDescriptor,
+    ) -> (
+        Option<(PipelineLayoutId, Vec<BindGroupLayoutId>)>,
+        RenderPipelineDescriptor<'static>,
+    ) {
+        let (layout, implicit_ids, _) = self.get_pipeline_layout_data(&descriptor.parent.layout);
+
+        let desc = wgpu_pipe::RenderPipelineDescriptor {
+            label: convert_label(&descriptor.parent.parent),
+            layout,
+            cache: None,
+            vertex: wgpu_pipe::VertexState {
+                stage: wgpu_pipe::ProgrammableStageDescriptor {
+                    module: descriptor.vertex.parent.module.id().0,
+                    entry_point: Some(Cow::Owned(descriptor.vertex.parent.entryPoint.to_string())),
+                    constants: Cow::Owned(HashMap::new()),
+                    zero_initialize_workgroup_memory: true,
+                },
+                buffers: Cow::Owned(
+                    descriptor
+                        .vertex
+                        .buffers
+                        .iter()
+                        .map(|buffer| wgpu_pipe::VertexBufferLayout {
+                            array_stride: buffer.arrayStride,
+                            step_mode: match buffer.stepMode {
+                                GPUVertexStepMode::Vertex => wgt::VertexStepMode::Vertex,
+                                GPUVertexStepMode::Instance => wgt::VertexStepMode::Instance,
+                            },
+                            attributes: Cow::Owned(
+                                buffer
+                                    .attributes
+                                    .iter()
+                                    .map(|att| wgt::VertexAttribute {
+                                        format: convert_vertex_format(att.format),
+                                        offset: att.offset,
+                                        shader_location: att.shaderLocation,
+                                    })
+                                    .collect::<Vec<_>>(),
+                            ),
+                        })
+                        .collect::<Vec<_>>(),
+                ),
+            },
+            fragment: descriptor
+                .fragment
+                .as_ref()
+                .map(|stage| wgpu_pipe::FragmentState {
+                    stage: wgpu_pipe::ProgrammableStageDescriptor {
+                        module: stage.parent.module.id().0,
+                        entry_point: Some(Cow::Owned(stage.parent.entryPoint.to_string())),
+                        constants: Cow::Owned(HashMap::new()),
+                        zero_initialize_workgroup_memory: true,
+                    },
+                    targets: Cow::Owned(
+                        stage
+                            .targets
+                            .iter()
+                            .map(|state| {
+                                Some(wgt::ColorTargetState {
+                                    format: convert_texture_format(state.format),
+                                    write_mask: wgt::ColorWrites::from_bits_retain(state.writeMask),
+                                    blend: state.blend.as_ref().map(|blend| wgt::BlendState {
+                                        color: convert_blend_component(&blend.color),
+                                        alpha: convert_blend_component(&blend.alpha),
+                                    }),
+                                })
+                            })
+                            .collect::<Vec<_>>(),
+                    ),
+                }),
+            primitive: convert_primitive_state(&descriptor.primitive),
+            depth_stencil: descriptor.depthStencil.as_ref().map(|dss_desc| {
+                wgt::DepthStencilState {
+                    format: convert_texture_format(dss_desc.format),
+                    depth_write_enabled: dss_desc.depthWriteEnabled,
+                    depth_compare: convert_compare_function(dss_desc.depthCompare),
+                    stencil: wgt::StencilState {
+                        front: wgt::StencilFaceState {
+                            compare: convert_compare_function(dss_desc.stencilFront.compare),
+                            fail_op: convert_stencil_op(dss_desc.stencilFront.failOp),
+                            depth_fail_op: convert_stencil_op(dss_desc.stencilFront.depthFailOp),
+                            pass_op: convert_stencil_op(dss_desc.stencilFront.passOp),
+                        },
+                        back: wgt::StencilFaceState {
+                            compare: convert_compare_function(dss_desc.stencilBack.compare),
+                            fail_op: convert_stencil_op(dss_desc.stencilBack.failOp),
+                            depth_fail_op: convert_stencil_op(dss_desc.stencilBack.depthFailOp),
+                            pass_op: convert_stencil_op(dss_desc.stencilBack.passOp),
+                        },
+                        read_mask: dss_desc.stencilReadMask,
+                        write_mask: dss_desc.stencilWriteMask,
+                    },
+                    bias: wgt::DepthBiasState {
+                        constant: dss_desc.depthBias,
+                        slope_scale: *dss_desc.depthBiasSlopeScale,
+                        clamp: *dss_desc.depthBiasClamp,
+                    },
+                }
+            }),
+            multisample: wgt::MultisampleState {
+                count: descriptor.multisample.count,
+                mask: descriptor.multisample.mask as u64,
+                alpha_to_coverage_enabled: descriptor.multisample.alphaToCoverageEnabled,
+            },
+            multiview: None,
+        };
+
+        (implicit_ids, desc)
+    }
+
     /// <https://gpuweb.github.io/gpuweb/#lose-the-device>
     pub fn lose(&self, reason: GPUDeviceLostReason, msg: String) {
         let lost_promise = &(*self.lost_promise.borrow());
@@ -564,7 +682,7 @@ impl GPUDeviceMethods for GPUDevice {
             .wgpu_id_hub()
             .create_compute_pipeline_id(self.device.0.backend());
 
-        let (layout, implicit_ids, bgls) = self.get_pipeline_layout_data(&descriptor.parent.layout);
+        let (layout, implicit_ids, _) = self.get_pipeline_layout_data(&descriptor.parent.layout);
 
         let desc = wgpu_pipe::ComputePipelineDescriptor {
             label: convert_label(&descriptor.parent.parent),
@@ -585,16 +703,15 @@ impl GPUDeviceMethods for GPUDevice {
                 compute_pipeline_id,
                 descriptor: desc,
                 implicit_ids,
+                async_sender: None,
             })
             .expect("Failed to create WebGPU ComputePipeline");
 
         let compute_pipeline = webgpu::WebGPUComputePipeline(compute_pipeline_id);
 
         GPUComputePipeline::new(
             &self.global(),
-            self.channel.clone(),
             compute_pipeline,
             descriptor.parent.parent.label.clone().unwrap_or_default(),
-            bgls,
             self,
         )
     }
@@ -606,7 +723,36 @@ impl GPUDeviceMethods for GPUDevice {
         comp: InRealm,
     ) -> Rc<Promise> {
         let promise = Promise::new_in_current_realm(comp);
-        promise.resolve_native(&self.CreateComputePipeline(descriptor));
+        let sender = response_async(&promise, self);
+        let compute_pipeline_id = self
+            .global()
+            .wgpu_id_hub()
+            .create_compute_pipeline_id(self.device.0.backend());
+
+        let (layout, implicit_ids, _) = self.get_pipeline_layout_data(&descriptor.parent.layout);
+
+        let desc = wgpu_pipe::ComputePipelineDescriptor {
+            label: convert_label(&descriptor.parent.parent),
+            layout,
+            stage: wgpu_pipe::ProgrammableStageDescriptor {
+                module: descriptor.compute.module.id().0,
+                entry_point: Some(Cow::Owned(descriptor.compute.entryPoint.to_string())),
+                constants: Cow::Owned(HashMap::new()),
+                zero_initialize_workgroup_memory: true,
+            },
+            cache: None,
+        };
+
+        self.channel
+            .0
+            .send(WebGPURequest::CreateComputePipeline {
+                device_id: self.device.0,
+                compute_pipeline_id,
+                descriptor: desc,
+                implicit_ids,
+                async_sender: Some(sender),
+            })
+            .expect("Failed to create WebGPU ComputePipeline");
+
         promise
     }
@ -747,129 +893,7 @@ impl GPUDeviceMethods for GPUDevice {
&self, &self,
descriptor: &GPURenderPipelineDescriptor, descriptor: &GPURenderPipelineDescriptor,
) -> DomRoot<GPURenderPipeline> { ) -> DomRoot<GPURenderPipeline> {
let mut valid = true; let (implicit_ids, desc) = self.parse_render_pipeline(&descriptor);
let (layout, implicit_ids, bgls) = self.get_pipeline_layout_data(&descriptor.parent.layout);
let desc = if valid {
Some(wgpu_pipe::RenderPipelineDescriptor {
label: convert_label(&descriptor.parent.parent),
layout,
cache: None,
vertex: wgpu_pipe::VertexState {
stage: wgpu_pipe::ProgrammableStageDescriptor {
module: descriptor.vertex.parent.module.id().0,
entry_point: Some(Cow::Owned(
descriptor.vertex.parent.entryPoint.to_string(),
)),
constants: Cow::Owned(HashMap::new()),
zero_initialize_workgroup_memory: true,
},
buffers: Cow::Owned(
descriptor
.vertex
.buffers
.iter()
.map(|buffer| wgpu_pipe::VertexBufferLayout {
array_stride: buffer.arrayStride,
step_mode: match buffer.stepMode {
GPUVertexStepMode::Vertex => wgt::VertexStepMode::Vertex,
GPUVertexStepMode::Instance => wgt::VertexStepMode::Instance,
},
attributes: Cow::Owned(
buffer
.attributes
.iter()
.map(|att| wgt::VertexAttribute {
format: convert_vertex_format(att.format),
offset: att.offset,
shader_location: att.shaderLocation,
})
.collect::<Vec<_>>(),
),
})
.collect::<Vec<_>>(),
),
},
fragment: descriptor
.fragment
.as_ref()
.map(|stage| wgpu_pipe::FragmentState {
stage: wgpu_pipe::ProgrammableStageDescriptor {
module: stage.parent.module.id().0,
entry_point: Some(Cow::Owned(stage.parent.entryPoint.to_string())),
constants: Cow::Owned(HashMap::new()),
zero_initialize_workgroup_memory: true,
},
targets: Cow::Owned(
stage
.targets
.iter()
.map(|state| {
Some(wgt::ColorTargetState {
format: convert_texture_format(state.format),
write_mask: match wgt::ColorWrites::from_bits(
state.writeMask,
) {
Some(mask) => mask,
None => {
valid = false;
wgt::ColorWrites::empty()
},
},
blend: state.blend.as_ref().map(|blend| wgt::BlendState {
color: convert_blend_component(&blend.color),
alpha: convert_blend_component(&blend.alpha),
}),
})
})
.collect::<Vec<_>>(),
),
}),
primitive: convert_primitive_state(&descriptor.primitive),
depth_stencil: descriptor.depthStencil.as_ref().map(|dss_desc| {
wgt::DepthStencilState {
format: convert_texture_format(dss_desc.format),
depth_write_enabled: dss_desc.depthWriteEnabled,
depth_compare: convert_compare_function(dss_desc.depthCompare),
stencil: wgt::StencilState {
front: wgt::StencilFaceState {
compare: convert_compare_function(dss_desc.stencilFront.compare),
fail_op: convert_stencil_op(dss_desc.stencilFront.failOp),
depth_fail_op: convert_stencil_op(
dss_desc.stencilFront.depthFailOp,
),
pass_op: convert_stencil_op(dss_desc.stencilFront.passOp),
},
back: wgt::StencilFaceState {
compare: convert_compare_function(dss_desc.stencilBack.compare),
fail_op: convert_stencil_op(dss_desc.stencilBack.failOp),
depth_fail_op: convert_stencil_op(dss_desc.stencilBack.depthFailOp),
pass_op: convert_stencil_op(dss_desc.stencilBack.passOp),
},
read_mask: dss_desc.stencilReadMask,
write_mask: dss_desc.stencilWriteMask,
},
bias: wgt::DepthBiasState {
constant: dss_desc.depthBias,
slope_scale: *dss_desc.depthBiasSlopeScale,
clamp: *dss_desc.depthBiasClamp,
},
}
}),
multisample: wgt::MultisampleState {
count: descriptor.multisample.count,
mask: descriptor.multisample.mask as u64,
alpha_to_coverage_enabled: descriptor.multisample.alphaToCoverageEnabled,
},
multiview: None,
})
} else {
self.dispatch_error(webgpu::Error::Validation(String::from(
"Invalid GPUColorWriteFlags",
)));
None
};
let render_pipeline_id = self let render_pipeline_id = self
.global() .global()
@@ -883,6 +907,7 @@ impl GPUDeviceMethods for GPUDevice {
                 render_pipeline_id,
                 descriptor: desc,
                 implicit_ids,
+                async_sender: None,
             })
             .expect("Failed to create WebGPU render pipeline");
 
@@ -892,7 +917,6 @@ impl GPUDeviceMethods for GPUDevice {
             &self.global(),
             render_pipeline,
             descriptor.parent.parent.label.clone().unwrap_or_default(),
-            bgls,
             self,
         )
     }
@@ -904,7 +928,26 @@ impl GPUDeviceMethods for GPUDevice {
         comp: InRealm,
     ) -> Rc<Promise> {
         let promise = Promise::new_in_current_realm(comp);
-        promise.resolve_native(&self.CreateRenderPipeline(descriptor));
+        let (implicit_ids, desc) = self.parse_render_pipeline(&descriptor);
+        let sender = response_async(&promise, self);
+        let render_pipeline_id = self
+            .global()
+            .wgpu_id_hub()
+            .create_render_pipeline_id(self.device.0.backend());
+
+        self.channel
+            .0
+            .send(WebGPURequest::CreateRenderPipeline {
+                device_id: self.device.0,
+                render_pipeline_id,
+                descriptor: desc,
+                implicit_ids,
+                async_sender: Some(sender),
+            })
+            .expect("Failed to create WebGPU render pipeline");
+
         promise
     }
@@ -1010,6 +1053,48 @@ impl AsyncWGPUListener for GPUDevice {
                     promise.resolve_native(&error);
                 },
             },
+            WebGPUResponse::ComputePipeline(result) => match result {
+                Ok(pipeline) => promise.resolve_native(&GPUComputePipeline::new(
+                    &self.global(),
+                    WebGPUComputePipeline(pipeline.id),
+                    pipeline.label.into(),
+                    self,
+                )),
+                Err(webgpu::Error::Validation(msg)) => {
+                    promise.reject_native(&GPUPipelineError::new(
+                        &self.global(),
+                        msg.into(),
+                        GPUPipelineErrorReason::Validation,
+                    ))
+                },
+                Err(webgpu::Error::OutOfMemory(msg) | webgpu::Error::Internal(msg)) => promise
+                    .reject_native(&GPUPipelineError::new(
+                        &self.global(),
+                        msg.into(),
+                        GPUPipelineErrorReason::Internal,
+                    )),
+            },
+            WebGPUResponse::RenderPipeline(result) => match result {
+                Ok(pipeline) => promise.resolve_native(&GPURenderPipeline::new(
+                    &self.global(),
+                    WebGPURenderPipeline(pipeline.id),
+                    pipeline.label.into(),
+                    self,
+                )),
+                Err(webgpu::Error::Validation(msg)) => {
+                    promise.reject_native(&GPUPipelineError::new(
+                        &self.global(),
+                        msg.into(),
+                        GPUPipelineErrorReason::Validation,
+                    ))
+                },
+                Err(webgpu::Error::OutOfMemory(msg) | webgpu::Error::Internal(msg)) => promise
+                    .reject_native(&GPUPipelineError::new(
+                        &self.global(),
+                        msg.into(),
+                        GPUPipelineErrorReason::Internal,
+                    )),
+            },
             _ => unreachable!("Wrong response received on AsyncWGPUListener for GPUDevice"),
         }
     }


@@ -0,0 +1,70 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+use dom_struct::dom_struct;
+use js::rust::HandleObject;
+
+use super::bindings::codegen::Bindings::WebGPUBinding::{
+    GPUPipelineErrorInit, GPUPipelineErrorMethods, GPUPipelineErrorReason,
+};
+use crate::dom::bindings::reflector::reflect_dom_object_with_proto;
+use crate::dom::bindings::root::DomRoot;
+use crate::dom::bindings::str::DOMString;
+use crate::dom::domexception::DOMException;
+use crate::dom::globalscope::GlobalScope;
+
+/// <https://gpuweb.github.io/gpuweb/#gpupipelineerror>
+#[dom_struct]
+pub struct GPUPipelineError {
+    exception: DOMException,
+    reason: GPUPipelineErrorReason,
+}
+
+impl GPUPipelineError {
+    fn new_inherited(message: DOMString, reason: GPUPipelineErrorReason) -> Self {
+        Self {
+            exception: DOMException::new_inherited(message, "GPUPipelineError".into()),
+            reason,
+        }
+    }
+
+    pub fn new_with_proto(
+        global: &GlobalScope,
+        proto: Option<HandleObject>,
+        message: DOMString,
+        reason: GPUPipelineErrorReason,
+    ) -> DomRoot<Self> {
+        reflect_dom_object_with_proto(
+            Box::new(Self::new_inherited(message, reason)),
+            global,
+            proto,
+        )
+    }
+
+    pub fn new(
+        global: &GlobalScope,
+        message: DOMString,
+        reason: GPUPipelineErrorReason,
+    ) -> DomRoot<Self> {
+        Self::new_with_proto(global, None, message, reason)
+    }
+
+    /// <https://gpuweb.github.io/gpuweb/#dom-gpupipelineerror-constructor>
+    #[allow(non_snake_case)]
+    pub fn Constructor(
+        global: &GlobalScope,
+        proto: Option<HandleObject>,
+        message: DOMString,
+        options: &GPUPipelineErrorInit,
+    ) -> DomRoot<Self> {
+        Self::new_with_proto(global, proto, message, options.reason)
+    }
+}
+
+impl GPUPipelineErrorMethods for GPUPipelineError {
+    /// <https://gpuweb.github.io/gpuweb/#dom-gpupipelineerror-reason>
+    fn Reason(&self) -> GPUPipelineErrorReason {
+        self.reason
+    }
+}


@@ -2,14 +2,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
 
-use std::string::String;
-
 use dom_struct::dom_struct;
 use webgpu::{WebGPU, WebGPUBindGroupLayout, WebGPURenderPipeline, WebGPURequest};
 
 use crate::dom::bindings::cell::DomRefCell;
 use crate::dom::bindings::codegen::Bindings::WebGPUBinding::GPURenderPipelineMethods;
-use crate::dom::bindings::error::{Error, Fallible};
+use crate::dom::bindings::error::Fallible;
 use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
 use crate::dom::bindings::root::{Dom, DomRoot};
 use crate::dom::bindings::str::USVString;
@@ -26,8 +24,6 @@ pub struct GPURenderPipeline {
     label: DomRefCell<USVString>,
     #[no_trace]
     render_pipeline: WebGPURenderPipeline,
-    #[no_trace]
-    bind_group_layouts: Vec<WebGPUBindGroupLayout>,
     device: Dom<GPUDevice>,
 }
@@ -35,7 +31,6 @@ impl GPURenderPipeline {
     fn new_inherited(
         render_pipeline: WebGPURenderPipeline,
         label: USVString,
-        bgls: Vec<WebGPUBindGroupLayout>,
         device: &GPUDevice,
     ) -> Self {
         Self {
@@ -43,7 +38,6 @@ impl GPURenderPipeline {
             channel: device.channel(),
             label: DomRefCell::new(label),
             render_pipeline,
-            bind_group_layouts: bgls,
             device: Dom::from_ref(device),
         }
     }
@@ -52,14 +46,12 @@ impl GPURenderPipeline {
         global: &GlobalScope,
         render_pipeline: WebGPURenderPipeline,
         label: USVString,
-        bgls: Vec<WebGPUBindGroupLayout>,
         device: &GPUDevice,
     ) -> DomRoot<Self> {
         reflect_dom_object(
             Box::new(GPURenderPipeline::new_inherited(
                 render_pipeline,
                 label,
-                bgls,
                 device,
             )),
             global,
@@ -86,13 +78,28 @@ impl GPURenderPipelineMethods for GPURenderPipeline {
     /// <https://gpuweb.github.io/gpuweb/#dom-gpupipelinebase-getbindgrouplayout>
     fn GetBindGroupLayout(&self, index: u32) -> Fallible<DomRoot<GPUBindGroupLayout>> {
-        if index > self.bind_group_layouts.len() as u32 {
-            return Err(Error::Range(String::from("Index out of bounds")));
+        let id = self
+            .global()
+            .wgpu_id_hub()
+            .create_bind_group_layout_id(self.render_pipeline.0.backend());
+
+        if let Err(e) = self
+            .channel
+            .0
+            .send(WebGPURequest::RenderGetBindGroupLayout {
+                device_id: self.device.id().0,
+                pipeline_id: self.render_pipeline.0,
+                index,
+                id,
+            })
+        {
+            warn!("Failed to send WebGPURequest::RenderGetBindGroupLayout {e:?}");
         }
+
         Ok(GPUBindGroupLayout::new(
             &self.global(),
             self.channel.clone(),
-            self.bind_group_layouts[index as usize],
+            WebGPUBindGroupLayout(id),
             USVString::default(),
         ))
     }


@@ -346,6 +346,7 @@ pub mod gpuerror;
 pub mod gpuinternalerror;
 pub mod gpumapmode;
 pub mod gpuoutofmemoryerror;
+pub mod gpupipelineerror;
 pub mod gpupipelinelayout;
 pub mod gpuqueryset;
 pub mod gpuqueue;


@@ -518,6 +518,21 @@ interface GPUCompilationInfo {
     readonly attribute any messages;
 };
 
+[Exposed=(Window, Worker), Pref="dom.webgpu.enabled"]
+interface GPUPipelineError : DOMException {
+    constructor(optional DOMString message = "", GPUPipelineErrorInit options);
+    readonly attribute GPUPipelineErrorReason reason;
+};
+
+dictionary GPUPipelineErrorInit {
+    required GPUPipelineErrorReason reason;
+};
+
+enum GPUPipelineErrorReason {
+    "validation",
+    "internal",
+};
+
 enum GPUAutoLayoutMode {
     "auto"
 };
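
As the spec requires, the async creation promises reject with a GPUPipelineError whose reason is "validation" for validation failures and "internal" for internal or out-of-memory failures, which is how the DOM listener earlier in this commit maps webgpu::Error variants. A condensed sketch of that mapping, using simplified local enums rather than Servo's actual types:

// Simplified local mirrors of the error enums involved in the mapping.
#[derive(Debug)]
enum WebGpuError {
    Validation(String),
    OutOfMemory(String),
    Internal(String),
}

#[derive(Debug)]
enum GpuPipelineErrorReason {
    Validation,
    Internal,
}

fn reason_for(error: &WebGpuError) -> GpuPipelineErrorReason {
    match error {
        // Validation failures reject with reason "validation"...
        WebGpuError::Validation(_) => GpuPipelineErrorReason::Validation,
        // ...while out-of-memory and internal failures both map to "internal".
        WebGpuError::OutOfMemory(_) | WebGpuError::Internal(_) => {
            GpuPipelineErrorReason::Internal
        },
    }
}

fn main() {
    let errors = vec![
        WebGpuError::Validation("bad entry point".into()),
        WebGpuError::OutOfMemory("allocation failed".into()),
        WebGpuError::Internal("driver error".into()),
    ];
    for e in &errors {
        println!("{e:?} -> {:?}", reason_for(e));
    }
}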


@@ -103,6 +103,8 @@ pub enum WebGPURequest {
         compute_pipeline_id: id::ComputePipelineId,
         descriptor: ComputePipelineDescriptor<'static>,
         implicit_ids: Option<(id::PipelineLayoutId, Vec<id::BindGroupLayoutId>)>,
+        /// present only on ASYNC versions
+        async_sender: Option<IpcSender<WebGPUResponse>>,
     },
     CreateContext(IpcSender<ExternalImageId>),
     CreatePipelineLayout {
@@ -113,8 +115,10 @@ pub enum WebGPURequest {
     CreateRenderPipeline {
         device_id: id::DeviceId,
         render_pipeline_id: id::RenderPipelineId,
-        descriptor: Option<RenderPipelineDescriptor<'static>>,
+        descriptor: RenderPipelineDescriptor<'static>,
         implicit_ids: Option<(id::PipelineLayoutId, Vec<id::BindGroupLayoutId>)>,
+        /// present only on ASYNC versions
+        async_sender: Option<IpcSender<WebGPUResponse>>,
     },
     CreateSampler {
         device_id: id::DeviceId,
@@ -301,4 +305,16 @@ pub enum WebGPURequest {
         device_id: id::DeviceId,
         sender: IpcSender<WebGPUResponse>,
     },
+    ComputeGetBindGroupLayout {
+        device_id: id::DeviceId,
+        pipeline_id: id::ComputePipelineId,
+        index: u32,
+        id: id::BindGroupLayoutId,
+    },
+    RenderGetBindGroupLayout {
+        device_id: id::DeviceId,
+        pipeline_id: id::RenderPipelineId,
+        index: u32,
+        id: id::BindGroupLayoutId,
+    },
 }


@@ -6,6 +6,7 @@
 use ipc_channel::ipc::IpcSharedMemory;
 use serde::{Deserialize, Serialize};
+use wgc::id;
 use wgc::pipeline::CreateShaderModuleError;
 use wgpu_core::instance::{RequestAdapterError, RequestDeviceError};
 use wgpu_core::resource::BufferAccessError;
@@ -65,6 +66,12 @@ pub struct Adapter {
     pub channel: WebGPU,
 }
 
+#[derive(Debug, Deserialize, Serialize)]
+pub struct Pipeline<T: std::fmt::Debug + Serialize> {
+    pub id: T,
+    pub label: String,
+}
+
 #[derive(Debug, Deserialize, Serialize)]
 #[allow(clippy::large_enum_variant)]
 pub enum WebGPUResponse {
@@ -82,4 +89,6 @@ pub enum WebGPUResponse {
     SubmittedWorkDone,
     PoppedErrorScope(Result<Option<Error>, PopError>),
     CompilationInfo(Option<ShaderCompilationInfo>),
+    RenderPipeline(Result<Pipeline<id::RenderPipelineId>, Error>),
+    ComputePipeline(Result<Pipeline<id::ComputePipelineId>, Error>),
 }


@@ -29,6 +29,8 @@ use wgc::pipeline::ShaderModuleDescriptor;
 use wgc::resource::{BufferMapCallback, BufferMapOperation};
 use wgc::{gfx_select, id};
 use wgpu_core::command::RenderPassDescriptor;
+use wgpu_core::device::DeviceError;
+use wgpu_core::pipeline::{CreateComputePipelineError, CreateRenderPipelineError};
 use wgpu_core::resource::BufferAccessResult;
 use wgpu_types::MemoryHints;
 use wgt::InstanceDescriptor;
@@ -38,8 +40,8 @@ use crate::gpu_error::ErrorScope;
 use crate::poll_thread::Poller;
 use crate::render_commands::apply_render_command;
 use crate::{
-    Adapter, ComputePassId, Error, PopError, PresentationData, RenderPassId, WebGPU, WebGPUAdapter,
-    WebGPUDevice, WebGPUMsg, WebGPUQueue, WebGPURequest, WebGPUResponse,
+    Adapter, ComputePassId, Error, Pipeline, PopError, PresentationData, RenderPassId, WebGPU,
+    WebGPUAdapter, WebGPUDevice, WebGPUMsg, WebGPUQueue, WebGPURequest, WebGPUResponse,
 };
 
 pub const PRESENTATION_BUFFER_COUNT: usize = 10;
@@ -378,6 +380,7 @@ impl WGPU {
                     compute_pipeline_id,
                     descriptor,
                     implicit_ids,
+                    async_sender: sender,
                 } => {
                     let global = &self.global;
                     let bgls = implicit_ids
@@ -399,7 +402,24 @@ impl WGPU {
                             implicit
                         )
                     );
-                    self.maybe_dispatch_wgpu_error(device_id, error);
+                    if let Some(sender) = sender {
+                        let res = match error {
+                            // if device is lost we must return pipeline and not raise any error
+                            Some(CreateComputePipelineError::Device(
+                                DeviceError::Lost | DeviceError::Invalid(_),
+                            )) |
+                            None => Ok(Pipeline {
+                                id: compute_pipeline_id,
+                                label: descriptor.label.unwrap_or_default().to_string(),
+                            }),
+                            Some(e) => Err(Error::from_error(e)),
+                        };
+                        if let Err(e) = sender.send(WebGPUResponse::ComputePipeline(res)) {
+                            warn!("Failed sending WebGPUResponse::ComputePipeline {e:?}");
+                        }
+                    } else {
+                        self.maybe_dispatch_wgpu_error(device_id, error);
+                    }
                 },
                 WebGPURequest::CreateContext(sender) => {
                     let id = self
@@ -426,6 +446,7 @@ impl WGPU {
                     render_pipeline_id,
                     descriptor,
                     implicit_ids,
+                    async_sender: sender,
                 } => {
                     let global = &self.global;
                     let bgls = implicit_ids
@@ -440,14 +461,30 @@ impl WGPU {
                             root_id: *layout,
                             group_ids: bgls.as_slice(),
                         });
-                    if let Some(desc) = descriptor {
-                        let (_, error) = gfx_select!(render_pipeline_id =>
-                            global.device_create_render_pipeline(
-                                device_id,
-                                &desc,
-                                Some(render_pipeline_id),
-                                implicit)
-                        );
+                    let (_, error) = gfx_select!(render_pipeline_id =>
+                        global.device_create_render_pipeline(
+                            device_id,
+                            &descriptor,
+                            Some(render_pipeline_id),
+                            implicit)
+                    );
+
+                    if let Some(sender) = sender {
+                        let res = match error {
+                            // if device is lost we must return pipeline and not raise any error
+                            Some(CreateRenderPipelineError::Device(
+                                DeviceError::Lost | DeviceError::Invalid(_),
+                            )) |
+                            None => Ok(Pipeline {
+                                id: render_pipeline_id,
+                                label: descriptor.label.unwrap_or_default().to_string(),
+                            }),
+                            Some(e) => Err(Error::from_error(e)),
+                        };
+                        if let Err(e) = sender.send(WebGPUResponse::RenderPipeline(res)) {
+                            warn!("Failed sending WebGPUResponse::RenderPipeline {e:?}");
+                        }
+                    } else {
                         self.maybe_dispatch_wgpu_error(device_id, error);
                     }
                 },
@@ -1407,6 +1444,28 @@ impl WGPU {
                         }
                     }
                 },
+                WebGPURequest::ComputeGetBindGroupLayout {
+                    device_id,
+                    pipeline_id,
+                    index,
+                    id,
+                } => {
+                    let global = &self.global;
+                    let (_, error) = gfx_select!(pipeline_id =>
+                        global.compute_pipeline_get_bind_group_layout(pipeline_id, index, Some(id)));
+                    self.maybe_dispatch_wgpu_error(device_id, error);
+                },
+                WebGPURequest::RenderGetBindGroupLayout {
+                    device_id,
+                    pipeline_id,
+                    index,
+                    id,
+                } => {
+                    let global = &self.global;
+                    let (_, error) = gfx_select!(pipeline_id =>
+                        global.render_pipeline_get_bind_group_layout(pipeline_id, index, Some(id)));
+                    self.maybe_dispatch_wgpu_error(device_id, error);
+                },
             }
         }
     }

One file's diff is suppressed because it is too large.