Implement Async Error reporting for WebGPU and update wgpu-core

Kunal Mohan 2020-07-07 14:37:42 +05:30
parent 809b4ae2ee
commit 0dc1514d57
18 changed files with 568 additions and 74 deletions

Cargo.lock (generated)

@ -1936,7 +1936,7 @@ dependencies = [
[[package]]
name = "gfx-descriptor"
version = "0.1.0"
source = "git+https://github.com/gfx-rs/gfx-extras?rev=6387d81773e29be2220cb15186ceb875ed88303b#6387d81773e29be2220cb15186ceb875ed88303b"
source = "git+https://github.com/gfx-rs/gfx-extras?rev=473a4cdc63463e7986707507c4a7f6a3a767e329#473a4cdc63463e7986707507c4a7f6a3a767e329"
dependencies = [
"fxhash",
"gfx-hal",
@ -1956,7 +1956,7 @@ dependencies = [
[[package]]
name = "gfx-memory"
version = "0.1.3"
source = "git+https://github.com/gfx-rs/gfx-extras?rev=6387d81773e29be2220cb15186ceb875ed88303b#6387d81773e29be2220cb15186ceb875ed88303b"
source = "git+https://github.com/gfx-rs/gfx-extras?rev=473a4cdc63463e7986707507c4a7f6a3a767e329#473a4cdc63463e7986707507c4a7f6a3a767e329"
dependencies = [
"fxhash",
"gfx-hal",
@ -6675,6 +6675,7 @@ dependencies = [
"ipc-channel",
"log",
"malloc_size_of",
"msg",
"serde",
"servo_config",
"smallvec 0.6.13",
@ -6813,7 +6814,6 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.5.0"
source = "git+https://github.com/gfx-rs/wgpu#349c02104948fea7aee38c3e7dc197427b1f2414"
dependencies = [
"arrayvec 0.5.1",
"bitflags",
@ -6835,14 +6835,12 @@ dependencies = [
"smallvec 1.4.1",
"spirv_headers",
"tracing",
"vec_map",
"wgpu-types",
]
[[package]]
name = "wgpu-types"
version = "0.5.0"
source = "git+https://github.com/gfx-rs/wgpu#349c02104948fea7aee38c3e7dc197427b1f2414"
dependencies = [
"bitflags",
"serde",


@ -33,3 +33,7 @@ mio = { git = "https://github.com/servo/mio.git", branch = "servo-mio-0.6.22" }
[patch."https://github.com/servo/webrender"]
webrender = { git = "https://github.com/jdm/webrender", branch = "crash-backtrace" }
webrender_api = { git = "https://github.com/jdm/webrender", branch = "crash-backtrace" }
[patch."https://github.com/gfx-rs/wgpu"]
wgpu-core = {path="../wgpu/wgpu-core"}
wgpu-types = {path="../wgpu/wgpu-types"}


@ -154,6 +154,10 @@ DOMInterfaces = {
'GPUBuffer': {
'inRealms': ['MapAsync'],
},
'GPUDevice': {
'inRealms': ['PopErrorScope', 'Lost'],
}
}


@ -5,6 +5,7 @@
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::BroadcastChannelBinding::BroadcastChannelMethods;
use crate::dom::bindings::codegen::Bindings::EventSourceBinding::EventSourceBinding::EventSourceMethods;
use crate::dom::bindings::codegen::Bindings::GPUValidationErrorBinding::GPUError;
use crate::dom::bindings::codegen::Bindings::ImageBitmapBinding::{
ImageBitmapOptions, ImageBitmapSource,
};
@ -34,6 +35,9 @@ use crate::dom::event::{Event, EventBubbles, EventCancelable, EventStatus};
use crate::dom::eventsource::EventSource;
use crate::dom::eventtarget::EventTarget;
use crate::dom::file::File;
use crate::dom::gpudevice::GPUDevice;
use crate::dom::gpuoutofmemoryerror::GPUOutOfMemoryError;
use crate::dom::gpuvalidationerror::GPUValidationError;
use crate::dom::htmlscriptelement::ScriptId;
use crate::dom::identityhub::Identities;
use crate::dom::imagebitmap::ImageBitmap;
@ -122,6 +126,7 @@ use std::sync::Arc;
use std::thread::JoinHandle;
use time::{get_time, Timespec};
use uuid::Uuid;
use webgpu::{identity::WebGPUOpResult, WebGPUDevice};
#[derive(JSTraceable)]
pub struct AutoCloseWorker {
@ -289,6 +294,9 @@ pub struct GlobalScope {
#[ignore_malloc_size_of = "defined in wgpu"]
gpu_id_hub: Arc<Mutex<Identities>>,
/// WebGPU devices
gpu_devices: DomRefCell<HashMap<WebGPUDevice, Dom<GPUDevice>>>,
// https://w3c.github.io/performance-timeline/#supportedentrytypes-attribute
#[ignore_malloc_size_of = "mozjs"]
frozen_supported_performance_entry_types: DomRefCell<Option<Heap<JSVal>>>,
@ -745,6 +753,7 @@ impl GlobalScope {
is_headless,
user_agent,
gpu_id_hub,
gpu_devices: DomRefCell::new(HashMap::new()),
frozen_supported_performance_entry_types: DomRefCell::new(Default::default()),
https_state: Cell::new(HttpsState::None),
console_group_stack: DomRefCell::new(Vec::new()),
@ -2939,6 +2948,31 @@ impl GlobalScope {
self.gpu_id_hub.clone()
}
pub fn add_gpu_device(&self, device: &GPUDevice) {
self.gpu_devices
.borrow_mut()
.insert(device.id(), Dom::from_ref(device));
}
pub fn handle_wgpu_msg(&self, device: WebGPUDevice, scope: u64, result: WebGPUOpResult) {
let result = match result {
WebGPUOpResult::Success => Ok(()),
WebGPUOpResult::ValidationError(m) => {
let val_err = GPUValidationError::new(&self, DOMString::from_string(m));
Err(GPUError::GPUValidationError(val_err))
},
WebGPUOpResult::OutOfMemoryError => {
let oom_err = GPUOutOfMemoryError::new(&self);
Err(GPUError::GPUOutOfMemoryError(oom_err))
},
};
self.gpu_devices
.borrow()
.get(&device)
.expect("GPUDevice not found")
.handle_server_msg(scope, result);
}
pub(crate) fn current_group_label(&self) -> Option<DOMString> {
self.console_group_stack
.borrow()

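The GlobalScope changes above keep a registry of live GPUDevice objects keyed by their WebGPUDevice handle (filled by add_gpu_device) and translate each incoming WebGPUOpResult into the GPUError union before handing it to the owning device. A minimal standalone sketch of that translation and dispatch, with simplified stand-in types (OpResult, GpuError, Device) rather than Servo's DOM objects:

use std::collections::HashMap;

// Stand-ins for WebGPUOpResult (sent over IPC) and the GPUError union
// (exposed to script); the real types wrap DOM objects.
enum OpResult {
    Success,
    ValidationError(String),
    OutOfMemoryError,
}

#[derive(Debug)]
enum GpuError {
    Validation(String),
    OutOfMemory,
}

struct Device(u64);

impl Device {
    fn handle_server_msg(&self, scope: u64, result: Result<(), GpuError>) {
        println!("device {} scope {}: {:?}", self.0, scope, result);
    }
}

struct Global {
    // Mirrors GlobalScope::gpu_devices: the registry filled by add_gpu_device.
    gpu_devices: HashMap<u64, Device>,
}

impl Global {
    fn handle_wgpu_msg(&self, device: u64, scope: u64, result: OpResult) {
        let result = match result {
            OpResult::Success => Ok(()),
            OpResult::ValidationError(m) => Err(GpuError::Validation(m)),
            OpResult::OutOfMemoryError => Err(GpuError::OutOfMemory),
        };
        self.gpu_devices
            .get(&device)
            .expect("GPUDevice not found")
            .handle_server_msg(scope, result);
    }
}

fn main() {
    let mut gpu_devices = HashMap::new();
    gpu_devices.insert(1, Device(1));
    let global = Global { gpu_devices };
    global.handle_wgpu_msg(1, 0, OpResult::ValidationError("bad bind group".into()));
    global.handle_wgpu_msg(1, 1, OpResult::OutOfMemoryError);
    global.handle_wgpu_msg(1, 2, OpResult::Success);
}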

@ -34,7 +34,7 @@ pub struct GPUAdapter {
}
impl GPUAdapter {
pub fn new_inherited(
fn new_inherited(
channel: WebGPU,
name: DOMString,
extensions: Heap<*mut JSObject>,
@ -93,6 +93,7 @@ impl GPUAdapterMethods for GPUAdapter {
.wgpu_id_hub()
.lock()
.create_device_id(self.adapter.0.backend());
let pipeline_id = self.global().pipeline_id();
if self
.channel
.0
@ -101,6 +102,7 @@ impl GPUAdapterMethods for GPUAdapter {
adapter_id: self.adapter,
descriptor: desc,
device_id: id,
pipeline_id,
})
.is_err()
{
@ -127,6 +129,7 @@ impl AsyncWGPUListener for GPUAdapter {
device_id,
queue_id,
);
self.global().add_gpu_device(&device);
promise.resolve_native(&device);
},
_ => promise.reject_error(Error::Operation),


@ -24,7 +24,8 @@ use dom_struct::dom_struct;
use std::cell::Cell;
use std::collections::HashSet;
use webgpu::wgpu::command::{
ColorAttachmentDescriptor, DepthStencilAttachmentDescriptor, RenderPass, RenderPassDescriptor,
ColorAttachmentDescriptor, DepthStencilAttachmentDescriptor, LoadOp, PassChannel, RenderPass,
RenderPassDescriptor, StoreOp,
};
use webgpu::{self, wgt, WebGPU, WebGPUDevice, WebGPURequest};
@ -138,9 +139,9 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
.iter()
.map(|color| {
let (load_op, clear_value) = match color.loadValue {
GPUColorLoad::GPULoadOp(_) => (wgt::LoadOp::Load, wgt::Color::TRANSPARENT),
GPUColorLoad::GPULoadOp(_) => (LoadOp::Load, wgt::Color::TRANSPARENT),
GPUColorLoad::DoubleSequence(ref s) => (
wgt::LoadOp::Clear,
LoadOp::Clear,
wgt::Color {
r: *s[0],
g: *s[1],
@ -149,7 +150,7 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
},
),
GPUColorLoad::GPUColorDict(ref d) => (
wgt::LoadOp::Clear,
LoadOp::Clear,
wgt::Color {
r: *d.r,
g: *d.g,
@ -158,11 +159,11 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
},
),
};
let channel = wgt::PassChannel {
let channel = PassChannel {
load_op,
store_op: match color.storeOp {
GPUStoreOp::Store => wgt::StoreOp::Store,
GPUStoreOp::Clear => wgt::StoreOp::Clear,
GPUStoreOp::Store => StoreOp::Store,
GPUStoreOp::Clear => StoreOp::Clear,
},
clear_value,
read_only: false,
@ -177,27 +178,27 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
let depth_stencil = descriptor.depthStencilAttachment.as_ref().map(|depth| {
let (depth_load_op, clear_depth) = match depth.depthLoadValue {
GPULoadOpOrFloat::GPULoadOp(_) => (wgt::LoadOp::Load, 0.0f32),
GPULoadOpOrFloat::Float(f) => (wgt::LoadOp::Clear, *f),
GPULoadOpOrFloat::GPULoadOp(_) => (LoadOp::Load, 0.0f32),
GPULoadOpOrFloat::Float(f) => (LoadOp::Clear, *f),
};
let (stencil_load_op, clear_stencil) = match depth.stencilLoadValue {
GPUStencilLoadValue::GPULoadOp(_) => (wgt::LoadOp::Load, 0u32),
GPUStencilLoadValue::RangeEnforcedUnsignedLong(l) => (wgt::LoadOp::Clear, l),
GPUStencilLoadValue::GPULoadOp(_) => (LoadOp::Load, 0u32),
GPUStencilLoadValue::RangeEnforcedUnsignedLong(l) => (LoadOp::Clear, l),
};
let depth_channel = wgt::PassChannel {
let depth_channel = PassChannel {
load_op: depth_load_op,
store_op: match depth.depthStoreOp {
GPUStoreOp::Store => wgt::StoreOp::Store,
GPUStoreOp::Clear => wgt::StoreOp::Clear,
GPUStoreOp::Store => StoreOp::Store,
GPUStoreOp::Clear => StoreOp::Clear,
},
clear_value: clear_depth,
read_only: depth.depthReadOnly,
};
let stencil_channel = wgt::PassChannel {
let stencil_channel = PassChannel {
load_op: stencil_load_op,
store_op: match depth.stencilStoreOp {
GPUStoreOp::Store => wgt::StoreOp::Store,
GPUStoreOp::Clear => wgt::StoreOp::Clear,
GPUStoreOp::Store => StoreOp::Store,
GPUStoreOp::Clear => StoreOp::Clear,
},
clear_value: clear_stencil,
read_only: depth.stencilReadOnly,


@ -31,7 +31,11 @@ use crate::dom::bindings::codegen::Bindings::GPUTextureBinding::{
GPUTextureDimension, GPUTextureFormat,
};
use crate::dom::bindings::codegen::Bindings::GPUTextureViewBinding::GPUTextureViewDimension;
use crate::dom::bindings::codegen::Bindings::GPUValidationErrorBinding::{
GPUError, GPUErrorFilter,
};
use crate::dom::bindings::codegen::UnionTypes::Uint32ArrayOrString::{String, Uint32Array};
use crate::dom::bindings::error::Error;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
@ -50,16 +54,29 @@ use crate::dom::gpurenderpipeline::GPURenderPipeline;
use crate::dom::gpusampler::GPUSampler;
use crate::dom::gpushadermodule::GPUShaderModule;
use crate::dom::gputexture::GPUTexture;
use crate::dom::promise::Promise;
use crate::realms::InRealm;
use crate::script_runtime::JSContext as SafeJSContext;
use arrayvec::ArrayVec;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSObject};
use std::cell::RefCell;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::ptr::NonNull;
use std::rc::Rc;
use webgpu::wgpu::binding_model::BufferBinding;
use webgpu::{self, wgt, WebGPU, WebGPUBindings, WebGPURequest};
#[derive(JSTraceable, MallocSizeOf)]
struct ErrorScopeInfo {
filter: GPUErrorFilter,
op_count: u64,
#[ignore_malloc_size_of = "defined in webgpu"]
error: Option<GPUError>,
#[ignore_malloc_size_of = "promises are hard"]
promise: Option<Rc<Promise>>,
}
#[dom_struct]
pub struct GPUDevice {
eventtarget: EventTarget,
@ -73,6 +90,11 @@ pub struct GPUDevice {
label: DomRefCell<Option<DOMString>>,
device: webgpu::WebGPUDevice,
default_queue: Dom<GPUQueue>,
error_scopes: DomRefCell<HashMap<u64, ErrorScopeInfo>>,
scope_stack: DomRefCell<Vec<u64>>,
next_scope_id: Cell<u64>,
#[ignore_malloc_size_of = "promises are hard"]
lost_promise: DomRefCell<Option<Rc<Promise>>>,
}
impl GPUDevice {
@ -93,6 +115,10 @@ impl GPUDevice {
label: DomRefCell::new(None),
device,
default_queue: Dom::from_ref(queue),
error_scopes: DomRefCell::new(HashMap::new()),
scope_stack: DomRefCell::new(Vec::new()),
next_scope_id: Cell::new(0),
lost_promise: DomRefCell::new(None),
}
}
@ -119,6 +145,37 @@ impl GPUDevice {
pub fn id(&self) -> webgpu::WebGPUDevice {
self.device
}
pub fn handle_server_msg(&self, scope: u64, result: Result<(), GPUError>) {
let mut err_scope;
{
err_scope = self.error_scopes.borrow_mut().remove(&scope).unwrap();
}
err_scope.op_count -= 1;
match result {
Ok(()) => {},
Err(e) => {
if err_scope.error.is_none() {
err_scope.error = Some(e);
}
},
}
if let Some(ref promise) = err_scope.promise {
if !promise.is_fulfilled() {
if let Some(ref e) = err_scope.error {
match e {
GPUError::GPUValidationError(v) => promise.resolve_native(&v),
GPUError::GPUOutOfMemoryError(w) => promise.resolve_native(&w),
}
} else if err_scope.op_count == 0 {
promise.resolve_native(&None::<GPUError>);
}
}
}
if err_scope.op_count > 0 || err_scope.promise.is_none() {
let _ = self.error_scopes.borrow_mut().insert(scope, err_scope);
}
}
}
impl GPUDeviceMethods for GPUDevice {
@ -152,6 +209,13 @@ impl GPUDeviceMethods for GPUDevice {
*self.label.borrow_mut() = value;
}
/// https://gpuweb.github.io/gpuweb/#dom-gpudevice-lost
fn Lost(&self, comp: InRealm) -> Rc<Promise> {
let promise = Promise::new_in_current_realm(&self.global(), comp);
*self.lost_promise.borrow_mut() = Some(promise.clone());
promise
}
/// https://gpuweb.github.io/gpuweb/#dom-gpudevice-createbuffer
fn CreateBuffer(&self, descriptor: &GPUBufferDescriptor) -> DomRoot<GPUBuffer> {
let wgpu_descriptor = wgt::BufferDescriptor {
@ -289,6 +353,16 @@ impl GPUDeviceMethods for GPUDevice {
})
.collect::<Vec<_>>();
let mut scope_id = None;
if let Some(s_id) = self.scope_stack.borrow_mut().last() {
if let Some(mut scope) = self.error_scopes.borrow_mut().get_mut(&s_id) {
scope.op_count += 1;
scope_id = Some(*s_id);
} else {
warn!("Could not find Error Scope for id {}", s_id);
}
}
let bind_group_layout_id = self
.global()
.wgpu_id_hub()
@ -300,6 +374,7 @@ impl GPUDeviceMethods for GPUDevice {
device_id: self.device.0,
bind_group_layout_id,
entries,
scope_id,
})
.expect("Failed to create WebGPU BindGroupLayout");
@ -320,6 +395,16 @@ impl GPUDeviceMethods for GPUDevice {
}
});
let mut scope_id = None;
if let Some(s_id) = self.scope_stack.borrow_mut().last() {
if let Some(mut scope) = self.error_scopes.borrow_mut().get_mut(&s_id) {
scope.op_count += 1;
scope_id = Some(*s_id);
} else {
warn!("Could not find Error Scope for id {}", s_id);
}
}
let pipeline_layout_id = self
.global()
.wgpu_id_hub()
@ -331,6 +416,7 @@ impl GPUDeviceMethods for GPUDevice {
device_id: self.device.0,
pipeline_layout_id,
bind_group_layouts: bgl_ids,
scope_id,
})
.expect("Failed to create WebGPU PipelineLayout");
@ -369,6 +455,16 @@ impl GPUDeviceMethods for GPUDevice {
})
.collect::<Vec<_>>();
let mut scope_id = None;
if let Some(s_id) = self.scope_stack.borrow_mut().last() {
if let Some(mut scope) = self.error_scopes.borrow_mut().get_mut(&s_id) {
scope.op_count += 1;
scope_id = Some(*s_id);
} else {
warn!("Could not find Error Scope for id {}", s_id);
}
}
let bind_group_id = self
.global()
.wgpu_id_hub()
@ -381,10 +477,12 @@ impl GPUDeviceMethods for GPUDevice {
bind_group_id,
bind_group_layout_id: descriptor.layout.id().0,
entries,
scope_id,
})
.expect("Failed to create WebGPU BindGroup");
let bind_group = webgpu::WebGPUBindGroup(bind_group_id);
GPUBindGroup::new(
&self.global(),
bind_group,
@ -436,10 +534,21 @@ impl GPUDeviceMethods for GPUDevice {
.lock()
.create_compute_pipeline_id(self.device.0.backend());
let mut scope_id = None;
if let Some(s_id) = self.scope_stack.borrow_mut().last() {
if let Some(mut scope) = self.error_scopes.borrow_mut().get_mut(&s_id) {
scope.op_count += 1;
scope_id = Some(*s_id);
} else {
warn!("Could not find Error Scope for id {}", s_id);
}
}
self.channel
.0
.send(WebGPURequest::CreateComputePipeline {
device_id: self.device.0,
scope_id,
compute_pipeline_id,
pipeline_layout_id: pipeline.0,
program_id: program.0,
@ -683,11 +792,22 @@ impl GPUDeviceMethods for GPUDevice {
.lock()
.create_render_pipeline_id(self.device.0.backend());
let mut scope_id = None;
if let Some(s_id) = self.scope_stack.borrow_mut().last() {
if let Some(mut scope) = self.error_scopes.borrow_mut().get_mut(&s_id) {
scope.op_count += 1;
scope_id = Some(*s_id);
} else {
warn!("Could not find Error Scope for id {}", s_id);
}
}
self.channel
.0
.send(WebGPURequest::CreateRenderPipeline {
device_id: self.device.0,
render_pipeline_id,
scope_id,
pipeline_layout_id: descriptor.parent.layout.id().0,
vertex_module,
vertex_entry_point,
@ -708,6 +828,49 @@ impl GPUDeviceMethods for GPUDevice {
GPURenderPipeline::new(&self.global(), render_pipeline, self.device, valid)
}
/// https://gpuweb.github.io/gpuweb/#dom-gpudevice-pusherrorscope
fn PushErrorScope(&self, filter: GPUErrorFilter) {
let scope_id = self.next_scope_id.get();
self.next_scope_id.set(scope_id + 1);
let err_scope = ErrorScopeInfo {
filter,
op_count: 0,
error: None,
promise: None,
};
let res = self.error_scopes.borrow_mut().insert(scope_id, err_scope);
self.scope_stack.borrow_mut().push(scope_id);
assert!(res.is_none());
}
/// https://gpuweb.github.io/gpuweb/#dom-gpudevice-poperrorscope
fn PopErrorScope(&self, comp: InRealm) -> Rc<Promise> {
let promise = Promise::new_in_current_realm(&self.global(), comp);
let scope_id = if let Some(e) = self.scope_stack.borrow_mut().pop() {
e
} else {
promise.reject_error(Error::Operation);
return promise;
};
let mut err_scope;
{
err_scope = self.error_scopes.borrow_mut().remove(&scope_id).unwrap();
}
if let Some(ref e) = err_scope.error {
match e {
GPUError::GPUValidationError(ref v) => promise.resolve_native(&v),
GPUError::GPUOutOfMemoryError(ref w) => promise.resolve_native(&w),
}
} else if err_scope.op_count == 0 {
promise.resolve_native(&None::<GPUError>);
}
err_scope.promise = Some(promise.clone());
if err_scope.op_count > 0 {
self.error_scopes.borrow_mut().insert(scope_id, err_scope);
}
promise
}
}
fn convert_address_mode(address_mode: GPUAddressMode) -> wgt::AddressMode {

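The error-scope machinery added to GPUDevice above follows a counting scheme: PushErrorScope creates a scope with op_count 0; each validatable create* call charges itself to the innermost scope (the repeated scope_stack/op_count block); every asynchronous result decrements the count; PopErrorScope either answers immediately or leaves the promise to be resolved by handle_server_msg once the last outstanding operation reports back. Below is a standalone model of that bookkeeping with illustrative names, not Servo's types: the promise is replaced by a plain popped flag and the outcome is returned directly, and it omits one detail of the real code, which resolves the promise as soon as the first error arrives even with operations still in flight.

use std::collections::HashMap;

#[derive(Debug)]
enum GpuError {
    Validation(String),
    OutOfMemory,
}

// Per-scope bookkeeping, mirroring ErrorScopeInfo: how many operations are
// still outstanding, the first error seen, and whether popErrorScope has
// already asked for the result.
struct ErrorScope {
    op_count: u64,
    error: Option<GpuError>,
    popped: bool,
}

#[derive(Default)]
struct Device {
    error_scopes: HashMap<u64, ErrorScope>,
    scope_stack: Vec<u64>,
    next_scope_id: u64,
}

impl Device {
    fn push_error_scope(&mut self) {
        let id = self.next_scope_id;
        self.next_scope_id += 1;
        self.error_scopes.insert(id, ErrorScope { op_count: 0, error: None, popped: false });
        self.scope_stack.push(id);
    }

    // Called before each validatable operation: charge it to the innermost scope.
    fn begin_op(&mut self) -> Option<u64> {
        let id = *self.scope_stack.last()?;
        self.error_scopes.get_mut(&id)?.op_count += 1;
        Some(id)
    }

    // popErrorScope: report immediately if everything already finished,
    // otherwise remember that a result is wanted.
    fn pop_error_scope(&mut self) -> Option<Option<GpuError>> {
        let id = self.scope_stack.pop()?;
        let scope = self.error_scopes.get_mut(&id)?;
        scope.popped = true;
        if scope.op_count == 0 {
            let scope = self.error_scopes.remove(&id)?;
            return Some(scope.error);
        }
        None // resolved later, when the last outstanding op reports back
    }

    // One asynchronous result arriving from the GPU thread.
    fn handle_server_msg(&mut self, scope_id: u64, result: Result<(), GpuError>) -> Option<Option<GpuError>> {
        let scope = self.error_scopes.get_mut(&scope_id)?;
        scope.op_count -= 1;
        if let Err(e) = result {
            scope.error.get_or_insert(e);
        }
        if scope.popped && scope.op_count == 0 {
            let scope = self.error_scopes.remove(&scope_id)?;
            return Some(scope.error);
        }
        None
    }
}

fn main() {
    let mut dev = Device::default();
    dev.push_error_scope();
    let scope = dev.begin_op().unwrap();
    assert!(dev.pop_error_scope().is_none()); // one op still outstanding
    let outcome = dev.handle_server_msg(scope, Err(GpuError::Validation("oops".into())));
    assert!(matches!(outcome, Some(Some(GpuError::Validation(_)))));
}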

@ -0,0 +1,38 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(dead_code)]
use crate::dom::bindings::codegen::Bindings::GPUDeviceLostInfoBinding::GPUDeviceLostInfoMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct GPUDeviceLostInfo {
reflector_: Reflector,
message: DOMString,
}
impl GPUDeviceLostInfo {
fn new_inherited(message: DOMString) -> Self {
Self {
reflector_: Reflector::new(),
message,
}
}
pub fn new(global: &GlobalScope, message: DOMString) -> DomRoot<Self> {
reflect_dom_object(Box::new(GPUDeviceLostInfo::new_inherited(message)), global)
}
}
impl GPUDeviceLostInfoMethods for GPUDeviceLostInfo {
/// https://gpuweb.github.io/gpuweb/#dom-gpudevicelostinfo-message
fn Message(&self) -> DOMString {
self.message.clone()
}
}


@ -0,0 +1,31 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct GPUOutOfMemoryError {
reflector_: Reflector,
}
impl GPUOutOfMemoryError {
fn new_inherited() -> Self {
Self {
reflector_: Reflector::new(),
}
}
pub fn new(global: &GlobalScope) -> DomRoot<Self> {
reflect_dom_object(Box::new(GPUOutOfMemoryError::new_inherited()), global)
}
/// https://gpuweb.github.io/gpuweb/#dom-gpuoutofmemoryerror-gpuoutofmemoryerror
#[allow(non_snake_case)]
pub fn Constructor(global: &GlobalScope) -> DomRoot<Self> {
GPUOutOfMemoryError::new(global)
}
}


@ -0,0 +1,42 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::GPUValidationErrorBinding::GPUValidationErrorMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct GPUValidationError {
reflector_: Reflector,
message: DOMString,
}
impl GPUValidationError {
fn new_inherited(message: DOMString) -> Self {
Self {
reflector_: Reflector::new(),
message,
}
}
pub fn new(global: &GlobalScope, message: DOMString) -> DomRoot<Self> {
reflect_dom_object(Box::new(GPUValidationError::new_inherited(message)), global)
}
/// https://gpuweb.github.io/gpuweb/#dom-gpuvalidationerror-gpuvalidationerror
#[allow(non_snake_case)]
pub fn Constructor(global: &GlobalScope, message: DOMString) -> DomRoot<Self> {
GPUValidationError::new(global, message)
}
}
impl GPUValidationErrorMethods for GPUValidationError {
/// https://gpuweb.github.io/gpuweb/#dom-gpuvalidationerror-message
fn Message(&self) -> DOMString {
self.message.clone()
}
}


@ -332,7 +332,9 @@ pub mod gpucommandencoder;
pub mod gpucomputepassencoder;
pub mod gpucomputepipeline;
pub mod gpudevice;
pub mod gpudevicelostinfo;
pub mod gpumapmode;
pub mod gpuoutofmemoryerror;
pub mod gpupipelinelayout;
pub mod gpuqueue;
pub mod gpurenderpassencoder;
@ -344,6 +346,7 @@ pub mod gpuswapchain;
pub mod gputexture;
pub mod gputextureusage;
pub mod gputextureview;
pub mod gpuvalidationerror;
pub mod hashchangeevent;
pub mod headers;
pub mod history;


@ -0,0 +1,13 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// https://gpuweb.github.io/gpuweb/#gpudevicelostinfo
[Exposed=(Window, DedicatedWorker), Pref="dom.webgpu.enabled"]
interface GPUDeviceLostInfo {
readonly attribute DOMString message;
};
partial interface GPUDevice {
readonly attribute Promise<GPUDeviceLostInfo> lost;
};


@ -0,0 +1,9 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// https://gpuweb.github.io/gpuweb/#gpuoutofmemoryerror
[Exposed=(Window, DedicatedWorker), Pref="dom.webgpu.enabled"]
interface GPUOutOfMemoryError {
constructor();
};


@ -0,0 +1,22 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// https://gpuweb.github.io/gpuweb/#gpuvalidationerror
[Exposed=(Window, DedicatedWorker), Pref="dom.webgpu.enabled"]
interface GPUValidationError {
constructor(DOMString message);
readonly attribute DOMString message;
};
typedef (GPUOutOfMemoryError or GPUValidationError) GPUError;
enum GPUErrorFilter {
"out-of-memory",
"validation"
};
partial interface GPUDevice {
void pushErrorScope(GPUErrorFilter filter);
Promise<GPUError?> popErrorScope();
};


@ -2060,6 +2060,15 @@ impl ScriptThread {
WebGPUMsg::FreeTexture(id) => self.gpu_id_hub.lock().kill_texture_id(id),
WebGPUMsg::FreeTextureView(id) => self.gpu_id_hub.lock().kill_texture_view_id(id),
WebGPUMsg::Exit => *self.webgpu_port.borrow_mut() = None,
WebGPUMsg::WebGPUOpResult {
device,
scope_id,
pipeline_id,
result,
} => {
let global = self.documents.borrow().find_global(pipeline_id).unwrap();
global.handle_wgpu_msg(device, scope_id, result);
},
_ => {},
}
}


@ -16,6 +16,7 @@ euclid = "0.20"
ipc-channel = "0.14"
log = "0.4"
malloc_size_of = { path = "../malloc_size_of" }
msg = { path = "../msg" }
serde = { version = "1.0", features = ["serde_derive"] }
servo_config = { path = "../config" }
smallvec = { version = "0.6", features = ["serde"] }


@ -2,7 +2,9 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::WebGPUDevice;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use serde::{Deserialize, Serialize};
use wgpu::{
hub::{GlobalIdentityHandlerFactory, IdentityHandler, IdentityHandlerFactory},
@ -14,7 +16,14 @@ use wgpu::{
};
use wgt::Backend;
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum WebGPUOpResult {
ValidationError(String),
OutOfMemoryError,
Success,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum WebGPUMsg {
FreeAdapter(AdapterId),
FreeDevice(DeviceId),
@ -32,6 +41,12 @@ pub enum WebGPUMsg {
FreeSurface(SurfaceId),
FreeShaderModule(ShaderModuleId),
FreeRenderBundle(RenderBundleId),
WebGPUOpResult {
device: WebGPUDevice,
scope_id: u64,
pipeline_id: PipelineId,
result: WebGPUOpResult,
},
Exit,
}
@ -56,7 +71,7 @@ macro_rules! impl_identity_handler {
fn free(&self, id: $id) {
log::debug!("free {} {:?}", $st, id);
let msg = $($var)*(id);
if self.sender.send(msg).is_err() {
if self.sender.send(msg.clone()).is_err() {
log::error!("Failed to send {:?}", msg);
}
}
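In the identity-handler changes above, WebGPUMsg loses its Copy derive because the new WebGPUOpResult variant carries a String, and free() now clones the message before sending so it can still be logged if the send fails. A small self-contained illustration of the same pattern, with a std::sync::mpsc channel standing in for the IpcSender:

use std::sync::mpsc;

// A String payload makes the message type Clone but not Copy, mirroring
// WebGPUOpResult::ValidationError(String) inside WebGPUMsg.
#[derive(Clone, Debug)]
enum Msg {
    Free(u32),
    OpResult { scope_id: u64, error: Option<String> },
}

fn main() {
    let (sender, receiver) = mpsc::channel::<Msg>();
    drop(receiver); // simulate the receiving end having shut down

    let msg = Msg::OpResult { scope_id: 0, error: Some("validation failed".into()) };
    // Clone before sending so the value is still available for the error log;
    // with the old Copy-able message this clone was not needed.
    if sender.send(msg.clone()).is_err() {
        eprintln!("Failed to send {:?}", msg);
    }

    let _ = Msg::Free(7);
}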

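In the WebGPU server-thread code that follows, every Create* request that can fail repeats the same report-back block: look up the owning pipeline, stringify the wgpu-core error into a WebGPUOpResult, and send a WebGPUMsg::WebGPUOpResult to the script thread. A hedged sketch of how that block could be factored into one helper; the helper and its names are hypothetical, not part of this commit, and a plain mpsc channel with a (scope_id, Result) tuple stands in for the IpcSender<WebGPUMsg>:

use std::sync::mpsc::Sender;

// Hypothetical helper: report one operation's outcome back to the script
// thread if it was charged to an error scope. The commit inlines this block
// in each WebGPURequest::Create* arm.
fn report_result<E: std::fmt::Debug>(
    script_sender: &Sender<(u64, Result<(), String>)>,
    scope_id: Option<u64>,
    result: Result<(), E>,
) {
    if let Some(s_id) = scope_id {
        // Any wgpu-core error is stringified into a validation error, as the
        // commit does with format!("{:?}", e).
        let op_result = result.map_err(|e| format!("{:?}", e));
        if script_sender.send((s_id, op_result)).is_err() {
            eprintln!("Failed to send WebGPUOpResult for scope {}", s_id);
        }
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    report_result(&tx, Some(0), Err("validation failed"));
    report_result::<&str>(&tx, None, Ok(())); // no enclosing scope: nothing is sent
    assert_eq!(rx.try_iter().count(), 1);
}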

@ -12,9 +12,10 @@ pub mod identity;
use arrayvec::ArrayVec;
use euclid::default::Size2D;
use identity::{IdentityRecyclerFactory, WebGPUMsg};
use identity::{IdentityRecyclerFactory, WebGPUMsg, WebGPUOpResult};
use ipc_channel::ipc::{self, IpcReceiver, IpcSender, IpcSharedMemory};
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use msg::constellation_msg::PipelineId;
use serde::{Deserialize, Serialize};
use servo_config::pref;
use smallvec::SmallVec;
@ -82,12 +83,14 @@ pub enum WebGPURequest {
},
CreateBindGroup {
device_id: id::DeviceId,
scope_id: Option<u64>,
bind_group_id: id::BindGroupId,
bind_group_layout_id: id::BindGroupLayoutId,
entries: Vec<(u32, WebGPUBindings)>,
},
CreateBindGroupLayout {
device_id: id::DeviceId,
scope_id: Option<u64>,
bind_group_layout_id: id::BindGroupLayoutId,
entries: Vec<wgt::BindGroupLayoutEntry>,
},
@ -104,6 +107,7 @@ pub enum WebGPURequest {
},
CreateComputePipeline {
device_id: id::DeviceId,
scope_id: Option<u64>,
compute_pipeline_id: id::ComputePipelineId,
pipeline_layout_id: id::PipelineLayoutId,
program_id: id::ShaderModuleId,
@ -112,11 +116,13 @@ pub enum WebGPURequest {
CreateContext(IpcSender<webrender_api::ExternalImageId>),
CreatePipelineLayout {
device_id: id::DeviceId,
scope_id: Option<u64>,
pipeline_layout_id: id::PipelineLayoutId,
bind_group_layouts: Vec<id::BindGroupLayoutId>,
},
CreateRenderPipeline {
device_id: id::DeviceId,
scope_id: Option<u64>,
render_pipeline_id: id::RenderPipelineId,
pipeline_layout_id: id::PipelineLayoutId,
vertex_module: id::ShaderModuleId,
@ -180,6 +186,7 @@ pub enum WebGPURequest {
adapter_id: WebGPUAdapter,
descriptor: wgt::DeviceDescriptor,
device_id: id::DeviceId,
pipeline_id: PipelineId,
},
RunComputePass {
command_encoder_id: id::CommandEncoderId,
@ -310,7 +317,7 @@ struct WGPU<'a> {
script_sender: IpcSender<WebGPUMsg>,
global: wgpu::hub::Global<IdentityRecyclerFactory>,
adapters: Vec<WebGPUAdapter>,
devices: Vec<WebGPUDevice>,
devices: HashMap<WebGPUDevice, PipelineId>,
// Track invalid adapters https://gpuweb.github.io/gpuweb/#invalid
_invalid_adapters: Vec<WebGPUAdapter>,
// Buffers with pending mapping
@ -343,7 +350,7 @@ impl<'a> WGPU<'a> {
script_sender,
global: wgpu::hub::Global::new("wgpu-core", factory, wgt::BackendBit::PRIMARY),
adapters: Vec::new(),
devices: Vec::new(),
devices: HashMap::new(),
_invalid_adapters: Vec::new(),
buffer_maps: HashMap::new(),
present_buffer_maps: HashMap::new(),
@ -443,6 +450,7 @@ impl<'a> WGPU<'a> {
},
WebGPURequest::CreateBindGroup {
device_id,
scope_id,
bind_group_id,
bind_group_layout_id,
mut entries,
@ -467,23 +475,66 @@ impl<'a> WGPU<'a> {
let descriptor = BindGroupDescriptor {
label: None,
layout: bind_group_layout_id,
bindings: bindings.as_slice(),
entries: bindings.as_slice(),
};
let _ = gfx_select!(bind_group_id =>
let result = gfx_select!(bind_group_id =>
global.device_create_bind_group(device_id, &descriptor, bind_group_id));
if let Some(s_id) = scope_id {
let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
let op_result;
if let Err(e) = result {
let error_msg = format!("{:?}", e);
op_result = WebGPUOpResult::ValidationError(error_msg);
} else {
op_result = WebGPUOpResult::Success;
}
if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
device: WebGPUDevice(device_id),
scope_id: s_id,
pipeline_id,
result: op_result,
}) {
warn!(
"Failed to send BindGroupResult({:?}) ({})",
bind_group_id, w
);
}
}
},
WebGPURequest::CreateBindGroupLayout {
device_id,
scope_id,
bind_group_layout_id,
entries,
} => {
let global = &self.global;
let descriptor = wgt::BindGroupLayoutDescriptor {
bindings: entries.as_slice(),
entries: entries.as_slice(),
label: None,
};
let _ = gfx_select!(bind_group_layout_id =>
let result = gfx_select!(bind_group_layout_id =>
global.device_create_bind_group_layout(device_id, &descriptor, bind_group_layout_id));
if let Some(s_id) = scope_id {
let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
let op_result;
if let Err(e) = result {
let error_msg = format!("{:?}", e);
op_result = WebGPUOpResult::ValidationError(error_msg);
} else {
op_result = WebGPUOpResult::Success;
}
if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
device: WebGPUDevice(device_id),
pipeline_id,
scope_id: s_id,
result: op_result,
}) {
warn!(
"Failed to send BindGroupLayoutResult({:?}) ({})",
bind_group_layout_id, w
);
}
}
},
WebGPURequest::CreateBuffer {
device_id,
@ -506,22 +557,43 @@ impl<'a> WGPU<'a> {
},
WebGPURequest::CreateComputePipeline {
device_id,
scope_id,
compute_pipeline_id,
pipeline_layout_id,
program_id,
entry_point,
} => {
let global = &self.global;
let entry_point = std::ffi::CString::new(entry_point).unwrap();
let descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
layout: pipeline_layout_id,
compute_stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: program_id,
entry_point: entry_point.as_ptr(),
entry_point: entry_point.as_str(),
},
};
let _ = gfx_select!(compute_pipeline_id =>
let result = gfx_select!(compute_pipeline_id =>
global.device_create_compute_pipeline(device_id, &descriptor, compute_pipeline_id));
if let Some(s_id) = scope_id {
let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
let op_result;
if let Err(e) = result {
let error_msg = format!("{:?}", e);
op_result = WebGPUOpResult::ValidationError(error_msg);
} else {
op_result = WebGPUOpResult::Success;
}
if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
device: WebGPUDevice(device_id),
scope_id: s_id,
pipeline_id,
result: op_result,
}) {
warn!(
"Failed to send ComputePipelineResult({:?}) ({})",
compute_pipeline_id, w
);
}
}
},
WebGPURequest::CreateContext(sender) => {
let id = self
@ -535,20 +607,43 @@ impl<'a> WGPU<'a> {
},
WebGPURequest::CreatePipelineLayout {
device_id,
scope_id,
pipeline_layout_id,
bind_group_layouts,
} => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_ptr(),
bind_group_layouts_length: bind_group_layouts.len(),
let descriptor = wgt::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_slice(),
push_constant_ranges: &[],
};
let _ = gfx_select!(pipeline_layout_id =>
let result = gfx_select!(pipeline_layout_id =>
global.device_create_pipeline_layout(device_id, &descriptor, pipeline_layout_id));
if let Some(s_id) = scope_id {
let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
let op_result;
if let Err(e) = result {
let error_msg = format!("{:?}", e);
op_result = WebGPUOpResult::ValidationError(error_msg);
} else {
op_result = WebGPUOpResult::Success;
}
if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
device: WebGPUDevice(device_id),
scope_id: s_id,
pipeline_id,
result: op_result,
}) {
warn!(
"Failed to send PipelineLayoutResult({:?}) ({})",
pipeline_layout_id, w
);
}
}
},
//TODO: consider https://github.com/gfx-rs/wgpu/issues/684
WebGPURequest::CreateRenderPipeline {
device_id,
scope_id,
render_pipeline_id,
pipeline_layout_id,
vertex_module,
@ -565,31 +660,27 @@ impl<'a> WGPU<'a> {
alpha_to_coverage_enabled,
} => {
let global = &self.global;
let vertex_ep = std::ffi::CString::new(vertex_entry_point).unwrap();
let frag_ep;
let frag_stage = match fragment_module {
let fragment_stage = match fragment_module {
Some(frag) => {
frag_ep =
std::ffi::CString::new(fragment_entry_point.unwrap()).unwrap();
frag_ep = fragment_entry_point.unwrap().clone();
let frag_module =
wgpu_core::pipeline::ProgrammableStageDescriptor {
module: frag,
entry_point: frag_ep.as_ptr(),
entry_point: frag_ep.as_str(),
};
Some(frag_module)
},
None => None,
};
let vert_buffers = vertex_state
.1
.iter()
.map(|&(array_stride, step_mode, ref attributes)| {
wgpu_core::pipeline::VertexBufferLayoutDescriptor {
array_stride,
.map(|&(stride, step_mode, ref attributes)| {
wgt::VertexBufferDescriptor {
stride,
step_mode,
attributes_length: attributes.len(),
attributes: attributes.as_ptr(),
attributes: attributes.as_slice(),
}
})
.collect::<Vec<_>>();
@ -597,30 +688,45 @@ impl<'a> WGPU<'a> {
layout: pipeline_layout_id,
vertex_stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: vertex_module,
entry_point: vertex_ep.as_ptr(),
entry_point: vertex_entry_point.as_str(),
},
fragment_stage: frag_stage
.as_ref()
.map_or(ptr::null(), |fs| fs as *const _),
fragment_stage,
primitive_topology,
rasterization_state: &rasterization_state as *const _,
color_states: color_states.as_ptr(),
color_states_length: color_states.len(),
depth_stencil_state: depth_stencil_state
.as_ref()
.map_or(ptr::null(), |dss| dss as *const _),
vertex_state: wgpu_core::pipeline::VertexStateDescriptor {
rasterization_state: Some(rasterization_state),
color_states: color_states.as_slice(),
depth_stencil_state,
vertex_state: wgt::VertexStateDescriptor {
index_format: vertex_state.0,
vertex_buffers_length: vertex_state.1.len(),
vertex_buffers: vert_buffers.as_ptr(),
vertex_buffers: vert_buffers.as_slice(),
},
sample_count,
sample_mask,
alpha_to_coverage_enabled,
};
let _ = gfx_select!(render_pipeline_id =>
let result = gfx_select!(render_pipeline_id =>
global.device_create_render_pipeline(device_id, &descriptor, render_pipeline_id));
if let Some(s_id) = scope_id {
let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
let op_result;
if let Err(e) = result {
let error_msg = format!("{:?}", e);
op_result = WebGPUOpResult::ValidationError(error_msg);
} else {
op_result = WebGPUOpResult::Success;
}
if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
device: WebGPUDevice(device_id),
scope_id: s_id,
pipeline_id,
result: op_result,
}) {
warn!(
"Failed to send RenderPipelineResult({:?}) ({})",
render_pipeline_id, w
);
}
}
},
WebGPURequest::CreateSampler {
device_id,
@ -764,7 +870,6 @@ impl<'a> WGPU<'a> {
} => {
let adapter_id = match self.global.pick_adapter(
&options,
wgt::UnsafeFeatures::disallow(),
wgpu::instance::AdapterInputs::IdSet(&ids, |id| id.backend()),
) {
Some(id) => id,
@ -800,20 +905,22 @@ impl<'a> WGPU<'a> {
adapter_id,
descriptor,
device_id,
pipeline_id,
} => {
let global = &self.global;
let id = gfx_select!(device_id => global.adapter_request_device(
let result = gfx_select!(device_id => global.adapter_request_device(
adapter_id.0,
&descriptor,
None,
device_id
));
// TODO: Handle error gracefully acc. to spec.
let id = result.unwrap();
let device = WebGPUDevice(id);
// Note: (zakorgy) Not sure if sending the queue is needed at all,
// since wgpu-core uses the same id for the device and the queue
let queue = WebGPUQueue(id);
self.devices.push(device);
self.devices.insert(device, pipeline_id);
if let Err(e) = sender.send(Ok(WebGPUResponse::RequestDevice {
device_id: device,
queue_id: queue,
@ -850,10 +957,7 @@ impl<'a> WGPU<'a> {
command_buffers,
} => {
let global = &self.global;
let _ = gfx_select!(queue_id => global.queue_submit(
queue_id,
&command_buffers
));
gfx_select!(queue_id => global.queue_submit(queue_id, &command_buffers));
},
WebGPURequest::SwapChainPresent {
external_id,
@ -936,7 +1040,7 @@ impl<'a> WGPU<'a> {
height: size.height as u32,
depth: 1,
};
gfx_select!(encoder_id => global.command_encoder_copy_texture_to_buffer(
let _ = gfx_select!(encoder_id => global.command_encoder_copy_texture_to_buffer(
encoder_id,
&texture_cv,
&buffer_cv,