Fixing some style-related issues in WebGPU.

Changed the `WebGPURequest`/`WebGPUResponse` variants from tuples to named struct variants and sorted them into alphabetical order.
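
As a minimal sketch of that pattern (simplified stand-in types for illustration, not the actual `WebGPURequest`/`WebGPUResponse` definitions from this commit), a positional tuple variant becomes a struct variant with named fields:

```rust
// Illustrative sketch only: simplified stand-in enums, not the real
// WebGPURequest/WebGPUResponse types touched by this commit.

// Before: positional tuple variant; callers must remember what each slot means.
enum RequestBefore {
    RequestAdapter(String, Vec<u32>),
}

// After: named struct variant; fields are self-describing at construction
// sites and in `match` arms, and variants can be kept in alphabetical order.
enum RequestAfter {
    RequestAdapter { options: String, ids: Vec<u32> },
}

fn main() {
    let _old = RequestBefore::RequestAdapter("default".to_string(), vec![1, 2]);
    let new = RequestAfter::RequestAdapter {
        options: "default".to_string(),
        ids: vec![1, 2],
    };
    if let RequestAfter::RequestAdapter { options, ids } = new {
        println!("options = {}, ids = {:?}", options, ids);
    }
}
```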
Replaced the ID generator functions in `globalscope` with a single function
that returns a `RefMut<Identities>`, through which callers can invoke the appropriate method to generate resource IDs.
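
A stand-alone sketch of that accessor pattern, assuming a simplified `Identities` hub that hands out plain `u64` IDs (the real hub generates backend-specific wgpu resource IDs):

```rust
use std::cell::{RefCell, RefMut};

// Simplified stand-in for the wgpu identity hub; the real `Identities`
// type creates backend-specific wgpu resource IDs.
struct Identities {
    next: u64,
}

impl Identities {
    fn create_buffer_id(&mut self) -> u64 {
        self.next += 1;
        self.next
    }
    fn create_device_id(&mut self) -> u64 {
        self.next += 1;
        self.next
    }
}

struct GlobalScope {
    gpu_id_hub: RefCell<Identities>,
}

impl GlobalScope {
    // One accessor replaces the family of wgpu_create_*_id wrappers:
    // callers borrow the hub mutably and call whichever generator they need.
    fn wgpu_id_hub(&self) -> RefMut<Identities> {
        self.gpu_id_hub.borrow_mut()
    }
}

fn main() {
    let global = GlobalScope {
        gpu_id_hub: RefCell::new(Identities { next: 0 }),
    };
    let buffer_id = global.wgpu_id_hub().create_buffer_id();
    let device_id = global.wgpu_id_hub().create_device_id();
    println!("buffer_id = {}, device_id = {}", buffer_id, device_id);
}
```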
Istvan Miklos 2020-02-05 11:32:52 +01:00
parent 0f9b04680a
commit 000a5d543d
10 changed files with 561 additions and 472 deletions

View file

@@ -2207,7 +2207,11 @@ where
Some(browsing_context_group) => {
let adapter_request =
if let FromScriptMsg::RequestAdapter(sender, options, ids) = request {
WebGPURequest::RequestAdapter(sender, options, ids)
WebGPURequest::RequestAdapter {
sender,
options,
ids,
}
} else {
return warn!("Wrong message type in handle_request_wgpu_adapter");
};

View file

@@ -91,9 +91,8 @@ use script_traits::{
};
use script_traits::{TimerEventId, TimerSchedulerMsg, TimerSource};
use servo_url::{MutableOrigin, ServoUrl};
use smallvec::SmallVec;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::cell::{Cell, RefCell, RefMut};
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::ffi::CString;
@@ -104,13 +103,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use time::{get_time, Timespec};
use uuid::Uuid;
use webgpu::wgpu::{
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandEncoderId, ComputePipelineId,
DeviceId, PipelineLayoutId, ShaderModuleId,
},
Backend,
};
#[derive(JSTraceable)]
pub struct AutoCloseWorker(Arc<AtomicBool>);
@@ -2429,50 +2421,8 @@ impl GlobalScope {
None
}
pub fn wgpu_create_adapter_ids(&self) -> SmallVec<[AdapterId; 4]> {
self.gpu_id_hub.borrow_mut().create_adapter_ids()
}
pub fn wgpu_create_bind_group_id(&self, backend: Backend) -> BindGroupId {
self.gpu_id_hub.borrow_mut().create_bind_group_id(backend)
}
pub fn wgpu_create_bind_group_layout_id(&self, backend: Backend) -> BindGroupLayoutId {
self.gpu_id_hub
.borrow_mut()
.create_bind_group_layout_id(backend)
}
pub fn wgpu_create_buffer_id(&self, backend: Backend) -> BufferId {
self.gpu_id_hub.borrow_mut().create_buffer_id(backend)
}
pub fn wgpu_create_device_id(&self, backend: Backend) -> DeviceId {
self.gpu_id_hub.borrow_mut().create_device_id(backend)
}
pub fn wgpu_create_pipeline_layout_id(&self, backend: Backend) -> PipelineLayoutId {
self.gpu_id_hub
.borrow_mut()
.create_pipeline_layout_id(backend)
}
pub fn wgpu_create_shader_module_id(&self, backend: Backend) -> ShaderModuleId {
self.gpu_id_hub
.borrow_mut()
.create_shader_module_id(backend)
}
pub fn wgpu_create_compute_pipeline_id(&self, backend: Backend) -> ComputePipelineId {
self.gpu_id_hub
.borrow_mut()
.create_compute_pipeline_id(backend)
}
pub fn wgpu_create_command_encoder_id(&self, backend: Backend) -> CommandEncoderId {
self.gpu_id_hub
.borrow_mut()
.create_command_encoder_id(backend)
pub fn wgpu_id_hub(&self) -> RefMut<Identities> {
self.gpu_id_hub.borrow_mut()
}
}

View file

@@ -115,7 +115,7 @@ impl GPUMethods for GPU {
},
None => wgpu::instance::PowerPreference::Default,
};
let ids = global.wgpu_create_adapter_ids();
let ids = global.wgpu_id_hub().create_adapter_ids();
let script_to_constellation_chan = global.script_to_constellation_chan();
if script_to_constellation_chan
@@ -135,13 +135,17 @@ impl GPUMethods for GPU {
impl AsyncWGPUListener for GPU {
fn handle_response(&self, response: WebGPUResponse, promise: &Rc<Promise>) {
match response {
WebGPUResponse::RequestAdapter(name, adapter, channel) => {
WebGPUResponse::RequestAdapter {
adapter_name,
adapter_id,
channel,
} => {
let adapter = GPUAdapter::new(
&self.global(),
channel,
DOMString::from(format!("{} ({:?})", name, adapter.0.backend())),
DOMString::from(format!("{} ({:?})", adapter_name, adapter_id.0.backend())),
Heap::default(),
adapter,
adapter_id,
);
promise.resolve_native(&adapter);
},

View file

@@ -91,11 +91,17 @@ impl GPUAdapterMethods for GPUAdapter {
};
let id = self
.global()
.wgpu_create_device_id(self.adapter.0.backend());
.wgpu_id_hub()
.create_device_id(self.adapter.0.backend());
if self
.channel
.0
.send(WebGPURequest::RequestDevice(sender, self.adapter, desc, id))
.send(WebGPURequest::RequestDevice {
sender,
adapter_id: self.adapter,
descriptor: desc,
device_id: id,
})
.is_err()
{
promise.reject_error(Error::Operation);
@@ -107,7 +113,11 @@ impl GPUAdapterMethods for GPUAdapter {
impl AsyncWGPUListener for GPUAdapter {
fn handle_response(&self, response: WebGPUResponse, promise: &Rc<Promise>) {
match response {
WebGPUResponse::RequestDevice(device_id, queue_id, _descriptor) => {
WebGPUResponse::RequestDevice {
device_id,
queue_id,
_descriptor,
} => {
let device = GPUDevice::new(
&self.global(),
self.channel.clone(),

View file

@@ -144,11 +144,11 @@ impl GPUBufferMethods for GPUBuffer {
Ok(array_buffer) => {
self.channel
.0
.send(WebGPURequest::UnmapBuffer(
self.device.0,
self.id(),
array_buffer.to_vec(),
))
.send(WebGPURequest::UnmapBuffer {
device_id: self.device.0,
buffer_id: self.id().0,
array_buffer: array_buffer.to_vec(),
})
.unwrap();
// Step 3.2
unsafe {
@@ -187,7 +187,7 @@ impl GPUBufferMethods for GPUBuffer {
};
self.channel
.0
.send(WebGPURequest::DestroyBuffer(self.buffer))
.send(WebGPURequest::DestroyBuffer(self.buffer.0))
.unwrap();
*self.state.borrow_mut() = GPUBufferState::Destroyed;
}
@@ -241,13 +241,13 @@ impl GPUBufferMethods for GPUBuffer {
if self
.channel
.0
.send(WebGPURequest::MapReadAsync(
.send(WebGPURequest::MapReadAsync {
sender,
self.buffer.0,
self.device.0,
self.usage,
self.size,
))
buffer_id: self.buffer.0,
device_id: self.device.0,
usage: self.usage,
size: self.size,
})
.is_err()
{
promise.reject_error(Error::Operation);

View file

@@ -87,14 +87,14 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
.insert(DomRoot::from_ref(destination));
self.channel
.0
.send(WebGPURequest::CopyBufferToBuffer(
self.encoder.0,
source.id().0,
.send(WebGPURequest::CopyBufferToBuffer {
command_encoder_id: self.encoder.0,
source_id: source.id().0,
source_offset,
destination.id().0,
destination_id: destination.id().0,
destination_offset,
size,
))
})
.expect("Failed to send CopyBufferToBuffer");
}
@@ -103,12 +103,12 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
let (sender, receiver) = ipc::channel().unwrap();
self.channel
.0
.send(WebGPURequest::CommandEncoderFinish(
.send(WebGPURequest::CommandEncoderFinish {
sender,
self.encoder.0,
command_encoder_id: self.encoder.0,
// TODO(zakorgy): We should use `_descriptor` here after it's not empty
// and the underlying wgpu-core struct is serializable
))
})
.expect("Failed to send Finish");
let buffer = receiver.recv().unwrap();

View file

@@ -81,11 +81,14 @@ impl GPUComputePassEncoderMethods for GPUComputePassEncoder {
/// https://gpuweb.github.io/gpuweb/#dom-gpurenderpassencoder-endpass
fn EndPass(&self) {
if let Some(raw_pass) = self.raw_pass.borrow_mut().take() {
let (pass_data, id) = unsafe { raw_pass.finish_compute() };
let (pass_data, command_encoder_id) = unsafe { raw_pass.finish_compute() };
self.channel
.0
.send(WebGPURequest::RunComputePass(id, pass_data))
.send(WebGPURequest::RunComputePass {
command_encoder_id,
pass_data,
})
.unwrap();
}
}

View file

@@ -169,15 +169,18 @@ impl GPUDeviceMethods for GPUDevice {
fn CreateBuffer(&self, descriptor: &GPUBufferDescriptor) -> DomRoot<GPUBuffer> {
let (valid, wgpu_descriptor) = self.validate_buffer_descriptor(descriptor);
let (sender, receiver) = ipc::channel().unwrap();
let id = self.global().wgpu_create_buffer_id(self.device.0.backend());
let id = self
.global()
.wgpu_id_hub()
.create_buffer_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateBuffer(
.send(WebGPURequest::CreateBuffer {
sender,
self.device,
id,
wgpu_descriptor,
))
device_id: self.device.0,
buffer_id: id,
descriptor: wgpu_descriptor,
})
.expect("Failed to create WebGPU buffer");
let buffer = receiver.recv().unwrap();
@@ -203,15 +206,18 @@ impl GPUDeviceMethods for GPUDevice {
) -> Vec<JSVal> {
let (valid, wgpu_descriptor) = self.validate_buffer_descriptor(descriptor);
let (sender, receiver) = ipc::channel().unwrap();
let id = self.global().wgpu_create_buffer_id(self.device.0.backend());
let buffer_id = self
.global()
.wgpu_id_hub()
.create_buffer_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateBufferMapped(
.send(WebGPURequest::CreateBufferMapped {
sender,
self.device,
id,
wgpu_descriptor.clone(),
))
device_id: self.device.0,
buffer_id,
descriptor: wgpu_descriptor.clone(),
})
.expect("Failed to create WebGPU buffer");
rooted!(in(*cx) let mut js_array_buffer = ptr::null_mut::<JSObject>());
@@ -385,17 +391,18 @@ impl GPUDeviceMethods for GPUDevice {
max_dynamic_storage_buffers_per_pipeline_layout >= 0;
let (sender, receiver) = ipc::channel().unwrap();
let id = self
let bind_group_layout_id = self
.global()
.wgpu_create_bind_group_layout_id(self.device.0.backend());
.wgpu_id_hub()
.create_bind_group_layout_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateBindGroupLayout(
.send(WebGPURequest::CreateBindGroupLayout {
sender,
self.device,
id,
bindings.clone(),
))
device_id: self.device.0,
bind_group_layout_id,
bindings: bindings.clone(),
})
.expect("Failed to create WebGPU BindGroupLayout");
let bgl = receiver.recv().unwrap();
@@ -463,17 +470,18 @@ impl GPUDeviceMethods for GPUDevice {
max_dynamic_storage_buffers_per_pipeline_layout >= 0;
let (sender, receiver) = ipc::channel().unwrap();
let id = self
let pipeline_layout_id = self
.global()
.wgpu_create_pipeline_layout_id(self.device.0.backend());
.wgpu_id_hub()
.create_pipeline_layout_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreatePipelineLayout(
.send(WebGPURequest::CreatePipelineLayout {
sender,
self.device,
id,
bgl_ids,
))
device_id: self.device.0,
pipeline_layout_id,
bind_group_layouts: bgl_ids,
})
.expect("Failed to create WebGPU PipelineLayout");
let pipeline_layout = receiver.recv().unwrap();
@@ -522,18 +530,19 @@ impl GPUDeviceMethods for GPUDevice {
})
.collect::<Vec<_>>();
let (sender, receiver) = ipc::channel().unwrap();
let id = self
let bind_group_id = self
.global()
.wgpu_create_bind_group_id(self.device.0.backend());
.wgpu_id_hub()
.create_bind_group_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateBindGroup(
.send(WebGPURequest::CreateBindGroup {
sender,
self.device,
id,
descriptor.layout.id(),
device_id: self.device.0,
bind_group_id,
bind_group_layout_id: descriptor.layout.id().0,
bindings,
))
})
.expect("Failed to create WebGPU BindGroup");
let bind_group = receiver.recv().unwrap();
@@ -550,17 +559,18 @@ impl GPUDeviceMethods for GPUDevice {
Uint32Array(program) => program.to_vec(),
String(program) => program.chars().map(|c| c as u32).collect::<Vec<u32>>(),
};
let id = self
let program_id = self
.global()
.wgpu_create_shader_module_id(self.device.0.backend());
.wgpu_id_hub()
.create_shader_module_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateShaderModule(
.send(WebGPURequest::CreateShaderModule {
sender,
self.device,
id,
device_id: self.device.0,
program_id,
program,
))
})
.expect("Failed to create WebGPU ShaderModule");
let shader_module = receiver.recv().unwrap();
@@ -575,20 +585,21 @@ impl GPUDeviceMethods for GPUDevice {
let pipeline = descriptor.parent.layout.id();
let program = descriptor.computeStage.module.id();
let entry_point = descriptor.computeStage.entryPoint.to_string();
let id = self
let compute_pipeline_id = self
.global()
.wgpu_create_compute_pipeline_id(self.device.0.backend());
.wgpu_id_hub()
.create_compute_pipeline_id(self.device.0.backend());
let (sender, receiver) = ipc::channel().unwrap();
self.channel
.0
.send(WebGPURequest::CreateComputePipeline(
.send(WebGPURequest::CreateComputePipeline {
sender,
self.device,
id,
pipeline.0,
program.0,
device_id: self.device.0,
compute_pipeline_id,
pipeline_layout_id: pipeline.0,
program_id: program.0,
entry_point,
))
})
.expect("Failed to create WebGPU ComputePipeline");
let compute_pipeline = receiver.recv().unwrap();
@@ -600,12 +611,17 @@ impl GPUDeviceMethods for GPUDevice {
_descriptor: &GPUCommandEncoderDescriptor,
) -> DomRoot<GPUCommandEncoder> {
let (sender, receiver) = ipc::channel().unwrap();
let id = self
let command_encoder_id = self
.global()
.wgpu_create_command_encoder_id(self.device.0.backend());
.wgpu_id_hub()
.create_command_encoder_id(self.device.0.backend());
self.channel
.0
.send(WebGPURequest::CreateCommandEncoder(sender, self.device, id))
.send(WebGPURequest::CreateCommandEncoder {
sender,
device_id: self.device.0,
command_encoder_id,
})
.expect("Failed to create WebGPU command encoder");
let encoder = receiver.recv().unwrap();

View file

@@ -64,10 +64,13 @@ impl GPUQueueMethods for GPUQueue {
// TODO: Generate error to the ErrorScope
return;
}
let buffer_ids = command_buffers.iter().map(|cb| cb.id().0).collect();
let command_buffers = command_buffers.iter().map(|cb| cb.id().0).collect();
self.channel
.0
.send(WebGPURequest::Submit(self.queue.0, buffer_ids))
.send(WebGPURequest::Submit {
queue_id: self.queue.0,
command_buffers,
})
.unwrap();
}
}

View file

@@ -11,11 +11,29 @@ use ipc_channel::ipc::{self, IpcReceiver, IpcSender, IpcSharedMemory};
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use servo_config::pref;
use smallvec::SmallVec;
use wgpu::{
binding_model::{BindGroupBinding, BindGroupLayoutBinding},
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, CommandEncoderId,
ComputePipelineId, DeviceId, PipelineLayoutId, QueueId, ShaderModuleId,
},
instance::{DeviceDescriptor, RequestAdapterOptions},
resource::BufferDescriptor,
BufferAddress,
};
#[derive(Debug, Deserialize, Serialize)]
pub enum WebGPUResponse {
RequestAdapter(String, WebGPUAdapter, WebGPU),
RequestDevice(WebGPUDevice, WebGPUQueue, wgpu::instance::DeviceDescriptor),
RequestAdapter {
adapter_name: String,
adapter_id: WebGPUAdapter,
channel: WebGPU,
},
RequestDevice {
device_id: WebGPUDevice,
queue_id: WebGPUQueue,
_descriptor: DeviceDescriptor,
},
MapReadAsync(IpcSharedMemory),
}
@@ -23,95 +41,105 @@ pub type WebGPUResponseResult = Result<WebGPUResponse, String>;
#[derive(Debug, Deserialize, Serialize)]
pub enum WebGPURequest {
RequestAdapter(
IpcSender<WebGPUResponseResult>,
wgpu::instance::RequestAdapterOptions,
SmallVec<[wgpu::id::AdapterId; 4]>,
),
RequestDevice(
IpcSender<WebGPUResponseResult>,
WebGPUAdapter,
wgpu::instance::DeviceDescriptor,
wgpu::id::DeviceId,
),
Exit(IpcSender<()>),
CreateBuffer(
IpcSender<WebGPUBuffer>,
WebGPUDevice,
wgpu::id::BufferId,
wgpu::resource::BufferDescriptor,
),
CreateBufferMapped(
IpcSender<WebGPUBuffer>,
WebGPUDevice,
wgpu::id::BufferId,
wgpu::resource::BufferDescriptor,
),
CreateBindGroup(
IpcSender<WebGPUBindGroup>,
WebGPUDevice,
wgpu::id::BindGroupId,
WebGPUBindGroupLayout,
Vec<wgpu::binding_model::BindGroupBinding>,
),
CreateBindGroupLayout(
IpcSender<WebGPUBindGroupLayout>,
WebGPUDevice,
wgpu::id::BindGroupLayoutId,
Vec<wgpu::binding_model::BindGroupLayoutBinding>,
),
CreateComputePipeline(
IpcSender<WebGPUComputePipeline>,
WebGPUDevice,
wgpu::id::ComputePipelineId,
wgpu::id::PipelineLayoutId,
wgpu::id::ShaderModuleId,
String,
),
CreatePipelineLayout(
IpcSender<WebGPUPipelineLayout>,
WebGPUDevice,
wgpu::id::PipelineLayoutId,
Vec<wgpu::id::BindGroupLayoutId>,
),
CreateShaderModule(
IpcSender<WebGPUShaderModule>,
WebGPUDevice,
wgpu::id::ShaderModuleId,
Vec<u32>,
),
MapReadAsync(
IpcSender<WebGPUResponseResult>,
wgpu::id::BufferId,
wgpu::id::DeviceId,
u32,
u64,
),
UnmapBuffer(wgpu::id::DeviceId, WebGPUBuffer, Vec<u8>),
DestroyBuffer(WebGPUBuffer),
CreateCommandEncoder(
IpcSender<WebGPUCommandEncoder>,
WebGPUDevice,
// TODO(zakorgy): Serialize CommandEncoderDescriptor in wgpu-core
// wgpu::command::CommandEncoderDescriptor,
wgpu::id::CommandEncoderId,
),
CopyBufferToBuffer(
wgpu::id::CommandEncoderId,
wgpu::id::BufferId,
wgpu::BufferAddress,
wgpu::id::BufferId,
wgpu::BufferAddress,
wgpu::BufferAddress,
),
CommandEncoderFinish(
IpcSender<WebGPUCommandBuffer>,
wgpu::id::CommandEncoderId,
CommandEncoderFinish {
sender: IpcSender<WebGPUCommandBuffer>,
command_encoder_id: CommandEncoderId,
// TODO(zakorgy): Serialize CommandBufferDescriptor in wgpu-core
// wgpu::command::CommandBufferDescriptor,
),
Submit(wgpu::id::QueueId, Vec<wgpu::id::CommandBufferId>),
RunComputePass(wgpu::id::CommandEncoderId, Vec<u8>),
},
CopyBufferToBuffer {
command_encoder_id: CommandEncoderId,
source_id: BufferId,
source_offset: BufferAddress,
destination_id: BufferId,
destination_offset: BufferAddress,
size: BufferAddress,
},
CreateBindGroup {
sender: IpcSender<WebGPUBindGroup>,
device_id: DeviceId,
bind_group_id: BindGroupId,
bind_group_layout_id: BindGroupLayoutId,
bindings: Vec<BindGroupBinding>,
},
CreateBindGroupLayout {
sender: IpcSender<WebGPUBindGroupLayout>,
device_id: DeviceId,
bind_group_layout_id: BindGroupLayoutId,
bindings: Vec<BindGroupLayoutBinding>,
},
CreateBuffer {
sender: IpcSender<WebGPUBuffer>,
device_id: DeviceId,
buffer_id: BufferId,
descriptor: BufferDescriptor,
},
CreateBufferMapped {
sender: IpcSender<WebGPUBuffer>,
device_id: DeviceId,
buffer_id: BufferId,
descriptor: BufferDescriptor,
},
CreateCommandEncoder {
sender: IpcSender<WebGPUCommandEncoder>,
device_id: DeviceId,
// TODO(zakorgy): Serialize CommandEncoderDescriptor in wgpu-core
// wgpu::command::CommandEncoderDescriptor,
command_encoder_id: CommandEncoderId,
},
CreateComputePipeline {
sender: IpcSender<WebGPUComputePipeline>,
device_id: DeviceId,
compute_pipeline_id: ComputePipelineId,
pipeline_layout_id: PipelineLayoutId,
program_id: ShaderModuleId,
entry_point: String,
},
CreatePipelineLayout {
sender: IpcSender<WebGPUPipelineLayout>,
device_id: DeviceId,
pipeline_layout_id: PipelineLayoutId,
bind_group_layouts: Vec<BindGroupLayoutId>,
},
CreateShaderModule {
sender: IpcSender<WebGPUShaderModule>,
device_id: DeviceId,
program_id: ShaderModuleId,
program: Vec<u32>,
},
DestroyBuffer(BufferId),
Exit(IpcSender<()>),
MapReadAsync {
sender: IpcSender<WebGPUResponseResult>,
buffer_id: BufferId,
device_id: DeviceId,
usage: u32,
size: u64,
},
RequestAdapter {
sender: IpcSender<WebGPUResponseResult>,
options: RequestAdapterOptions,
ids: SmallVec<[AdapterId; 4]>,
},
RequestDevice {
sender: IpcSender<WebGPUResponseResult>,
adapter_id: WebGPUAdapter,
descriptor: DeviceDescriptor,
device_id: DeviceId,
},
RunComputePass {
command_encoder_id: CommandEncoderId,
pass_data: Vec<u8>,
},
Submit {
queue_id: QueueId,
command_buffers: Vec<CommandBufferId>,
},
UnmapBuffer {
device_id: DeviceId,
buffer_id: BufferId,
array_buffer: Vec<u8>,
},
}
#[derive(Clone, Debug, Deserialize, Serialize)]
@@ -182,231 +210,10 @@ impl WGPU {
fn run(mut self) {
while let Ok(msg) = self.receiver.recv() {
match msg {
WebGPURequest::RequestAdapter(sender, options, ids) => {
let adapter_id = if let Some(pos) = self
.adapters
.iter()
.position(|adapter| ids.contains(&adapter.0))
{
self.adapters[pos].0
} else {
let adapter_id = match self.global.pick_adapter(
&options,
wgpu::instance::AdapterInputs::IdSet(&ids, |id| id.backend()),
) {
Some(id) => id,
None => {
if let Err(e) =
sender.send(Err("Failed to get webgpu adapter".to_string()))
{
warn!(
"Failed to send response to WebGPURequest::RequestAdapter ({})",
e
)
}
return;
},
};
adapter_id
};
let adapter = WebGPUAdapter(adapter_id);
self.adapters.push(adapter);
let global = &self.global;
let info = gfx_select!(adapter_id => global.adapter_get_info(adapter_id));
if let Err(e) = sender.send(Ok(WebGPUResponse::RequestAdapter(
info.name,
adapter,
WebGPU(self.sender.clone()),
))) {
warn!(
"Failed to send response to WebGPURequest::RequestAdapter ({})",
e
)
}
},
WebGPURequest::RequestDevice(sender, adapter, descriptor, id) => {
let global = &self.global;
let id = gfx_select!(id => global.adapter_request_device(
adapter.0,
&descriptor,
id
));
let device = WebGPUDevice(id);
// Note: (zakorgy) Note sure if sending the queue is needed at all,
// since wgpu-core uses the same id for the device and the queue
let queue = WebGPUQueue(id);
self.devices.push(device);
if let Err(e) =
sender.send(Ok(WebGPUResponse::RequestDevice(device, queue, descriptor)))
{
warn!(
"Failed to send response to WebGPURequest::RequestDevice ({})",
e
)
}
},
WebGPURequest::CreateBuffer(sender, device, id, descriptor) => {
let global = &self.global;
let buffer_id =
gfx_select!(id => global.device_create_buffer(device.0, &descriptor, id));
let buffer = WebGPUBuffer(buffer_id);
if let Err(e) = sender.send(buffer) {
warn!(
"Failed to send response to WebGPURequest::CreateBuffer ({})",
e
)
}
},
WebGPURequest::CreateBufferMapped(sender, device, id, descriptor) => {
let global = &self.global;
let (buffer_id, _arr_buff_ptr) = gfx_select!(id =>
global.device_create_buffer_mapped(device.0, &descriptor, id));
let buffer = WebGPUBuffer(buffer_id);
if let Err(e) = sender.send(buffer) {
warn!(
"Failed to send response to WebGPURequest::CreateBufferMapped ({})",
e
)
}
},
WebGPURequest::UnmapBuffer(device_id, buffer, array_buffer) => {
let global = &self.global;
gfx_select!(buffer.0 => global.device_set_buffer_sub_data(
device_id,
buffer.0,
0,
array_buffer.as_slice()
));
},
WebGPURequest::DestroyBuffer(buffer) => {
let global = &self.global;
gfx_select!(buffer.0 => global.buffer_destroy(buffer.0));
},
WebGPURequest::CreateBindGroup(sender, device, id, layout_id, bindings) => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::BindGroupDescriptor {
layout: layout_id.0,
bindings: bindings.as_ptr(),
bindings_length: bindings.len(),
};
let bg_id = gfx_select!(id => global.device_create_bind_group(device.0, &descriptor, id));
let bind_group = WebGPUBindGroup(bg_id);
if let Err(e) = sender.send(bind_group) {
warn!(
"Failed to send response to WebGPURequest::CreateBindGroup ({})",
e
)
}
},
WebGPURequest::CreateBindGroupLayout(sender, device, id, bindings) => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
bindings: bindings.as_ptr(),
bindings_length: bindings.len(),
};
let bgl_id = gfx_select!(id => global.device_create_bind_group_layout(device.0, &descriptor, id));
let bgl = WebGPUBindGroupLayout(bgl_id);
if let Err(e) = sender.send(bgl) {
warn!(
"Failed to send response to WebGPURequest::CreateBindGroupLayout ({})",
e
)
}
},
WebGPURequest::CreatePipelineLayout(sender, device, id, bind_group_layouts) => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_ptr(),
bind_group_layouts_length: bind_group_layouts.len(),
};
let pl_id = gfx_select!(id => global.device_create_pipeline_layout(device.0, &descriptor, id));
let pipeline_layout = WebGPUPipelineLayout(pl_id);
if let Err(e) = sender.send(pipeline_layout) {
warn!(
"Failed to send response to WebGPURequest::CreatePipelineLayout ({})",
e
)
}
},
WebGPURequest::CreateShaderModule(sender, device, id, program) => {
let global = &self.global;
let descriptor = wgpu_core::pipeline::ShaderModuleDescriptor {
code: wgpu_core::U32Array {
bytes: program.as_ptr(),
length: program.len(),
},
};
let sm_id = gfx_select!(id => global.device_create_shader_module(device.0, &descriptor, id));
let shader_module = WebGPUShaderModule(sm_id);
if let Err(e) = sender.send(shader_module) {
warn!(
"Failed to send response to WebGPURequest::CreateShaderModule ({})",
e
)
}
},
WebGPURequest::CreateComputePipeline(
WebGPURequest::CommandEncoderFinish {
sender,
device,
id,
layout,
program,
entry,
) => {
let global = &self.global;
let entry_point = std::ffi::CString::new(entry).unwrap();
let descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
layout,
compute_stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: program,
entry_point: entry_point.as_ptr(),
},
};
let cp_id = gfx_select!(id => global.device_create_compute_pipeline(device.0, &descriptor, id));
let compute_pipeline = WebGPUComputePipeline(cp_id);
if let Err(e) = sender.send(compute_pipeline) {
warn!(
"Failed to send response to WebGPURequest::CreateComputePipeline ({})",
e
)
}
},
WebGPURequest::CreateCommandEncoder(sender, device, id) => {
let global = &self.global;
let id = gfx_select!(id => global.device_create_command_encoder(device.0, &Default::default(), id));
if let Err(e) = sender.send(WebGPUCommandEncoder(id)) {
warn!(
"Failed to send response to WebGPURequest::CreateCommandEncoder ({})",
e
)
}
},
WebGPURequest::CopyBufferToBuffer(
command_encoder_id,
source,
source_offset,
destination,
destination_offset,
size,
) => {
let global = &self.global;
let _ = gfx_select!(command_encoder_id => global.command_encoder_copy_buffer_to_buffer(
command_encoder_id,
source,
source_offset,
destination,
destination_offset,
size
));
},
WebGPURequest::CommandEncoderFinish(sender, command_encoder_id) => {
} => {
let global = &self.global;
let command_buffer_id = gfx_select!(command_encoder_id => global.command_encoder_finish(
command_encoder_id,
@@ -419,7 +226,211 @@ impl WGPU {
)
}
},
WebGPURequest::MapReadAsync(sender, buffer_id, device_id, usage, size) => {
WebGPURequest::CopyBufferToBuffer {
command_encoder_id,
source_id,
source_offset,
destination_id,
destination_offset,
size,
} => {
let global = &self.global;
let _ = gfx_select!(command_encoder_id => global.command_encoder_copy_buffer_to_buffer(
command_encoder_id,
source_id,
source_offset,
destination_id,
destination_offset,
size
));
},
WebGPURequest::CreateBindGroup {
sender,
device_id,
bind_group_id,
bind_group_layout_id,
bindings,
} => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::BindGroupDescriptor {
layout: bind_group_layout_id,
bindings: bindings.as_ptr(),
bindings_length: bindings.len(),
};
let bg_id = gfx_select!(bind_group_id =>
global.device_create_bind_group(device_id, &descriptor, bind_group_id));
let bind_group = WebGPUBindGroup(bg_id);
if let Err(e) = sender.send(bind_group) {
warn!(
"Failed to send response to WebGPURequest::CreateBindGroup ({})",
e
)
}
},
WebGPURequest::CreateBindGroupLayout {
sender,
device_id,
bind_group_layout_id,
bindings,
} => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
bindings: bindings.as_ptr(),
bindings_length: bindings.len(),
};
let bgl_id = gfx_select!(bind_group_layout_id =>
global.device_create_bind_group_layout(device_id, &descriptor, bind_group_layout_id));
let bgl = WebGPUBindGroupLayout(bgl_id);
if let Err(e) = sender.send(bgl) {
warn!(
"Failed to send response to WebGPURequest::CreateBindGroupLayout ({})",
e
)
}
},
WebGPURequest::CreateBuffer {
sender,
device_id,
buffer_id,
descriptor,
} => {
let global = &self.global;
let id = gfx_select!(buffer_id => global.device_create_buffer(device_id, &descriptor, buffer_id));
let buffer = WebGPUBuffer(id);
if let Err(e) = sender.send(buffer) {
warn!(
"Failed to send response to WebGPURequest::CreateBuffer ({})",
e
)
}
},
WebGPURequest::CreateBufferMapped {
sender,
device_id,
buffer_id,
descriptor,
} => {
let global = &self.global;
let (buffer_id, _arr_buff_ptr) = gfx_select!(buffer_id =>
global.device_create_buffer_mapped(device_id, &descriptor, buffer_id));
let buffer = WebGPUBuffer(buffer_id);
if let Err(e) = sender.send(buffer) {
warn!(
"Failed to send response to WebGPURequest::CreateBufferMapped ({})",
e
)
}
},
WebGPURequest::CreateCommandEncoder {
sender,
device_id,
command_encoder_id,
} => {
let global = &self.global;
let id = gfx_select!(command_encoder_id =>
global.device_create_command_encoder(device_id, &Default::default(), command_encoder_id));
if let Err(e) = sender.send(WebGPUCommandEncoder(id)) {
warn!(
"Failed to send response to WebGPURequest::CreateCommandEncoder ({})",
e
)
}
},
WebGPURequest::CreateComputePipeline {
sender,
device_id,
compute_pipeline_id,
pipeline_layout_id,
program_id,
entry_point,
} => {
let global = &self.global;
let entry_point = std::ffi::CString::new(entry_point).unwrap();
let descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
layout: pipeline_layout_id,
compute_stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: program_id,
entry_point: entry_point.as_ptr(),
},
};
let cp_id = gfx_select!(compute_pipeline_id =>
global.device_create_compute_pipeline(device_id, &descriptor, compute_pipeline_id));
let compute_pipeline = WebGPUComputePipeline(cp_id);
if let Err(e) = sender.send(compute_pipeline) {
warn!(
"Failed to send response to WebGPURequest::CreateComputePipeline ({})",
e
)
}
},
WebGPURequest::CreatePipelineLayout {
sender,
device_id,
pipeline_layout_id,
bind_group_layouts,
} => {
let global = &self.global;
let descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_ptr(),
bind_group_layouts_length: bind_group_layouts.len(),
};
let pl_id = gfx_select!(pipeline_layout_id =>
global.device_create_pipeline_layout(device_id, &descriptor, pipeline_layout_id));
let pipeline_layout = WebGPUPipelineLayout(pl_id);
if let Err(e) = sender.send(pipeline_layout) {
warn!(
"Failed to send response to WebGPURequest::CreatePipelineLayout ({})",
e
)
}
},
WebGPURequest::CreateShaderModule {
sender,
device_id,
program_id,
program,
} => {
let global = &self.global;
let descriptor = wgpu_core::pipeline::ShaderModuleDescriptor {
code: wgpu_core::U32Array {
bytes: program.as_ptr(),
length: program.len(),
},
};
let sm_id = gfx_select!(program_id =>
global.device_create_shader_module(device_id, &descriptor, program_id));
let shader_module = WebGPUShaderModule(sm_id);
if let Err(e) = sender.send(shader_module) {
warn!(
"Failed to send response to WebGPURequest::CreateShaderModule ({})",
e
)
}
},
WebGPURequest::DestroyBuffer(buffer) => {
let global = &self.global;
gfx_select!(buffer => global.buffer_destroy(buffer));
},
WebGPURequest::Exit(sender) => {
self.deinit();
if let Err(e) = sender.send(()) {
warn!("Failed to send response to WebGPURequest::Exit ({})", e)
}
return;
},
WebGPURequest::MapReadAsync {
sender,
buffer_id,
device_id,
usage,
size,
} => {
let global = &self.global;
let on_read = move |status: wgpu::resource::BufferMapAsyncStatus,
ptr: *const u8| {
@@ -456,26 +467,114 @@ impl WGPU {
));
gfx_select!(device_id => global.device_poll(device_id, true));
},
WebGPURequest::Submit(queue_id, command_buffer_ids) => {
WebGPURequest::RequestAdapter {
sender,
options,
ids,
} => {
let adapter_id = if let Some(pos) = self
.adapters
.iter()
.position(|adapter| ids.contains(&adapter.0))
{
self.adapters[pos].0
} else {
let adapter_id = match self.global.pick_adapter(
&options,
wgpu::instance::AdapterInputs::IdSet(&ids, |id| id.backend()),
) {
Some(id) => id,
None => {
if let Err(e) =
sender.send(Err("Failed to get webgpu adapter".to_string()))
{
warn!(
"Failed to send response to WebGPURequest::RequestAdapter ({})",
e
)
}
return;
},
};
adapter_id
};
let adapter = WebGPUAdapter(adapter_id);
self.adapters.push(adapter);
let global = &self.global;
let _ = gfx_select!(queue_id => global.queue_submit(
queue_id,
&command_buffer_ids
));
let info = gfx_select!(adapter_id => global.adapter_get_info(adapter_id));
if let Err(e) = sender.send(Ok(WebGPUResponse::RequestAdapter {
adapter_name: info.name,
adapter_id: adapter,
channel: WebGPU(self.sender.clone()),
})) {
warn!(
"Failed to send response to WebGPURequest::RequestAdapter ({})",
e
)
}
},
WebGPURequest::RunComputePass(command_encoder_id, raw_data) => {
WebGPURequest::RequestDevice {
sender,
adapter_id,
descriptor,
device_id,
} => {
let global = &self.global;
let id = gfx_select!(device_id => global.adapter_request_device(
adapter_id.0,
&descriptor,
device_id
));
let device = WebGPUDevice(id);
// Note: (zakorgy) Note sure if sending the queue is needed at all,
// since wgpu-core uses the same id for the device and the queue
let queue = WebGPUQueue(id);
self.devices.push(device);
if let Err(e) = sender.send(Ok(WebGPUResponse::RequestDevice {
device_id: device,
queue_id: queue,
_descriptor: descriptor,
})) {
warn!(
"Failed to send response to WebGPURequest::RequestDevice ({})",
e
)
}
},
WebGPURequest::RunComputePass {
command_encoder_id,
pass_data,
} => {
let global = &self.global;
gfx_select!(command_encoder_id => global.command_encoder_run_compute_pass(
command_encoder_id,
&raw_data
&pass_data
));
},
WebGPURequest::Exit(sender) => {
self.deinit();
if let Err(e) = sender.send(()) {
warn!("Failed to send response to WebGPURequest::Exit ({})", e)
}
return;
WebGPURequest::Submit {
queue_id,
command_buffers,
} => {
let global = &self.global;
let _ = gfx_select!(queue_id => global.queue_submit(
queue_id,
&command_buffers
));
},
WebGPURequest::UnmapBuffer {
device_id,
buffer_id,
array_buffer,
} => {
let global = &self.global;
gfx_select!(buffer_id => global.device_set_buffer_sub_data(
device_id,
buffer_id,
0,
array_buffer.as_slice()
));
},
}
}
@@ -497,14 +596,14 @@ macro_rules! webgpu_resource {
};
}
webgpu_resource!(WebGPUAdapter, wgpu::id::AdapterId);
webgpu_resource!(WebGPUDevice, wgpu::id::DeviceId);
webgpu_resource!(WebGPUBuffer, wgpu::id::BufferId);
webgpu_resource!(WebGPUBindGroup, wgpu::id::BindGroupId);
webgpu_resource!(WebGPUBindGroupLayout, wgpu::id::BindGroupLayoutId);
webgpu_resource!(WebGPUComputePipeline, wgpu::id::ComputePipelineId);
webgpu_resource!(WebGPUPipelineLayout, wgpu::id::PipelineLayoutId);
webgpu_resource!(WebGPUShaderModule, wgpu::id::ShaderModuleId);
webgpu_resource!(WebGPUCommandEncoder, wgpu::id::CommandEncoderId);
webgpu_resource!(WebGPUCommandBuffer, wgpu::id::CommandBufferId);
webgpu_resource!(WebGPUQueue, wgpu::id::QueueId);
webgpu_resource!(WebGPUAdapter, AdapterId);
webgpu_resource!(WebGPUBindGroup, BindGroupId);
webgpu_resource!(WebGPUBindGroupLayout, BindGroupLayoutId);
webgpu_resource!(WebGPUBuffer, BufferId);
webgpu_resource!(WebGPUCommandBuffer, CommandBufferId);
webgpu_resource!(WebGPUCommandEncoder, CommandEncoderId);
webgpu_resource!(WebGPUComputePipeline, ComputePipelineId);
webgpu_resource!(WebGPUDevice, DeviceId);
webgpu_resource!(WebGPUPipelineLayout, PipelineLayoutId);
webgpu_resource!(WebGPUQueue, QueueId);
webgpu_resource!(WebGPUShaderModule, ShaderModuleId);