Mirror of https://github.com/servo/servo.git (synced 2025-08-05 05:30:08 +01:00)
Fixing some style-related issues in WebGPU.
Changed the WebGPU request/response messages from tuple variants to named struct variants and sorted them alphabetically. Replaced the per-resource ID generator functions on `globalscope` with a single function that returns a `RefMut`, on which callers invoke the appropriate method to generate resource IDs.
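Roughly, the two changes follow the pattern sketched below. This is a minimal, standalone illustration rather than the actual Servo code: the ID types are simplified to `u64` aliases, `Identities` is reduced to a counter, and only two request variants are shown; the real `WebGPURequest`, `Identities`, and `GlobalScope` types live in Servo's script and webgpu crates with the fields visible in the diff.

```rust
use std::cell::{RefCell, RefMut};

// Simplified stand-ins for the wgpu resource IDs (the real ones come from webgpu::wgpu::id).
type DeviceId = u64;
type BufferId = u64;

// Named struct variants make each field self-documenting at the call site,
// instead of relying on positional meaning as tuple variants do.
enum WebGPURequest {
    CreateBuffer { device_id: DeviceId, buffer_id: BufferId },
    DestroyBuffer(BufferId),
}

// Simplified version of the `Identities` hub: one place that hands out resource IDs.
#[derive(Default)]
struct Identities {
    next_id: u64,
}

impl Identities {
    fn create_device_id(&mut self) -> DeviceId {
        self.next_id += 1;
        self.next_id
    }
    fn create_buffer_id(&mut self) -> BufferId {
        self.next_id += 1;
        self.next_id
    }
}

// Simplified `GlobalScope`: instead of one wgpu_create_*_id wrapper per resource type,
// expose the hub itself and let the caller pick the generator it needs.
struct GlobalScope {
    gpu_id_hub: RefCell<Identities>,
}

impl GlobalScope {
    fn wgpu_id_hub(&self) -> RefMut<Identities> {
        self.gpu_id_hub.borrow_mut()
    }
}

fn main() {
    let global = GlobalScope {
        gpu_id_hub: RefCell::new(Identities::default()),
    };
    // Caller-side usage mirrors the diff: global.wgpu_id_hub().create_buffer_id(...)
    let device_id = global.wgpu_id_hub().create_device_id();
    let buffer_id = global.wgpu_id_hub().create_buffer_id();
    let requests = vec![
        WebGPURequest::CreateBuffer { device_id, buffer_id },
        WebGPURequest::DestroyBuffer(buffer_id),
    ];
    for request in requests {
        match request {
            WebGPURequest::CreateBuffer { device_id, buffer_id } => {
                println!("create buffer {} on device {}", buffer_id, device_id);
            },
            WebGPURequest::DestroyBuffer(id) => println!("destroy buffer {}", id),
        }
    }
}
```

Returning `RefMut<Identities>` means a caller such as `global.wgpu_id_hub().create_buffer_id(backend)` holds the mutable borrow only for the duration of that call, which is why the per-resource wrapper methods on `GlobalScope` are no longer needed.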
This commit is contained in:
parent 0f9b04680a
commit 000a5d543d

10 changed files with 561 additions and 472 deletions
@@ -91,9 +91,8 @@ use script_traits::{
 };
 use script_traits::{TimerEventId, TimerSchedulerMsg, TimerSource};
 use servo_url::{MutableOrigin, ServoUrl};
-use smallvec::SmallVec;
 use std::borrow::Cow;
-use std::cell::{Cell, RefCell};
+use std::cell::{Cell, RefCell, RefMut};
 use std::collections::hash_map::Entry;
 use std::collections::{HashMap, VecDeque};
 use std::ffi::CString;
@@ -104,13 +103,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use time::{get_time, Timespec};
 use uuid::Uuid;
-use webgpu::wgpu::{
-    id::{
-        AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandEncoderId, ComputePipelineId,
-        DeviceId, PipelineLayoutId, ShaderModuleId,
-    },
-    Backend,
-};

 #[derive(JSTraceable)]
 pub struct AutoCloseWorker(Arc<AtomicBool>);
@@ -2429,50 +2421,8 @@ impl GlobalScope {
         None
     }

-    pub fn wgpu_create_adapter_ids(&self) -> SmallVec<[AdapterId; 4]> {
-        self.gpu_id_hub.borrow_mut().create_adapter_ids()
-    }
-
-    pub fn wgpu_create_bind_group_id(&self, backend: Backend) -> BindGroupId {
-        self.gpu_id_hub.borrow_mut().create_bind_group_id(backend)
-    }
-
-    pub fn wgpu_create_bind_group_layout_id(&self, backend: Backend) -> BindGroupLayoutId {
-        self.gpu_id_hub
-            .borrow_mut()
-            .create_bind_group_layout_id(backend)
-    }
-
-    pub fn wgpu_create_buffer_id(&self, backend: Backend) -> BufferId {
-        self.gpu_id_hub.borrow_mut().create_buffer_id(backend)
-    }
-
-    pub fn wgpu_create_device_id(&self, backend: Backend) -> DeviceId {
-        self.gpu_id_hub.borrow_mut().create_device_id(backend)
-    }
-
-    pub fn wgpu_create_pipeline_layout_id(&self, backend: Backend) -> PipelineLayoutId {
-        self.gpu_id_hub
-            .borrow_mut()
-            .create_pipeline_layout_id(backend)
-    }
-
-    pub fn wgpu_create_shader_module_id(&self, backend: Backend) -> ShaderModuleId {
-        self.gpu_id_hub
-            .borrow_mut()
-            .create_shader_module_id(backend)
-    }
-
-    pub fn wgpu_create_compute_pipeline_id(&self, backend: Backend) -> ComputePipelineId {
-        self.gpu_id_hub
-            .borrow_mut()
-            .create_compute_pipeline_id(backend)
-    }
-
-    pub fn wgpu_create_command_encoder_id(&self, backend: Backend) -> CommandEncoderId {
-        self.gpu_id_hub
-            .borrow_mut()
-            .create_command_encoder_id(backend)
+    pub fn wgpu_id_hub(&self) -> RefMut<Identities> {
+        self.gpu_id_hub.borrow_mut()
     }
 }

@@ -115,7 +115,7 @@ impl GPUMethods for GPU {
             },
             None => wgpu::instance::PowerPreference::Default,
         };
-        let ids = global.wgpu_create_adapter_ids();
+        let ids = global.wgpu_id_hub().create_adapter_ids();

         let script_to_constellation_chan = global.script_to_constellation_chan();
         if script_to_constellation_chan
@@ -135,13 +135,17 @@ impl GPUMethods for GPU {
 impl AsyncWGPUListener for GPU {
     fn handle_response(&self, response: WebGPUResponse, promise: &Rc<Promise>) {
         match response {
-            WebGPUResponse::RequestAdapter(name, adapter, channel) => {
+            WebGPUResponse::RequestAdapter {
+                adapter_name,
+                adapter_id,
+                channel,
+            } => {
                 let adapter = GPUAdapter::new(
                     &self.global(),
                     channel,
-                    DOMString::from(format!("{} ({:?})", name, adapter.0.backend())),
+                    DOMString::from(format!("{} ({:?})", adapter_name, adapter_id.0.backend())),
                     Heap::default(),
-                    adapter,
+                    adapter_id,
                 );
                 promise.resolve_native(&adapter);
             },

@@ -91,11 +91,17 @@ impl GPUAdapterMethods for GPUAdapter {
         };
         let id = self
             .global()
-            .wgpu_create_device_id(self.adapter.0.backend());
+            .wgpu_id_hub()
+            .create_device_id(self.adapter.0.backend());
         if self
             .channel
             .0
-            .send(WebGPURequest::RequestDevice(sender, self.adapter, desc, id))
+            .send(WebGPURequest::RequestDevice {
+                sender,
+                adapter_id: self.adapter,
+                descriptor: desc,
+                device_id: id,
+            })
             .is_err()
         {
             promise.reject_error(Error::Operation);
@@ -107,7 +113,11 @@ impl GPUAdapterMethods for GPUAdapter {
 impl AsyncWGPUListener for GPUAdapter {
     fn handle_response(&self, response: WebGPUResponse, promise: &Rc<Promise>) {
         match response {
-            WebGPUResponse::RequestDevice(device_id, queue_id, _descriptor) => {
+            WebGPUResponse::RequestDevice {
+                device_id,
+                queue_id,
+                _descriptor,
+            } => {
                 let device = GPUDevice::new(
                     &self.global(),
                     self.channel.clone(),

@@ -144,11 +144,11 @@ impl GPUBufferMethods for GPUBuffer {
             Ok(array_buffer) => {
                 self.channel
                     .0
-                    .send(WebGPURequest::UnmapBuffer(
-                        self.device.0,
-                        self.id(),
-                        array_buffer.to_vec(),
-                    ))
+                    .send(WebGPURequest::UnmapBuffer {
+                        device_id: self.device.0,
+                        buffer_id: self.id().0,
+                        array_buffer: array_buffer.to_vec(),
+                    })
                     .unwrap();
                 // Step 3.2
                 unsafe {
@@ -187,7 +187,7 @@ impl GPUBufferMethods for GPUBuffer {
         };
         self.channel
             .0
-            .send(WebGPURequest::DestroyBuffer(self.buffer))
+            .send(WebGPURequest::DestroyBuffer(self.buffer.0))
             .unwrap();
         *self.state.borrow_mut() = GPUBufferState::Destroyed;
     }
@@ -241,13 +241,13 @@ impl GPUBufferMethods for GPUBuffer {
         if self
             .channel
             .0
-            .send(WebGPURequest::MapReadAsync(
+            .send(WebGPURequest::MapReadAsync {
                 sender,
-                self.buffer.0,
-                self.device.0,
-                self.usage,
-                self.size,
-            ))
+                buffer_id: self.buffer.0,
+                device_id: self.device.0,
+                usage: self.usage,
+                size: self.size,
+            })
             .is_err()
         {
             promise.reject_error(Error::Operation);

@@ -87,14 +87,14 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
             .insert(DomRoot::from_ref(destination));
         self.channel
             .0
-            .send(WebGPURequest::CopyBufferToBuffer(
-                self.encoder.0,
-                source.id().0,
+            .send(WebGPURequest::CopyBufferToBuffer {
+                command_encoder_id: self.encoder.0,
+                source_id: source.id().0,
                 source_offset,
-                destination.id().0,
+                destination_id: destination.id().0,
                 destination_offset,
                 size,
-            ))
+            })
             .expect("Failed to send CopyBufferToBuffer");
     }

@@ -103,12 +103,12 @@ impl GPUCommandEncoderMethods for GPUCommandEncoder {
         let (sender, receiver) = ipc::channel().unwrap();
         self.channel
             .0
-            .send(WebGPURequest::CommandEncoderFinish(
+            .send(WebGPURequest::CommandEncoderFinish {
                 sender,
-                self.encoder.0,
+                command_encoder_id: self.encoder.0,
                 // TODO(zakorgy): We should use `_descriptor` here after it's not empty
                 // and the underlying wgpu-core struct is serializable
-            ))
+            })
             .expect("Failed to send Finish");

         let buffer = receiver.recv().unwrap();

@@ -81,11 +81,14 @@ impl GPUComputePassEncoderMethods for GPUComputePassEncoder {
     /// https://gpuweb.github.io/gpuweb/#dom-gpurenderpassencoder-endpass
     fn EndPass(&self) {
        if let Some(raw_pass) = self.raw_pass.borrow_mut().take() {
-            let (pass_data, id) = unsafe { raw_pass.finish_compute() };
+            let (pass_data, command_encoder_id) = unsafe { raw_pass.finish_compute() };

             self.channel
                 .0
-                .send(WebGPURequest::RunComputePass(id, pass_data))
+                .send(WebGPURequest::RunComputePass {
+                    command_encoder_id,
+                    pass_data,
+                })
                 .unwrap();
         }
     }

@@ -169,15 +169,18 @@ impl GPUDeviceMethods for GPUDevice {
     fn CreateBuffer(&self, descriptor: &GPUBufferDescriptor) -> DomRoot<GPUBuffer> {
         let (valid, wgpu_descriptor) = self.validate_buffer_descriptor(descriptor);
         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self.global().wgpu_create_buffer_id(self.device.0.backend());
+        let id = self
+            .global()
+            .wgpu_id_hub()
+            .create_buffer_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateBuffer(
+            .send(WebGPURequest::CreateBuffer {
                 sender,
-                self.device,
-                id,
-                wgpu_descriptor,
-            ))
+                device_id: self.device.0,
+                buffer_id: id,
+                descriptor: wgpu_descriptor,
+            })
             .expect("Failed to create WebGPU buffer");

         let buffer = receiver.recv().unwrap();
@@ -203,15 +206,18 @@ impl GPUDeviceMethods for GPUDevice {
     ) -> Vec<JSVal> {
         let (valid, wgpu_descriptor) = self.validate_buffer_descriptor(descriptor);
         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self.global().wgpu_create_buffer_id(self.device.0.backend());
+        let buffer_id = self
+            .global()
+            .wgpu_id_hub()
+            .create_buffer_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateBufferMapped(
+            .send(WebGPURequest::CreateBufferMapped {
                 sender,
-                self.device,
-                id,
-                wgpu_descriptor.clone(),
-            ))
+                device_id: self.device.0,
+                buffer_id,
+                descriptor: wgpu_descriptor.clone(),
+            })
             .expect("Failed to create WebGPU buffer");

         rooted!(in(*cx) let mut js_array_buffer = ptr::null_mut::<JSObject>());
@@ -385,17 +391,18 @@ impl GPUDeviceMethods for GPUDevice {
             max_dynamic_storage_buffers_per_pipeline_layout >= 0;

         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self
+        let bind_group_layout_id = self
             .global()
-            .wgpu_create_bind_group_layout_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_bind_group_layout_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateBindGroupLayout(
+            .send(WebGPURequest::CreateBindGroupLayout {
                 sender,
-                self.device,
-                id,
-                bindings.clone(),
-            ))
+                device_id: self.device.0,
+                bind_group_layout_id,
+                bindings: bindings.clone(),
+            })
             .expect("Failed to create WebGPU BindGroupLayout");

         let bgl = receiver.recv().unwrap();
@@ -463,17 +470,18 @@ impl GPUDeviceMethods for GPUDevice {
             max_dynamic_storage_buffers_per_pipeline_layout >= 0;

         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self
+        let pipeline_layout_id = self
             .global()
-            .wgpu_create_pipeline_layout_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_pipeline_layout_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreatePipelineLayout(
+            .send(WebGPURequest::CreatePipelineLayout {
                 sender,
-                self.device,
-                id,
-                bgl_ids,
-            ))
+                device_id: self.device.0,
+                pipeline_layout_id,
+                bind_group_layouts: bgl_ids,
+            })
             .expect("Failed to create WebGPU PipelineLayout");

         let pipeline_layout = receiver.recv().unwrap();
@@ -522,18 +530,19 @@ impl GPUDeviceMethods for GPUDevice {
             })
             .collect::<Vec<_>>();
         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self
+        let bind_group_id = self
             .global()
-            .wgpu_create_bind_group_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_bind_group_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateBindGroup(
+            .send(WebGPURequest::CreateBindGroup {
                 sender,
-                self.device,
-                id,
-                descriptor.layout.id(),
+                device_id: self.device.0,
+                bind_group_id,
+                bind_group_layout_id: descriptor.layout.id().0,
                 bindings,
-            ))
+            })
             .expect("Failed to create WebGPU BindGroup");

         let bind_group = receiver.recv().unwrap();
@@ -550,17 +559,18 @@ impl GPUDeviceMethods for GPUDevice {
             Uint32Array(program) => program.to_vec(),
             String(program) => program.chars().map(|c| c as u32).collect::<Vec<u32>>(),
         };
-        let id = self
+        let program_id = self
             .global()
-            .wgpu_create_shader_module_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_shader_module_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateShaderModule(
+            .send(WebGPURequest::CreateShaderModule {
                 sender,
-                self.device,
-                id,
+                device_id: self.device.0,
+                program_id,
                 program,
-            ))
+            })
             .expect("Failed to create WebGPU ShaderModule");

         let shader_module = receiver.recv().unwrap();
@@ -575,20 +585,21 @@ impl GPUDeviceMethods for GPUDevice {
         let pipeline = descriptor.parent.layout.id();
         let program = descriptor.computeStage.module.id();
         let entry_point = descriptor.computeStage.entryPoint.to_string();
-        let id = self
+        let compute_pipeline_id = self
             .global()
-            .wgpu_create_compute_pipeline_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_compute_pipeline_id(self.device.0.backend());
         let (sender, receiver) = ipc::channel().unwrap();
         self.channel
             .0
-            .send(WebGPURequest::CreateComputePipeline(
+            .send(WebGPURequest::CreateComputePipeline {
                 sender,
-                self.device,
-                id,
-                pipeline.0,
-                program.0,
+                device_id: self.device.0,
+                compute_pipeline_id,
+                pipeline_layout_id: pipeline.0,
+                program_id: program.0,
                 entry_point,
-            ))
+            })
             .expect("Failed to create WebGPU ComputePipeline");

         let compute_pipeline = receiver.recv().unwrap();
@@ -600,12 +611,17 @@ impl GPUDeviceMethods for GPUDevice {
         _descriptor: &GPUCommandEncoderDescriptor,
     ) -> DomRoot<GPUCommandEncoder> {
         let (sender, receiver) = ipc::channel().unwrap();
-        let id = self
+        let command_encoder_id = self
            .global()
-            .wgpu_create_command_encoder_id(self.device.0.backend());
+            .wgpu_id_hub()
+            .create_command_encoder_id(self.device.0.backend());
         self.channel
             .0
-            .send(WebGPURequest::CreateCommandEncoder(sender, self.device, id))
+            .send(WebGPURequest::CreateCommandEncoder {
+                sender,
+                device_id: self.device.0,
+                command_encoder_id,
+            })
             .expect("Failed to create WebGPU command encoder");
         let encoder = receiver.recv().unwrap();

@@ -64,10 +64,13 @@ impl GPUQueueMethods for GPUQueue {
             // TODO: Generate error to the ErrorScope
             return;
         }
-        let buffer_ids = command_buffers.iter().map(|cb| cb.id().0).collect();
+        let command_buffers = command_buffers.iter().map(|cb| cb.id().0).collect();
         self.channel
             .0
-            .send(WebGPURequest::Submit(self.queue.0, buffer_ids))
+            .send(WebGPURequest::Submit {
+                queue_id: self.queue.0,
+                command_buffers,
+            })
             .unwrap();
     }
 }