https://github.com/servo/servo.git

task -> thread

commit 1f02c4ebbb (parent f00532bab0)
119 changed files with 1209 additions and 1207 deletions
@@ -71,10 +71,10 @@ pub enum ImageCacheCommand {
     /// Synchronously check the state of an image in the cache.
     /// TODO(gw): Profile this on some real world sites and see
     /// if it's worth caching the results of this locally in each
-    /// layout / paint task.
+    /// layout / paint thread.
     GetImageIfAvailable(Url, UsePlaceholder, IpcSender<Result<Arc<Image>, ImageState>>),

-    /// Clients must wait for a response before shutting down the ResourceTask
+    /// Clients must wait for a response before shutting down the ResourceThread
     Exit(IpcSender<()>),
 }
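The GetImageIfAvailable variant above carries its own reply channel, so a synchronous check is a send followed by a blocking recv. A minimal sketch, assuming `chan: IpcSender<ImageCacheCommand>` and `url: Url` are already in scope and that UsePlaceholder has a `No` variant alongside the `Yes` shown in the next hunk:

    // Hypothetical caller of the synchronous availability check.
    let (sender, receiver) = ipc::channel().unwrap();
    chan.send(ImageCacheCommand::GetImageIfAvailable(url, UsePlaceholder::No, sender)).unwrap();
    match receiver.recv().unwrap() {
        Ok(_image) => { /* Arc<Image> is decoded and ready for layout / paint */ }
        Err(_state) => { /* not cached yet; ImageState reports the current state */ }
    }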
@@ -84,19 +84,19 @@ pub enum UsePlaceholder {
     Yes,
 }

-/// The client side of the image cache task. This can be safely cloned
-/// and passed to different tasks.
+/// The client side of the image cache thread. This can be safely cloned
+/// and passed to different threads.
 #[derive(Clone, Deserialize, Serialize)]
-pub struct ImageCacheTask {
+pub struct ImageCacheThread {
     chan: IpcSender<ImageCacheCommand>,
 }

-/// The public API for the image cache task.
-impl ImageCacheTask {
+/// The public API for the image cache thread.
+impl ImageCacheThread {

     /// Construct a new image cache
-    pub fn new(chan: IpcSender<ImageCacheCommand>) -> ImageCacheTask {
-        ImageCacheTask {
+    pub fn new(chan: IpcSender<ImageCacheCommand>) -> ImageCacheThread {
+        ImageCacheThread {
             chan: chan,
         }
     }
@@ -119,7 +119,7 @@ impl ImageCacheTask {
         receiver.recv().unwrap()
     }

-    /// Shutdown the image cache task.
+    /// Shutdown the image cache thread.
     pub fn exit(&self) {
         let (response_chan, response_port) = ipc::channel().unwrap();
         self.chan.send(ImageCacheCommand::Exit(response_chan)).unwrap();
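Putting the renamed constructor and exit() together, the client-side lifecycle looks roughly like this (a sketch only; `command_sender` stands in for whatever `IpcSender<ImageCacheCommand>` the cache implementation hands out):

    // Build the client handle, share clones with other threads, then shut down.
    let image_cache = ImageCacheThread::new(command_sender);
    let for_layout = image_cache.clone();   // Clone is derived, so handles are cheap to share
    // exit() blocks until the cache acknowledges on the Exit reply channel.
    image_cache.exit();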
@@ -37,10 +37,10 @@ use util::mem::HeapSizeOf;
 use websocket::header;

 pub mod hosts;
-pub mod image_cache_task;
+pub mod image_cache_thread;
 pub mod net_error_list;
 pub mod response;
-pub mod storage_task;
+pub mod storage_thread;

 /// Image handling.
 ///
@@ -160,8 +160,8 @@ pub enum LoadConsumer {
     Listener(AsyncResponseTarget),
 }

-/// Handle to a resource task
-pub type ResourceTask = IpcSender<ControlMsg>;
+/// Handle to a resource thread
+pub type ResourceThread = IpcSender<ControlMsg>;

 #[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
 pub enum IncludeSubdomains {
@@ -220,10 +220,10 @@ pub enum ControlMsg {
 }

 /// Initialized but unsent request. Encapsulates everything necessary to instruct
-/// the resource task to make a new request. The `load` method *must* be called before
-/// destruction or the task will panic.
+/// the resource thread to make a new request. The `load` method *must* be called before
+/// destruction or the thread will panic.
 pub struct PendingAsyncLoad {
-    resource_task: ResourceTask,
+    resource_thread: ResourceThread,
     url: Url,
     pipeline: Option<PipelineId>,
     guard: PendingLoadGuard,
@@ -249,10 +249,10 @@ impl Drop for PendingLoadGuard {
 }

 impl PendingAsyncLoad {
-    pub fn new(context: LoadContext, resource_task: ResourceTask, url: Url, pipeline: Option<PipelineId>)
+    pub fn new(context: LoadContext, resource_thread: ResourceThread, url: Url, pipeline: Option<PipelineId>)
                -> PendingAsyncLoad {
         PendingAsyncLoad {
-            resource_task: resource_task,
+            resource_thread: resource_thread,
             url: url,
             pipeline: pipeline,
             guard: PendingLoadGuard { loaded: false, },
@@ -265,7 +265,7 @@ impl PendingAsyncLoad {
         self.guard.neuter();
         let load_data = LoadData::new(self.context, self.url, self.pipeline);
         let consumer = LoadConsumer::Listener(listener);
-        self.resource_task.send(ControlMsg::Load(load_data, consumer, None)).unwrap();
+        self.resource_thread.send(ControlMsg::Load(load_data, consumer, None)).unwrap();
     }
 }

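For orientation, the intended call pattern for PendingAsyncLoad is: construct it, then hand it an AsyncResponseTarget listener so the request is actually fired before the guard is dropped. A sketch under assumptions: `context`, `resource_thread`, `url`, `pipeline_id`, and `listener` are in scope, and `load_async` is only a guessed name for the listener-taking method whose body appears in the hunk above:

    // Hypothetical call site; the method name `load_async` is assumed, not shown in these hunks.
    let pending = PendingAsyncLoad::new(context, resource_thread.clone(), url, Some(pipeline_id));
    pending.load_async(listener);   // must be called before drop, or PendingLoadGuard panics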
@@ -360,12 +360,12 @@ pub enum ProgressMsg {

 /// Convenience function for synchronously loading a whole resource.
 pub fn load_whole_resource(context: LoadContext,
-                           resource_task: &ResourceTask,
+                           resource_thread: &ResourceThread,
                            url: Url,
                            pipeline_id: Option<PipelineId>)
         -> Result<(Metadata, Vec<u8>), String> {
     let (start_chan, start_port) = ipc::channel().unwrap();
-    resource_task.send(ControlMsg::Load(LoadData::new(context, url, pipeline_id),
+    resource_thread.send(ControlMsg::Load(LoadData::new(context, url, pipeline_id),
                                         LoadConsumer::Channel(start_chan), None)).unwrap();
     let response = start_port.recv().unwrap();

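Call sites of the synchronous helper only see the renamed parameter. A minimal sketch of such a call, assuming `context: LoadContext`, `resource_thread: ResourceThread`, and `url: Url` are in scope:

    // Blocking fetch of a whole resource through the resource thread.
    match load_whole_resource(context, &resource_thread, url, None) {
        Ok((_metadata, _body)) => { /* response Metadata plus the raw body bytes */ }
        Err(_err) => { /* failure reported as a String */ }
    }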
@@ -13,7 +13,7 @@ pub enum StorageType {

 /// Request operations on the storage data associated with a particular url
 #[derive(Deserialize, Serialize)]
-pub enum StorageTaskMsg {
+pub enum StorageThreadMsg {
     /// gets the number of key/value pairs present in the associated storage data
     Length(IpcSender<usize>, Url, StorageType),

@@ -35,11 +35,11 @@ pub enum StorageTaskMsg {
     /// clears the associated storage data by removing all the key/value pairs
     Clear(IpcSender<bool>, Url, StorageType),

-    /// shut down this task
+    /// shut down this thread
     Exit
 }

-/// Handle to a storage task
-pub type StorageTask = IpcSender<StorageTaskMsg>;
+/// Handle to a storage thread
+pub type StorageThread = IpcSender<StorageThreadMsg>;

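The storage messages follow the same request/reply-channel pattern. A sketch of a Length query against the renamed handle, assuming `storage_thread: StorageThread`, `url: Url`, and `storage_type: StorageType` are in scope:

    // Ask the storage thread how many key/value pairs it holds for this url.
    let (sender, receiver) = ipc::channel().unwrap();
    storage_thread.send(StorageThreadMsg::Length(sender, url, storage_type)).unwrap();
    let pair_count: usize = receiver.recv().unwrap();
    // Unlike the image cache's acknowledged Exit, storage shutdown carries no reply channel.
    storage_thread.send(StorageThreadMsg::Exit).unwrap();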