Auto merge of #6513 - Ms2ger:workqueue-data, r=pcwalton

Remove the data field from WorkQueue.



bors-servo 2015-07-03 07:22:44 -06:00
commit d09881b051
3 changed files with 14 additions and 31 deletions
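
The shape of the API change, in brief: the queue no longer owns arbitrary per-run user data for its whole lifetime; callers pass the data to run() instead of writing it into a public field before each traversal and resetting it afterwards. Below is a minimal sketch of that before/after shape. It uses a simplified stand-in type for illustration, not Servo's actual util::workqueue implementation.

    // Illustrative sketch only, not Servo's code: a minimal queue showing
    // how per-run data moves from a public field into run()'s signature.
    struct WorkQueue<QueueData> {
        work: Vec<fn(&QueueData)>,
        // Before this commit, the real WorkQueue also carried:
        //     /// Arbitrary user data.
        //     pub data: QueueData,
    }

    impl<QueueData> WorkQueue<QueueData> {
        // The constructor no longer takes (or stores) the user data.
        fn new() -> WorkQueue<QueueData> {
            WorkQueue { work: Vec::new() }
        }

        fn push(&mut self, unit: fn(&QueueData)) {
            self.work.push(unit);
        }

        // After: the data is scoped to a single run rather than to the
        // queue's entire lifetime.
        fn run(&mut self, data: QueueData) {
            for unit in self.work.drain(..) {
                unit(&data);
            }
        }
    }

    fn main() {
        let mut queue = WorkQueue::new();
        queue.push(|context: &String| println!("unit ran with {}", context));
        queue.run("shared context".to_string());
    }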

components/layout/layout_task.rs

@@ -300,8 +300,7 @@ impl LayoutTask {
             opts::get().initial_window_size.as_f32() * ScaleFactor::new(1.0));
         let parallel_traversal = if opts::get().layout_threads != 1 {
             Some(WorkQueue::new("LayoutWorker", task_state::LAYOUT,
-                                opts::get().layout_threads,
-                                SharedLayoutContextWrapper(ptr::null())))
+                                opts::get().layout_threads))
         } else {
             None
         };

components/layout/parallel.rs

@@ -21,7 +21,6 @@ use wrapper::{PreorderDomTraversal, PostorderDomTraversal};
 use profile_traits::time::{self, ProfilerMetadata, profile};
 use std::mem;
-use std::ptr;
 use std::sync::atomic::{AtomicIsize, Ordering};
 use util::opts;
 use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
@@ -446,28 +445,25 @@ fn build_display_list(unsafe_flow: UnsafeFlow,
 fn run_queue_with_custom_work_data_type<To,F>(
         queue: &mut WorkQueue<SharedLayoutContextWrapper, WorkQueueData>,
-        callback: F)
+        callback: F,
+        shared_layout_context: &SharedLayoutContext)
         where To: 'static + Send, F: FnOnce(&mut WorkQueue<SharedLayoutContextWrapper,To>) {
-    unsafe {
-        let queue: &mut WorkQueue<SharedLayoutContextWrapper,To> = mem::transmute(queue);
-        callback(queue);
-        queue.run();
-    }
+    let queue: &mut WorkQueue<SharedLayoutContextWrapper,To> = unsafe {
+        mem::transmute(queue)
+    };
+    callback(queue);
+    queue.run(SharedLayoutContextWrapper(shared_layout_context as *const _));
 }
 
 pub fn traverse_dom_preorder(root: LayoutNode,
                              shared_layout_context: &SharedLayoutContext,
                              queue: &mut WorkQueue<SharedLayoutContextWrapper, WorkQueueData>) {
-    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);
-
     run_queue_with_custom_work_data_type(queue, |queue| {
         queue.push(WorkUnit {
             fun: recalc_style,
             data: (box vec![layout_node_to_unsafe_layout_node(&root)], 0),
         });
-    });
-
-    queue.data = SharedLayoutContextWrapper(ptr::null());
+    }, shared_layout_context);
 }
 
 pub fn traverse_flow_tree_preorder(
@@ -482,8 +478,6 @@ pub fn traverse_flow_tree_preorder(
         root.traverse_postorder(&bubble_inline_sizes);
     }
 
-    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);
-
     run_queue_with_custom_work_data_type(queue, |queue| {
         profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
                 time_profiler_chan, || {
@@ -492,9 +486,7 @@ pub fn traverse_flow_tree_preorder(
                 data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
             })
         });
-    });
-
-    queue.data = SharedLayoutContextWrapper(ptr::null())
+    }, shared_layout_context);
 }
 
 pub fn build_display_list_for_subtree(
@@ -503,8 +495,6 @@ pub fn build_display_list_for_subtree(
                                       time_profiler_chan: time::ProfilerChan,
                                       shared_layout_context: &SharedLayoutContext,
                                       queue: &mut WorkQueue<SharedLayoutContextWrapper, WorkQueueData>) {
-    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);
-
     run_queue_with_custom_work_data_type(queue, |queue| {
         profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
                 time_profiler_chan, || {
@@ -513,7 +503,5 @@ pub fn build_display_list_for_subtree(
                 data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
             })
         });
-    });
-
-    queue.data = SharedLayoutContextWrapper(ptr::null())
+    }, shared_layout_context);
 }

components/util/workqueue.rs

@@ -230,8 +230,6 @@ pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
     port: Receiver<SupervisorMsg<QueueData, WorkData>>,
     /// The amount of work that has been enqueued.
     work_count: usize,
-    /// Arbitrary user data.
-    pub data: QueueData,
 }
 
 impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
@@ -239,8 +237,7 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
     /// it.
     pub fn new(task_name: &'static str,
                state: task_state::TaskState,
-               thread_count: usize,
-               user_data: QueueData) -> WorkQueue<QueueData, WorkData> {
+               thread_count: usize) -> WorkQueue<QueueData, WorkData> {
         // Set up data structures.
         let (supervisor_chan, supervisor_port) = channel();
         let (mut infos, mut threads) = (vec!(), vec!());
@@ -288,7 +285,6 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
             workers: infos,
             port: supervisor_port,
             work_count: 0,
-            data: user_data,
         }
     }
 
@@ -306,13 +302,13 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
     }
 
     /// Synchronously runs all the enqueued tasks and waits for them to complete.
-    pub fn run(&mut self) {
+    pub fn run(&mut self, data: QueueData) {
         // Tell the workers to start.
         let mut work_count = AtomicUsize::new(self.work_count);
         for worker in self.workers.iter_mut() {
             worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(),
                                               &mut work_count,
-                                              &self.data)).unwrap()
+                                              &data)).unwrap()
         }
 
         // Wait for the work to finish.
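
One way to see why the new signature is sound: run() blocks until every worker has finished, so the &data references sent to the workers cannot outlive the call. A standalone sketch of that lifetime argument using std::thread::scope; this is a modern std API used purely for illustration, and Servo's 2015 implementation manages its own worker threads and deques instead.

    use std::thread;

    // Illustrative sketch, not Servo's code: every worker borrows the
    // per-run data, and the scope joins them all before run() returns.
    fn run<QueueData: Sync>(thread_count: usize, data: QueueData) {
        thread::scope(|scope| {
            for index in 0..thread_count {
                let data = &data;
                scope.spawn(move || {
                    // Stand-in for a worker draining its deque.
                    println!("worker {} sees the per-run data", index);
                    let _ = data;
                });
            }
            // All workers are joined here, so &data never dangles.
        });
    }

    fn main() {
        run(4, String::from("shared context"));
    }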