Mirror of https://github.com/servo/servo.git (synced 2025-08-02 20:20:14 +01:00)
Move parallel_traversal from LayoutTaskData to LayoutTask.
Parent: 49691c1638
Commit: dcea03c2b4
1 changed file with 20 additions and 23 deletions
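
In broad strokes, the commit moves a piece of state that only the layout task itself ever touches -- the parallel work queue -- out of the mutex-guarded LayoutTaskData and onto LayoutTask directly, so it is reached through `self` instead of through the `rw_data` lock. A minimal sketch of the shape of that refactoring, using hypothetical stand-in types rather than Servo's real WorkQueue and SharedLayoutContext:

use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the parallel work queue.
struct WorkQueue;

// Before: the queue sits behind the task's Arc<Mutex<...>>, so every access
// pays for a lock even though only the owning task uses it.
struct LayoutTaskDataBefore {
    generation: u32,
    parallel_traversal: Option<WorkQueue>,
}

struct LayoutTaskBefore {
    rw_data: Arc<Mutex<LayoutTaskDataBefore>>,
}

// After: the queue is an ordinary private field of the task itself.
struct LayoutTaskDataAfter {
    generation: u32,
}

struct LayoutTaskAfter {
    parallel_traversal: Option<WorkQueue>,
    rw_data: Arc<Mutex<LayoutTaskDataAfter>>,
}

fn main() {
    let _task = LayoutTaskAfter {
        parallel_traversal: Some(WorkQueue),
        rw_data: Arc::new(Mutex::new(LayoutTaskDataAfter { generation: 0 })),
    };
}

The diff below follows exactly this shape: the field and its doc comment leave LayoutTaskData, reappear on LayoutTask, and the constructor wires the value into the new location.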
|
@@ -113,9 +113,6 @@ pub struct LayoutTaskData {
     /// Performs CSS selector matching and style resolution.
     pub stylist: Box<Stylist>,
 
-    /// The workers that we use for parallel operation.
-    pub parallel_traversal: Option<WorkQueue<SharedLayoutContext, WorkQueueData>>,
-
     /// Starts at zero, and increased by one every time a layout completes.
     /// This can be used to easily check for invalid stale data.
    pub generation: u32,
@@ -225,6 +222,9 @@ pub struct LayoutTask {
     pub canvas_layers_receiver: Receiver<(LayerId, IpcSender<CanvasMsg>)>,
     pub canvas_layers_sender: Sender<(LayerId, IpcSender<CanvasMsg>)>,
 
+    /// The workers that we use for parallel operation.
+    parallel_traversal: Option<WorkQueue<SharedLayoutContext, WorkQueueData>>,
+
     /// A mutex to allow for fast, read-only RPC of layout's internal data
     /// structures, while still letting the LayoutTask modify them.
     ///
@@ -434,6 +434,7 @@ impl LayoutTask {
             font_cache_sender: font_cache_sender,
             canvas_layers_receiver: canvas_layers_receiver,
             canvas_layers_sender: canvas_layers_sender,
+            parallel_traversal: parallel_traversal,
             rw_data: Arc::new(Mutex::new(
                 LayoutTaskData {
                     root_flow: None,
@@ -442,7 +443,6 @@ impl LayoutTask {
                     viewport_size: Size2D::new(Au(0), Au(0)),
                     stacking_context: None,
                     stylist: stylist,
-                    parallel_traversal: parallel_traversal,
                     generation: 0,
                     content_box_response: Rect::zero(),
                     content_boxes_response: Vec::new(),
@@ -671,7 +671,7 @@ impl LayoutTask {
         });
 
         // ... as do each of the LayoutWorkers, if present.
-        if let Some(ref traversal) = rw_data.parallel_traversal {
+        if let Some(ref traversal) = self.parallel_traversal {
             let sizes = traversal.heap_size_of_tls(heap_size_of_local_context);
             for (i, size) in sizes.iter().enumerate() {
                 reports.push(Report {
@@ -708,7 +708,7 @@ impl LayoutTask {
 
     /// Enters a quiescent state in which no new messages will be processed until an `ExitNow` is
     /// received. A pong is immediately sent on the given response channel.
-    fn prepare_to_exit<'a, 'b>(&self,
+    fn prepare_to_exit<'a, 'b>(&mut self,
                                response_chan: Sender<()>,
                                possibly_locked_rw_data: &mut RwData<'a, 'b>) {
         response_chan.send(()).unwrap();
@@ -736,13 +736,9 @@ impl LayoutTask {
 
     /// Shuts down the layout task now. If there are any DOM nodes left, layout will now (safely)
     /// crash.
-    fn exit_now<'a, 'b>(&self, possibly_locked_rw_data: &mut RwData<'a, 'b>) {
-        {
-            let mut rw_data = possibly_locked_rw_data.lock();
-            if let Some(ref mut traversal) = (&mut *rw_data).parallel_traversal {
-                traversal.shutdown()
-            }
-            possibly_locked_rw_data.block(rw_data);
-        }
+    fn exit_now<'a, 'b>(&mut self, _: &mut RwData<'a, 'b>) {
+        if let Some(ref mut traversal) = self.parallel_traversal {
+            traversal.shutdown()
+        }
 
         let (response_chan, response_port) = ipc::channel().unwrap();
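
The knock-on effect is visible in the signatures above: once `parallel_traversal` belongs to the task, any method that mutates it must take `&mut self`, and `exit_now` no longer needs to lock `rw_data` at all just to shut the workers down. A small sketch of that access pattern, again with a dummy queue rather than Servo's WorkQueue:

struct WorkQueue;

impl WorkQueue {
    // Dummy shutdown, standing in for whatever teardown the real queue does.
    fn shutdown(&mut self) {
        println!("workers shut down");
    }
}

struct LayoutTask {
    parallel_traversal: Option<WorkQueue>,
}

impl LayoutTask {
    // `&mut self` because shutting down mutates the owned queue; no lock needed.
    fn exit_now(&mut self) {
        if let Some(ref mut traversal) = self.parallel_traversal {
            traversal.shutdown()
        }
    }
}

fn main() {
    let mut task = LayoutTask { parallel_traversal: Some(WorkQueue) };
    task.exit_now();
}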
@@ -997,15 +993,16 @@ impl LayoutTask {
             }
         }
 
-    fn compute_abs_pos_and_build_display_list<'a>(&'a self,
+    fn compute_abs_pos_and_build_display_list<'a>(&'a mut self,
                                                   data: &Reflow,
                                                   layout_root: &mut FlowRef,
                                                   shared_layout_context: &mut SharedLayoutContext,
                                                   rw_data: &mut LayoutTaskData) {
         let writing_mode = flow::base(&**layout_root).writing_mode;
+        let (metadata, sender) = (self.profiler_metadata(), self.time_profiler_chan.clone());
         profile(time::ProfilerCategory::LayoutDispListBuild,
-                self.profiler_metadata(),
-                self.time_profiler_chan.clone(),
+                metadata.clone(),
+                sender.clone(),
                 || {
             flow::mut_base(flow_ref::deref_mut(layout_root)).stacking_relative_position =
                 LogicalPoint::zero(writing_mode).to_physical(writing_mode,
@@ -1014,11 +1011,11 @@ impl LayoutTask {
             flow::mut_base(flow_ref::deref_mut(layout_root)).clip =
                 ClippingRegion::from_rect(&data.page_clip_rect);
 
-            match (&mut rw_data.parallel_traversal, opts::get().parallel_display_list_building) {
+            match (&mut self.parallel_traversal, opts::get().parallel_display_list_building) {
                 (&mut Some(ref mut traversal), true) => {
                     parallel::build_display_list_for_subtree(layout_root,
-                                                             self.profiler_metadata(),
-                                                             self.time_profiler_chan.clone(),
+                                                             metadata,
+                                                             sender,
                                                              shared_layout_context,
                                                              traversal);
                 }
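
The new `let (metadata, sender) = ...` line exists because the closure now matches on `&mut self.parallel_traversal`: while that mutable borrow of one field is live, a whole-`self` method call such as `self.profiler_metadata()` can no longer be made in the same expression, so the values are computed up front and moved into the closure (the same hoist appears again further down before `solve_constraints_parallel`). A small illustration of the borrow conflict, with made-up types that only mimic the shape of the real code:

struct Metadata(String);
struct WorkQueue;

fn build_parallel(_traversal: &mut WorkQueue, metadata: Metadata) {
    println!("parallel build for {}", metadata.0);
}

struct LayoutTask {
    name: String,
    parallel_traversal: Option<WorkQueue>,
}

impl LayoutTask {
    fn profiler_metadata(&self) -> Metadata {
        Metadata(self.name.clone())
    }

    fn perform_layout(&mut self) {
        // Hoisted: `self.profiler_metadata()` borrows all of `self`, which
        // would conflict with the mutable borrow of `self.parallel_traversal`
        // held across the call below.
        let metadata = self.profiler_metadata();
        match self.parallel_traversal {
            Some(ref mut traversal) => {
                // Writing `self.profiler_metadata()` in this argument position
                // instead would not borrow-check, because `traversal` mutably
                // borrows a field of `self` for the duration of the call.
                build_parallel(traversal, metadata);
            }
            None => println!("sequential build for {}", metadata.0),
        }
    }
}

fn main() {
    let mut task = LayoutTask {
        name: "layout".to_string(),
        parallel_traversal: Some(WorkQueue),
    };
    task.perform_layout();
}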
@@ -1183,8 +1180,7 @@ impl LayoutTask {
                     self.time_profiler_chan.clone(),
                     || {
                 // Perform CSS selector matching and flow construction.
-                let rw_data = &mut *rw_data;
-                match rw_data.parallel_traversal {
+                match self.parallel_traversal {
                     None => {
                         sequential::traverse_dom_preorder(node, &shared_layout_context);
                     }
@@ -1379,7 +1375,8 @@ impl LayoutTask {
                     self.profiler_metadata(),
                     self.time_profiler_chan.clone(),
                     || {
-                match rw_data.parallel_traversal {
+                let profiler_metadata = self.profiler_metadata();
+                match self.parallel_traversal {
                     None => {
                         // Sequential mode.
                         LayoutTask::solve_constraints(&mut root_flow, &layout_context)
@@ -1388,7 +1385,7 @@ impl LayoutTask {
                         // Parallel mode.
                         LayoutTask::solve_constraints_parallel(parallel,
                                                                &mut root_flow,
-                                                               self.profiler_metadata(),
+                                                               profiler_metadata,
                                                                self.time_profiler_chan.clone(),
                                                                &*layout_context);
                     }