Mirror of https://github.com/servo/servo.git (synced 2025-07-23 15:23:42 +01:00)
GC the rule tree only when the free list gets to a certain size.
commit 4e52bb49b9 (parent 181208a4e4)
2 changed files with 34 additions and 7 deletions
@@ -1180,11 +1180,8 @@ impl LayoutThread {
             shared_layout_context.style_context.stylist.rule_tree.dump_stdout();
         }
 
-        // GC The rule tree.
-        //
-        // FIXME(emilio): The whole point of the free list is not always freeing
-        // the list, find a good heuristic here for that.
-        unsafe { shared_layout_context.style_context.stylist.rule_tree.gc() }
+        // GC the rule tree if some heuristics are met.
+        unsafe { shared_layout_context.style_context.stylist.rule_tree.maybe_gc(); }
 
         // Perform post-style recalculation layout passes.
         self.perform_post_style_recalc_layout_passes(&data.reflow_info,
@@ -124,8 +124,18 @@ impl RuleTree {
     pub unsafe fn gc(&self) {
         self.root.gc();
     }
+
+    /// This can only be called when no other threads is accessing this tree.
+    pub unsafe fn maybe_gc(&self) {
+        self.root.maybe_gc();
+    }
 }
 
+/// The number of RuleNodes added to the free list before we will consider
+/// doing a GC when calling maybe_gc(). (The value is copied from Gecko,
+/// where it likely did not result from a rigorous performance analysis.)
+const RULE_TREE_GC_INTERVAL: usize = 300;
+
 struct RuleNode {
     /// The root node. Only the root has no root pointer, for obvious reasons.
     root: Option<WeakRuleNode>,
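
The new maybe_gc() entry point only triggers a real collection once RULE_TREE_GC_INTERVAL nodes have hit the free list since the last sweep. Here is a minimal standalone sketch of that heuristic; the FreeList type and note_freed/maybe_gc names are illustrative, not servo's actual API:

use std::sync::atomic::{AtomicUsize, Ordering};

const GC_INTERVAL: usize = 300; // same threshold the patch copies from Gecko

struct FreeList {
    // Entries pushed since the last collection (a heuristic only).
    free_count: AtomicUsize,
}

impl FreeList {
    fn note_freed(&self) {
        // Called once for every node that becomes garbage.
        self.free_count.fetch_add(1, Ordering::Relaxed);
    }

    fn maybe_gc(&self) {
        // Only sweep once enough garbage has piled up.
        if self.free_count.load(Ordering::SeqCst) > GC_INTERVAL {
            self.gc();
        }
    }

    fn gc(&self) {
        // ...walk the free list and drop unreferenced nodes here...
        self.free_count.store(0, Ordering::SeqCst);
    }
}

fn main() {
    let list = FreeList { free_count: AtomicUsize::new(0) };
    for _ in 0..=GC_INTERVAL {
        list.note_freed();
    }
    list.maybe_gc(); // 301 > 300, so this call actually collects
}
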
@@ -148,6 +158,14 @@ struct RuleNode {
 
     /// The next item in the rule tree free list, that starts on the root node.
     next_free: AtomicPtr<RuleNode>,
+
+    /// Number of RuleNodes we have added to the free list since the last GC.
+    /// (We don't update this if we rescue a RuleNode from the free list. It's
+    /// just used as a heuristic to decide when to run GC.)
+    ///
+    /// Only used on the root RuleNode. (We could probably re-use one of the
+    /// sibling pointers to save space.)
+    free_count: AtomicUsize,
 }
 
 unsafe impl Sync for RuleTree {}
@@ -169,6 +187,7 @@ impl RuleNode {
             next_sibling: AtomicPtr::new(ptr::null_mut()),
             prev_sibling: AtomicPtr::new(ptr::null_mut()),
             next_free: AtomicPtr::new(ptr::null_mut()),
+            free_count: AtomicUsize::new(0),
         }
     }
 
@@ -183,6 +202,7 @@ impl RuleNode {
             next_sibling: AtomicPtr::new(ptr::null_mut()),
             prev_sibling: AtomicPtr::new(ptr::null_mut()),
             next_free: AtomicPtr::new(FREE_LIST_SENTINEL),
+            free_count: AtomicUsize::new(0),
         }
     }
 
@@ -455,8 +475,17 @@ impl StrongRuleNode {
             }
         }
 
+        me.free_count.store(0, Ordering::SeqCst);
+
         debug_assert!(me.next_free.load(Ordering::SeqCst) == FREE_LIST_SENTINEL);
     }
+
+    unsafe fn maybe_gc(&self) {
+        debug_assert!(self.get().is_root(), "Can't call GC on a non-root node!");
+        if self.get().free_count.load(Ordering::SeqCst) > RULE_TREE_GC_INTERVAL {
+            self.gc();
+        }
+    }
 }
 
 #[derive(Clone)]
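
Note that maybe_gc() stays unsafe: the sweep walks the free list without synchronization, so the caller must guarantee that no other thread is touching the tree (in this patch, the layout thread calls it between reflow phases, per the first hunk). A hypothetical sketch, not servo code, of what the same contract looks like when it can be pushed into the type system:

struct Tree;

impl Tree {
    // Unsafe variant, mirroring the patch: the caller promises that no
    // other thread is accessing the tree while the sweep runs.
    unsafe fn maybe_gc(&self) {}

    // Hypothetical safe wrapper: a unique &mut borrow is a compile-time
    // proof of that same exclusivity.
    fn maybe_gc_exclusive(&mut self) {
        unsafe { self.maybe_gc() }
    }
}

fn main() {
    let mut tree = Tree;
    tree.maybe_gc_exclusive();
}

servo can't take a &mut here because the rule tree is reachable from multiple style threads, so the exclusivity guarantee has to remain a runtime protocol rather than a borrow.
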
@@ -521,8 +550,8 @@ impl Drop for StrongRuleNode {
             return;
         }
 
-        let free_list =
-            &unsafe { &*node.root.as_ref().unwrap().ptr() }.next_free;
+        let root = unsafe { &*node.root.as_ref().unwrap().ptr() };
+        let free_list = &root.next_free;
         loop {
             let next_free = free_list.load(Ordering::SeqCst);
             debug_assert!(!next_free.is_null());
@@ -535,6 +564,7 @@ impl Drop for StrongRuleNode {
                                                       Ordering::SeqCst);
             if existing == next_free {
                 // Successfully inserted, yay! Otherwise try again.
+                root.free_count.fetch_add(1, Ordering::Relaxed);
                 break;
             }
         }
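
In the Drop path above, the node is pushed onto the free list with a classic compare-and-swap retry loop (a Treiber-stack push), and free_count is bumped only after the winning CAS, so each dropped node is counted exactly once. A self-contained sketch of that push with simplified, illustrative types, using the modern compare_exchange API in place of the compare_and_swap call in the patch:

use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

struct Node {
    next_free: AtomicPtr<Node>,
}

struct Root {
    free_list: AtomicPtr<Node>,
    free_count: AtomicUsize,
}

// Push `node` onto the free list, retrying if another thread wins the race.
unsafe fn push_free(root: &Root, node: *mut Node) {
    loop {
        let head = root.free_list.load(Ordering::SeqCst);
        // Link the new node in front of the current head...
        (*node).next_free.store(head, Ordering::SeqCst);
        // ...then try to publish it as the new head.
        if root.free_list
               .compare_exchange(head, node, Ordering::SeqCst, Ordering::SeqCst)
               .is_ok() {
            // Only the winning CAS counts the node, so the GC heuristic
            // sees each freed node exactly once.
            root.free_count.fetch_add(1, Ordering::Relaxed);
            break;
        }
        // Lost the race: reload the head and retry.
    }
}

fn main() {
    let root = Root {
        free_list: AtomicPtr::new(ptr::null_mut()),
        free_count: AtomicUsize::new(0),
    };
    let node = Box::into_raw(Box::new(Node {
        next_free: AtomicPtr::new(ptr::null_mut()),
    }));
    unsafe { push_free(&root, node) };
    assert_eq!(root.free_count.load(Ordering::SeqCst), 1);
    unsafe { drop(Box::from_raw(node)) }; // reclaim, keeping the example leak-free
}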