Auto merge of #17368 - bholley:rule_tree_gc, r=emilio

Rule tree gc never runs until teardown

https://bugzilla.mozilla.org/show_bug.cgi?id=1373725
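
The patch makes the root of the rule tree count how many nodes have been pushed onto its free list, and exposes a `Servo_MaybeGCRuleTree` FFI entry point so a sweep can run once that count crosses `RULE_TREE_GC_INTERVAL`, rather than only at teardown. Below is a minimal sketch of that heuristic under simplified assumptions: `Root`, `GC_INTERVAL`, `note_freed`, and the interval value are illustrative stand-ins, not Servo's actual API, and the real patch avoids the atomic read-modify-write because its free list is locked at the increment site (see the `Drop` hunk below).

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for RULE_TREE_GC_INTERVAL; the real constant's value may differ.
const GC_INTERVAL: usize = 300;

struct Root {
    // Number of nodes pushed onto the free list since the last sweep.
    free_count: AtomicUsize,
}

impl Root {
    // Called whenever a node is appended to the free list.
    fn note_freed(&self) {
        self.free_count.fetch_add(1, Ordering::Relaxed);
    }

    // Called periodically (e.g. after a restyle): sweep only once enough
    // garbage has accumulated, so most calls are a single atomic load.
    fn maybe_gc(&self) {
        if self.free_count.load(Ordering::Relaxed) > GC_INTERVAL {
            self.gc();
        }
    }

    fn gc(&self) {
        // ... walk the free list and drop dead nodes ...
        self.free_count.store(0, Ordering::Relaxed);
    }
}

fn main() {
    let root = Root { free_count: AtomicUsize::new(0) };
    for _ in 0..=GC_INTERVAL {
        root.note_freed();
    }
    root.maybe_gc(); // count is now GC_INTERVAL + 1, so this sweeps
    assert_eq!(root.free_count.load(Ordering::Relaxed), 0);
}
```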

---
This change is reviewable at https://reviewable.io/reviews/servo/servo/17368
Merged by bors-servo on 2017-06-16 14:13:38 -07:00 (committed via GitHub)
commit 2a856edf57
4 changed files with 67 additions and 22 deletions


@@ -122,6 +122,11 @@ impl RuleTree {
         self.root.clone()
     }
 
+    /// Returns true if there are no nodes in the tree aside from the root
+    /// node.
+    pub fn is_empty(&self) -> bool {
+        self.root.get().first_child.load(Ordering::Relaxed).is_null()
+    }
+
     fn dump<W: Write>(&self, guards: &StylesheetGuards, writer: &mut W) {
         let _ = writeln!(writer, " + RuleTree");
         self.root.get().dump(guards, writer, 0);
@@ -498,6 +503,25 @@ impl CascadeLevel {
         }
     }
 }
 
+// The root node never has siblings, but needs a free count. We use the same
+// storage for both to save memory.
+struct PrevSiblingOrFreeCount(AtomicPtr<RuleNode>);
+
+impl PrevSiblingOrFreeCount {
+    fn new() -> Self {
+        PrevSiblingOrFreeCount(AtomicPtr::new(ptr::null_mut()))
+    }
+
+    unsafe fn as_prev_sibling(&self) -> &AtomicPtr<RuleNode> {
+        &self.0
+    }
+
+    unsafe fn as_free_count(&self) -> &AtomicUsize {
+        unsafe {
+            mem::transmute(&self.0)
+        }
+    }
+}
+
 /// A node in the rule tree.
 pub struct RuleNode {
     /// The root node. Only the root has no root pointer, for obvious reasons.
@@ -516,18 +540,16 @@ pub struct RuleNode {
     refcount: AtomicUsize,
     first_child: AtomicPtr<RuleNode>,
     next_sibling: AtomicPtr<RuleNode>,
-    prev_sibling: AtomicPtr<RuleNode>,
+
+    /// Previous sibling pointer for all non-root nodes.
+    ///
+    /// For the root, stores the number of RuleNodes we have added to the
+    /// free list since the last GC. (We don't update this if we rescue a
+    /// RuleNode from the free list. It's just used as a heuristic to decide
+    /// when to run GC.)
+    prev_sibling_or_free_count: PrevSiblingOrFreeCount,
 
     /// The next item in the rule tree free list, that starts on the root node.
     next_free: AtomicPtr<RuleNode>,
-
-    /// Number of RuleNodes we have added to the free list since the last GC.
-    /// (We don't update this if we rescue a RuleNode from the free list. It's
-    /// just used as a heuristic to decide when to run GC.)
-    ///
-    /// Only used on the root RuleNode. (We could probably re-use one of the
-    /// sibling pointers to save space.)
-    free_count: AtomicUsize,
 }
 
 unsafe impl Sync for RuleTree {}
@@ -547,9 +569,8 @@ impl RuleNode {
             refcount: AtomicUsize::new(1),
             first_child: AtomicPtr::new(ptr::null_mut()),
             next_sibling: AtomicPtr::new(ptr::null_mut()),
-            prev_sibling: AtomicPtr::new(ptr::null_mut()),
+            prev_sibling_or_free_count: PrevSiblingOrFreeCount::new(),
             next_free: AtomicPtr::new(ptr::null_mut()),
-            free_count: AtomicUsize::new(0),
         }
     }
@@ -562,9 +583,8 @@ impl RuleNode {
             refcount: AtomicUsize::new(1),
             first_child: AtomicPtr::new(ptr::null_mut()),
             next_sibling: AtomicPtr::new(ptr::null_mut()),
-            prev_sibling: AtomicPtr::new(ptr::null_mut()),
+            prev_sibling_or_free_count: PrevSiblingOrFreeCount::new(),
             next_free: AtomicPtr::new(FREE_LIST_SENTINEL),
-            free_count: AtomicUsize::new(0),
         }
     }
@@ -583,6 +603,16 @@ impl RuleNode {
         }
     }
 
+    fn prev_sibling(&self) -> &AtomicPtr<RuleNode> {
+        debug_assert!(!self.is_root());
+        unsafe { self.prev_sibling_or_free_count.as_prev_sibling() }
+    }
+
+    fn free_count(&self) -> &AtomicUsize {
+        debug_assert!(self.is_root());
+        unsafe { self.prev_sibling_or_free_count.as_free_count() }
+    }
+
     /// Remove this rule node from the child list.
     ///
     /// This method doesn't use proper synchronization, and it's expected to be
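
The two accessors added above gate the shared word behind the node's role: only non-root nodes may see it as a sibling pointer, and only the root may see it as a free count. The `debug_assert!`s make a role mix-up panic in debug builds while costing nothing in release. A hypothetical, self-contained illustration of the same pattern:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct Node {
    is_root: bool,
    word: AtomicUsize, // shared storage; its meaning depends on the role
}

impl Node {
    // Role-checked accessor: free counts live only on the root.
    fn free_count(&self) -> &AtomicUsize {
        debug_assert!(self.is_root, "free_count is only valid on the root");
        &self.word
    }
}

fn main() {
    let root = Node { is_root: true, word: AtomicUsize::new(0) };
    root.free_count().store(5, Ordering::Relaxed);
    assert_eq!(root.free_count().load(Ordering::Relaxed), 5);
}
```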
@@ -596,7 +626,7 @@ impl RuleNode {
         // NB: The other siblings we use in this function can also be dead, so
         // we can't use `get` here, since it asserts.
         let prev_sibling =
-            self.prev_sibling.swap(ptr::null_mut(), Ordering::Relaxed);
+            self.prev_sibling().swap(ptr::null_mut(), Ordering::Relaxed);
 
         let next_sibling =
             self.next_sibling.swap(ptr::null_mut(), Ordering::Relaxed);
@@ -615,7 +645,7 @@ impl RuleNode {
         // otherwise we're done.
         if !next_sibling.is_null() {
             let next = &*next_sibling;
-            next.prev_sibling.store(prev_sibling, Ordering::Relaxed);
+            next.prev_sibling().store(prev_sibling, Ordering::Relaxed);
         }
     }
@@ -760,7 +790,7 @@ impl StrongRuleNode {
             // only be accessed again in a single-threaded manner when
             // we're sweeping possibly dead nodes.
             if let Some(ref l) = last {
-                node.prev_sibling.store(l.ptr(), Ordering::Relaxed);
+                node.prev_sibling().store(l.ptr(), Ordering::Relaxed);
             }
 
             return StrongRuleNode::new(node);
@@ -908,14 +938,14 @@ impl StrongRuleNode {
             let _ = Box::from_raw(weak.ptr());
         }
 
-        me.free_count.store(0, Ordering::Relaxed);
+        me.free_count().store(0, Ordering::Relaxed);
 
         debug_assert!(me.next_free.load(Ordering::Relaxed) == FREE_LIST_SENTINEL);
     }
 
     unsafe fn maybe_gc(&self) {
         debug_assert!(self.get().is_root(), "Can't call GC on a non-root node!");
-        if self.get().free_count.load(Ordering::Relaxed) > RULE_TREE_GC_INTERVAL {
+        if self.get().free_count().load(Ordering::Relaxed) > RULE_TREE_GC_INTERVAL {
             self.gc();
         }
     }
@@ -1339,6 +1369,11 @@ impl Drop for StrongRuleNode {
             // This can be relaxed since this pointer won't be read until GC.
             node.next_free.store(old_head, Ordering::Relaxed);
 
+            // Increment the free count. This doesn't need to be an RMW
+            // (read-modify-write) atomic operation, because the free list is
+            // "locked".
+            let old_free_count = root.free_count().load(Ordering::Relaxed);
+            root.free_count().store(old_free_count + 1, Ordering::Relaxed);
+
             // This can be release because of the locking of the free list, that
             // ensures that all the other nodes racing with this one are using
             // `Acquire`.
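
The new increment in `Drop` is a plain load followed by a store rather than a `fetch_add`. That is only sound because the free list is "locked" at this point, so no other thread can race on the counter; under contention the read-modify-write form would be required. A sketch of the two variants (helper names are hypothetical):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Valid only when some external mechanism (here, the free-list lock)
// guarantees no concurrent incrementers: two separate atomic operations.
fn increment_uncontended(count: &AtomicUsize) {
    let old = count.load(Ordering::Relaxed);
    count.store(old + 1, Ordering::Relaxed);
}

// Safe under contention: a single read-modify-write, so concurrent calls
// can never lose an update.
fn increment_contended(count: &AtomicUsize) {
    count.fetch_add(1, Ordering::Relaxed);
}

fn main() {
    let count = AtomicUsize::new(0);
    increment_uncontended(&count);
    increment_contended(&count);
    assert_eq!(count.load(Ordering::Relaxed), 2);
}
```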


@@ -1283,10 +1283,10 @@ impl Drop for Stylist {
         // dropped all strong rule node references before now, then we will
         // leak them, since there will be no way to call gc() on the rule tree
         // after this point.
-        //
-        // TODO(emilio): We can at least assert all the elements in the free
-        // list are indeed free.
         unsafe { self.rule_tree.gc(); }
+
+        // Assert against leaks.
+        debug_assert!(self.rule_tree.is_empty());
     }
 }


@@ -303,6 +303,16 @@ pub extern "C" fn Servo_TraverseSubtree(root: RawGeckoElementBorrowed,
     element.has_dirty_descendants() || element.borrow_data().unwrap().restyle.contains_restyle_data()
 }
 
+/// Checks whether the rule tree has crossed its threshold for unused nodes,
+/// and if so, frees them.
+#[no_mangle]
+pub extern "C" fn Servo_MaybeGCRuleTree(raw_data: RawServoStyleSetBorrowed) {
+    let per_doc_data = PerDocumentStyleData::from_ffi(raw_data).borrow_mut();
+    unsafe {
+        per_doc_data.stylist.rule_tree().maybe_gc();
+    }
+}
+
 #[no_mangle]
 pub extern "C" fn Servo_AnimationValues_Interpolate(from: RawServoAnimationValueBorrowed,
                                                     to: RawServoAnimationValueBorrowed,


@@ -42,7 +42,7 @@ size_of_test!(test_size_of_application_declaration_block, ApplicableDeclarationB
 
 // FIXME(bholley): This can shrink with a little bit of work.
 // See https://github.com/servo/servo/issues/17280
-size_of_test!(test_size_of_rule_node, RuleNode, 96);
+size_of_test!(test_size_of_rule_node, RuleNode, 88);
 
 // This is huge, but we allocate it on the stack and then never move it,
 // we only pass `&mut SourcePropertyDeclaration` references around.
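
The expected size drops from 96 to 88 bytes because `RuleNode` loses one standalone `AtomicUsize` field, a single 8-byte word on the 64-bit targets this test assumes. A quick check of that arithmetic:

```rust
use std::mem::size_of;
use std::sync::atomic::{AtomicPtr, AtomicUsize};

fn main() {
    // On 64-bit targets both atomics occupy one pointer-sized word, so
    // folding free_count into the sibling pointer saves exactly 8 bytes.
    assert_eq!(size_of::<AtomicUsize>(), 8);
    assert_eq!(size_of::<AtomicPtr<u8>>(), 8);
    assert_eq!(96 - size_of::<AtomicUsize>(), 88);
}
```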