diff --git a/components/selectors/parser.rs b/components/selectors/parser.rs
index a7f3d259fbd..c613d741eb6 100644
--- a/components/selectors/parser.rs
+++ b/components/selectors/parser.rs
@@ -62,7 +62,7 @@ macro_rules! with_all_bounds {
         /// NB: We need Clone so that we can derive(Clone) on structs that
         /// are parameterized on SelectorImpl. See
         /// https://github.com/rust-lang/rust/issues/26925
-        pub trait SelectorImpl: Clone + Sized {
+        pub trait SelectorImpl: Clone + Sized + 'static {
             type AttrValue: $($InSelector)*;
             type Identifier: $($InSelector)* + PrecomputedHash;
             type ClassName: $($InSelector)* + PrecomputedHash;
diff --git a/components/servo_arc/lib.rs b/components/servo_arc/lib.rs
index 5d91c9bc44f..4fbc094d070 100644
--- a/components/servo_arc/lib.rs
+++ b/components/servo_arc/lib.rs
@@ -69,22 +69,64 @@ macro_rules! offset_of {
 /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
 const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 
-pub struct Arc<T: ?Sized> {
-    // FIXME(bholley): When NonZero/Shared/Unique are stabilized, we should use
-    // Shared here to get the NonZero optimization. Gankro is working on this.
-    //
-    // If we need a compact Option<Arc<T>> beforehand, we can make a helper
-    // class that wraps the result of Arc::into_raw.
-    //
-    // https://github.com/rust-lang/rust/issues/27730
-    ptr: *mut ArcInner<T>,
+/// Wrapper type for pointers to get the non-zero optimization. When
+/// NonZero/Shared/Unique are stabilized, we should just use Shared
+/// here to get the same effect. Gankro is working on this in [1].
+///
+/// It's unfortunate that this needs to infect all the caller types
+/// with 'static. It would be nice to just use a &() and a PhantomData<T>
+/// instead, but then the compiler can't determine whether the &() should
+/// be thin or fat (which depends on whether or not T is sized). Given
+/// that this is all a temporary hack, this restriction is fine for now.
+///
+/// [1] https://github.com/rust-lang/rust/issues/27730
+pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T);
+impl<T: ?Sized> NonZeroPtrMut<T> {
+    pub fn new(ptr: *mut T) -> Self {
+        assert!(!(ptr as *mut u8).is_null());
+        NonZeroPtrMut(unsafe { mem::transmute(ptr) })
+    }
+
+    pub fn ptr(&self) -> *mut T {
+        self.0 as *const T as *mut T
+    }
+}
+
+impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> {
+    fn clone(&self) -> Self {
+        NonZeroPtrMut::new(self.ptr())
+    }
+}
+
+impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Pointer::fmt(&self.ptr(), f)
+    }
+}
+
+impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        <Self as fmt::Pointer>::fmt(self, f)
+    }
+}
+
+impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.ptr() == other.ptr()
+    }
+}
+
+impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {}
+
+pub struct Arc<T: ?Sized + 'static> {
+    p: NonZeroPtrMut<ArcInner<T>>,
 }
 
 /// An Arc that is known to be uniquely owned
 ///
 /// This lets us build arcs that we can mutate before
 /// freezing, without needing to change the allocation
-pub struct UniqueArc<T: ?Sized>(Arc<T>);
+pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>);
 
 impl<T> UniqueArc<T> {
     #[inline]
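The point of routing everything through `NonZeroPtrMut` is layout: because the wrapper is represented as a reference, the compiler knows it can never be null and can reuse that forbidden value as the `None` discriminant, so `Option<Arc<T>>` stays pointer-sized. A standalone sketch of the difference on a typical 64-bit target (the `RawArc`/`NicheArc` names are illustrative, not part of this patch):

```rust
use std::mem::size_of;

#[allow(dead_code)]
struct RawArc<T>(*mut T); // raw pointer: null is a valid value

#[allow(dead_code)]
struct NicheArc<T>(&'static mut T); // reference: guaranteed non-null

fn main() {
    // No niche: Option needs a separate discriminant word.
    assert_eq!(size_of::<Option<RawArc<u8>>>(), 2 * size_of::<usize>());
    // Niche: None is encoded as the (otherwise impossible) null value.
    assert_eq!(size_of::<Option<NicheArc<u8>>>(), size_of::<usize>());
}
```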
@@ -110,7 +152,7 @@ impl<T> Deref for UniqueArc<T> {
 impl<T> DerefMut for UniqueArc<T> {
     fn deref_mut(&mut self) -> &mut T {
         // We know this to be uniquely owned
-        unsafe { &mut (*self.0.ptr).data }
+        unsafe { &mut (*self.0.ptr()).data }
     }
 }
 
@@ -132,11 +174,11 @@ impl<T> Arc<T> {
             count: atomic::AtomicUsize::new(1),
             data: data,
         });
-        Arc { ptr: Box::into_raw(x) }
+        Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) }
     }
 
     pub fn into_raw(this: Self) -> *const T {
-        let ptr = unsafe { &((*this.ptr).data) as *const _ };
+        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
         mem::forget(this);
         ptr
     }
@@ -146,7 +188,7 @@ impl<T> Arc<T> {
         // to subtract the offset of the `data` field from the pointer.
         let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
         Arc {
-            ptr: ptr as *mut ArcInner<T>,
+            p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>),
         }
     }
 }
@@ -159,19 +201,23 @@ impl<T: ?Sized> Arc<T> {
         // `ArcInner` structure itself is `Sync` because the inner data is
         // `Sync` as well, so we're ok loaning out an immutable pointer to these
         // contents.
-        unsafe { &*self.ptr }
+        unsafe { &*self.ptr() }
     }
 
     // Non-inlined part of `drop`. Just invokes the destructor.
     #[inline(never)]
     unsafe fn drop_slow(&mut self) {
-        let _ = Box::from_raw(self.ptr);
+        let _ = Box::from_raw(self.ptr());
     }
 
     #[inline]
     pub fn ptr_eq(this: &Self, other: &Self) -> bool {
-        this.ptr == other.ptr
+        this.ptr() == other.ptr()
+    }
+
+    fn ptr(&self) -> *mut ArcInner<T> {
+        self.p.ptr()
     }
 }
 
@@ -210,7 +256,7 @@ impl<T: ?Sized> Clone for Arc<T> {
             panic!();
         }
 
-        Arc { ptr: self.ptr }
+        Arc { p: NonZeroPtrMut::new(self.ptr()) }
     }
 }
 
@@ -237,7 +283,7 @@ impl<T: Clone> Arc<T> {
             // reference count is guaranteed to be 1 at this point, and we required
             // the Arc itself to be `mut`, so we're returning the only possible
             // reference to the inner data.
-            &mut (*this.ptr).data
+            &mut (*this.ptr()).data
         }
     }
 }
 
@@ -248,7 +294,7 @@ impl<T: ?Sized> Arc<T> {
         if this.is_unique() {
             unsafe {
                 // See make_mut() for documentation of the threadsafety here.
-                Some(&mut (*this.ptr).data)
+                Some(&mut (*this.ptr()).data)
             }
         } else {
             None
@@ -357,7 +403,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
 
 impl<T: ?Sized> fmt::Pointer for Arc<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Pointer::fmt(&self.ptr, f)
+        fmt::Pointer::fmt(&self.ptr(), f)
     }
 }
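The `make_mut`/`get_mut` hunks above are mechanical (`this.ptr` becomes `this.ptr()`), so the copy-on-write contract is unchanged. For readers unfamiliar with it, here is a sketch using std's `Arc`, which has the same `make_mut` semantics as the servo version:

```rust
use std::sync::Arc;

fn main() {
    let mut a = Arc::new(vec![1, 2, 3]);
    let b = a.clone(); // refcount is now 2

    // Shared: make_mut clones the Vec first, so `b` is unaffected.
    Arc::make_mut(&mut a).push(4);
    assert_eq!(*a, vec![1, 2, 3, 4]);
    assert_eq!(*b, vec![1, 2, 3]);

    // Unique again: make_mut mutates in place, no clone.
    drop(b);
    Arc::make_mut(&mut a).push(5);
    assert_eq!(*a, vec![1, 2, 3, 4, 5]);
}
```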
@@ -481,6 +527,8 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
             //
             // To avoid alignment issues, we allocate words rather than bytes,
             // rounding up to the nearest word size.
+            assert!(mem::align_of::<T>() <= mem::align_of::<usize>(),
+                    "We don't handle over-aligned types");
             let words_to_allocate = divide_rounding_up(size, size_of::<usize>());
             let mut vec = Vec::<usize>::with_capacity(words_to_allocate);
             vec.set_len(words_to_allocate);
@@ -496,6 +544,9 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
             ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>;
 
             // Write the data.
+            //
+            // Note that any panics here (i.e. from the iterator) are safe, since
+            // we'll just leak the uninitialized memory.
             ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1));
             ptr::write(&mut ((*ptr).data.header), header);
             let mut current: *mut T = &mut (*ptr).data.slice[0];
@@ -511,7 +562,7 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
 
         // Return the fat Arc.
         assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat");
-        Arc { ptr: ptr }
+        Arc { p: NonZeroPtrMut::new(ptr) }
     }
 }
 
@@ -536,8 +587,9 @@ impl<H> HeaderWithLength<H> {
     }
 }
 
-pub struct ThinArc<H, T> {
-    ptr: *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T; 1]>>,
+type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;
+pub struct ThinArc<H: 'static, T: 'static> {
+    ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>,
 }
 
 unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
@@ -546,27 +598,27 @@ unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}
 // Synthesize a fat pointer from a thin pointer.
 //
 // See the comment around the analogous operation in from_header_and_iter.
-fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T; 1]>>)
-    -> *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T]>>
+fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>)
+    -> *mut ArcInner<HeaderSliceWithLength<H, [T]>>
 {
     let len = unsafe { (*thin).data.header.length };
     let fake_slice: *mut [T] = unsafe {
         slice::from_raw_parts_mut(thin as *mut T, len)
     };
 
-    fake_slice as *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T]>>
+    fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>>
 }
 
-impl<H, T> ThinArc<H, T> {
+impl<H: 'static, T: 'static> ThinArc<H, T> {
     /// Temporarily converts |self| into a bonafide Arc and exposes it to the
     /// provided callback. The refcount is not modified.
     #[inline(always)]
     pub fn with_arc<F, U>(&self, f: F) -> U
-        where F: FnOnce(&Arc<HeaderSlice<HeaderWithLength<H>, [T]>>) -> U
+        where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U
     {
         // Synthesize transient Arc, which never touches the refcount of the ArcInner.
         let transient = NoDrop::new(Arc {
-            ptr: thin_to_thick(self.ptr)
+            p: NonZeroPtrMut::new(thin_to_thick(self.ptr))
         });
 
         // Expose the transient Arc to the callback, which may clone it if it wants.
@@ -581,35 +633,35 @@ impl<H, T> ThinArc<H, T> {
 }
 
 impl<H, T> Deref for ThinArc<H, T> {
-    type Target = HeaderSlice<HeaderWithLength<H>, [T]>;
+    type Target = HeaderSliceWithLength<H, [T]>;
     fn deref(&self) -> &Self::Target {
         unsafe { &(*thin_to_thick(self.ptr)).data }
     }
 }
 
-impl<H, T> Clone for ThinArc<H, T> {
+impl<H: 'static, T: 'static> Clone for ThinArc<H, T> {
     fn clone(&self) -> Self {
         ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
     }
 }
 
-impl<H, T> Drop for ThinArc<H, T> {
+impl<H: 'static, T: 'static> Drop for ThinArc<H, T> {
     fn drop(&mut self) {
         let _ = Arc::from_thin(ThinArc { ptr: self.ptr });
     }
 }
 
-impl<H, T> Arc<HeaderSlice<HeaderWithLength<H>, [T]>> {
+impl<H: 'static, T: 'static> Arc<HeaderSliceWithLength<H, [T]>> {
     /// Converts an Arc into a ThinArc. This consumes the Arc, so the refcount
     /// is not modified.
     pub fn into_thin(a: Self) -> ThinArc<H, T> {
         assert!(a.header.length == a.slice.len(),
                 "Length needs to be correct for ThinArc to work");
-        let fat_ptr: *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T]>> = a.ptr;
+        let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr();
         mem::forget(a);
         let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
         ThinArc {
-            ptr: thin_ptr as *mut ArcInner<HeaderSlice<HeaderWithLength<H>, [T; 1]>>
+            ptr: thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>
         }
     }
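The trick `into_thin` and `thin_to_thick` rely on: a pointer to `[T; 1]` is thin (one word, since the array type is sized), while a pointer to `[T]` is fat (two words, pointer plus length). Because the length is also stored in the heap header, the fat pointer can be dropped on conversion to `ThinArc` and re-synthesized on demand with `slice::from_raw_parts_mut`. A standalone illustration of that size difference:

```rust
use std::mem::size_of;

fn main() {
    // A pointer to a sized type is one word ("thin").
    assert_eq!(size_of::<*const [u8; 1]>(), size_of::<usize>());
    // A pointer to a slice also carries its length ("fat").
    assert_eq!(size_of::<*const [u8]>(), 2 * size_of::<usize>());
}
```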
@@ -619,12 +671,12 @@ impl<H, T> Arc<HeaderSlice<HeaderWithLength<H>, [T]>> {
         let ptr = thin_to_thick(a.ptr);
         mem::forget(a);
         Arc {
-            ptr: ptr
+            p: NonZeroPtrMut::new(ptr)
         }
     }
 }
 
-impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
+impl<H: PartialEq + 'static, T: PartialEq + 'static> PartialEq for ThinArc<H, T> {
     fn eq(&self, other: &ThinArc<H, T>) -> bool {
         ThinArc::with_arc(self, |a| {
             ThinArc::with_arc(other, |b| {
@@ -634,7 +686,7 @@ impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
         })
     }
 }
 
-impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
+impl<H: Eq + 'static, T: Eq + 'static> Eq for ThinArc<H, T> {}
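A hedged sketch of the round trip these impls enable; it assumes `HeaderWithLength::new(header, length)` as the constructor (that impl is elided in the hunks above) and that `from_header_and_iter` takes an `ExactSizeIterator`:

```rust
use servo_arc::{Arc, HeaderWithLength, ThinArc};

fn demo(values: Vec<u32>) {
    // into_thin asserts that the length in the header matches the slice.
    let header = HeaderWithLength::new((), values.len());
    let fat = Arc::from_header_and_iter(header, values.into_iter());

    // One-word handle; the refcount is untouched by the conversion.
    let thin: ThinArc<(), u32> = Arc::into_thin(fat);
    assert_eq!(thin.header.length, thin.slice.len());

    // Temporarily view it as a fat Arc, again without touching the refcount.
    thin.with_arc(|arc| assert_eq!(arc.header.length, arc.slice.len()));

    let _fat_again = Arc::from_thin(thin); // consumes the ThinArc
}
```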
@@ -649,13 +701,7 @@ mod tests {
 
     impl Drop for Canary {
         fn drop(&mut self) {
-            unsafe {
-                match *self {
-                    Canary(c) => {
-                        (*c).fetch_add(1, SeqCst);
-                    }
-                }
-            }
+            unsafe { (*self.0).fetch_add(1, SeqCst); }
         }
     }
diff --git a/components/style/gecko_bindings/sugar/ownership.rs b/components/style/gecko_bindings/sugar/ownership.rs
index 6c4746013b3..0b5111b3870 100644
--- a/components/style/gecko_bindings/sugar/ownership.rs
+++ b/components/style/gecko_bindings/sugar/ownership.rs
@@ -11,7 +11,7 @@ use std::ptr;
 use stylearc::Arc;
 
 /// Indicates that a given Servo type has a corresponding Gecko FFI type.
-pub unsafe trait HasFFI : Sized {
+pub unsafe trait HasFFI : Sized + 'static {
     /// The corresponding Gecko type that this rust type represents.
     ///
     /// See the examples in `components/style/gecko/conversions.rs`.
diff --git a/components/style/properties/properties.mako.rs b/components/style/properties/properties.mako.rs
index 4114b2f2b22..1ec0dee0ef7 100644
--- a/components/style/properties/properties.mako.rs
+++ b/components/style/properties/properties.mako.rs
@@ -2193,7 +2193,7 @@ pub fn get_writing_mode(inheritedbox_style: &style_structs::InheritedBox) -> WritingMode {
 }
 
 /// A reference to a style struct of the parent, or our own style struct.
-pub enum StyleStructRef<'a, T: 'a> {
+pub enum StyleStructRef<'a, T: 'static> {
     /// A borrowed struct from the parent, for example, for inheriting style.
     Borrowed(&'a Arc<T>),
     /// An owned struct, that we've already mutated.
diff --git a/components/style/rule_tree/mod.rs b/components/style/rule_tree/mod.rs
index 2557b4c77e5..37e7334b9db 100644
--- a/components/style/rule_tree/mod.rs
+++ b/components/style/rule_tree/mod.rs
@@ -14,7 +14,7 @@ use smallvec::SmallVec;
 use std::io::{self, Write};
 use std::ptr;
 use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
-use stylearc::Arc;
+use stylearc::{Arc, NonZeroPtrMut};
 use stylesheets::StyleRule;
 use stylist::ApplicableDeclarationList;
 use thread_state;
@@ -637,7 +637,7 @@ impl RuleNode {
             current: if first_child.is_null() {
                 None
             } else {
-                Some(WeakRuleNode { ptr: first_child })
+                Some(WeakRuleNode::from_ptr(first_child))
             }
         }
     }
@@ -645,15 +645,13 @@ impl RuleNode {
 
 #[derive(Clone)]
 struct WeakRuleNode {
-    ptr: *mut RuleNode,
+    p: NonZeroPtrMut<RuleNode>,
 }
 
 /// A strong reference to a rule node.
 #[derive(Debug, PartialEq)]
 pub struct StrongRuleNode {
-    // TODO: Mark this as NonZero once stable to save space inside Option.
-    // https://github.com/rust-lang/rust/issues/27730
-    ptr: *mut RuleNode,
+    p: NonZeroPtrMut<RuleNode>,
 }
 
 #[cfg(feature = "servo")]
@@ -670,15 +668,17 @@ impl StrongRuleNode {
 
         debug!("Creating rule node: {:p}", ptr);
 
+        StrongRuleNode::from_ptr(ptr)
+    }
+
+    fn from_ptr(ptr: *mut RuleNode) -> Self {
         StrongRuleNode {
-            ptr: ptr,
+            p: NonZeroPtrMut::new(ptr)
         }
     }
 
     fn downgrade(&self) -> WeakRuleNode {
-        WeakRuleNode {
-            ptr: self.ptr,
-        }
+        WeakRuleNode::from_ptr(self.ptr())
     }
 
     fn next_sibling(&self) -> Option<WeakRuleNode> {
@@ -688,9 +688,7 @@ impl StrongRuleNode {
         if ptr.is_null() {
             None
         } else {
-            Some(WeakRuleNode {
-                ptr: ptr
-            })
+            Some(WeakRuleNode::from_ptr(ptr))
         }
     }
@@ -748,7 +746,7 @@ impl StrongRuleNode {
 
             // Existing is not null: some thread inserted a child node since
             // we accessed `last`.
-            strong = WeakRuleNode { ptr: existing }.upgrade();
+            strong = WeakRuleNode::from_ptr(existing).upgrade();
 
             if strong.get().source.as_ref().unwrap().ptr_equals(&source) {
                 // That node happens to be for the same style source, use
@@ -764,15 +762,15 @@ impl StrongRuleNode {
 
     /// Raw pointer to the RuleNode
     pub fn ptr(&self) -> *mut RuleNode {
-        self.ptr
+        self.p.ptr()
     }
 
     fn get(&self) -> &RuleNode {
         if cfg!(debug_assertions) {
-            let node = unsafe { &*self.ptr };
+            let node = unsafe { &*self.ptr() };
             assert!(node.refcount.load(Ordering::Relaxed) > 0);
         }
-        unsafe { &*self.ptr }
+        unsafe { &*self.ptr() }
     }
 
     /// Get the style source corresponding to this rule node. May return `None`
@@ -808,7 +806,7 @@ impl StrongRuleNode {
     unsafe fn pop_from_free_list(&self) -> Option<WeakRuleNode> {
         // NB: This can run from the root node destructor, so we can't use
        // `get()`, since it asserts the refcount is bigger than zero.
-        let me = &*self.ptr;
+        let me = &*self.ptr();
 
         debug_assert!(me.is_root());
 
@@ -831,7 +829,7 @@ impl StrongRuleNode {
             debug_assert!(!current.is_null(),
                           "Multiple threads are operating on the free list at the \
                            same time?");
-            debug_assert!(current != self.ptr,
+            debug_assert!(current != self.ptr(),
                           "How did the root end up in the free list?");
 
             let next = (*current).next_free.swap(ptr::null_mut(), Ordering::Relaxed);
@@ -843,17 +841,17 @@ impl StrongRuleNode {
         debug!("Popping from free list: cur: {:?}, next: {:?}", current, next);
 
-        Some(WeakRuleNode { ptr: current })
+        Some(WeakRuleNode::from_ptr(current))
     }
 
     unsafe fn assert_free_list_has_no_duplicates_or_null(&self) {
         assert!(cfg!(debug_assertions), "This is an expensive check!");
         use std::collections::HashSet;
 
-        let me = &*self.ptr;
+        let me = &*self.ptr();
         assert!(me.is_root());
 
-        let mut current = self.ptr;
+        let mut current = self.ptr();
         let mut seen = HashSet::new();
         while current != FREE_LIST_SENTINEL {
             let next = (*current).next_free.load(Ordering::Relaxed);
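For context, the free list these hunks touch is an intrusive list threaded through the nodes via `next_free` and terminated by a non-null sentinel rather than by null. A simplified sketch of the pop operation (illustrative `Node`/`SENTINEL` stand-ins, and assuming exclusive access, as in the GC path that calls `pop_from_free_list`):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct Node {
    next_free: AtomicPtr<Node>,
}

// Non-null terminator, so an empty list still has a non-null head.
const SENTINEL: *mut Node = 1 as *mut Node;

unsafe fn pop(head: &AtomicPtr<Node>) -> Option<*mut Node> {
    let current = head.load(Ordering::Relaxed);
    if current == SENTINEL {
        return None; // empty list
    }
    // Detach the node and point the head at its successor.
    let next = (*current).next_free.swap(ptr::null_mut(), Ordering::Relaxed);
    head.store(next, Ordering::Relaxed);
    Some(current)
}
```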
@@ -872,7 +870,7 @@ impl StrongRuleNode {
         // NB: This can run from the root node destructor, so we can't use
         // `get()`, since it asserts the refcount is bigger than zero.
-        let me = &*self.ptr;
+        let me = &*self.ptr();
 
         debug_assert!(me.is_root(), "Can't call GC on a non-root node!");
@@ -1237,19 +1235,17 @@ impl Clone for StrongRuleNode {
         debug!("{:?}: {:?}+", self.ptr(),
                self.get().refcount.load(Ordering::Relaxed));
         debug_assert!(self.get().refcount.load(Ordering::Relaxed) > 0);
         self.get().refcount.fetch_add(1, Ordering::Relaxed);
-        StrongRuleNode {
-            ptr: self.ptr,
-        }
+        StrongRuleNode::from_ptr(self.ptr())
     }
 }
 
 impl Drop for StrongRuleNode {
     fn drop(&mut self) {
-        let node = unsafe { &*self.ptr };
+        let node = unsafe { &*self.ptr() };
 
         debug!("{:?}: {:?}-", self.ptr(), node.refcount.load(Ordering::Relaxed));
         debug!("Dropping node: {:?}, root: {:?}, parent: {:?}",
-               self.ptr,
+               self.ptr(),
                node.root.as_ref().map(|r| r.ptr()),
                node.parent.as_ref().map(|p| p.ptr()));
         let should_drop = {
@@ -1330,9 +1326,7 @@ impl Drop for StrongRuleNode {
 
 impl<'a> From<&'a StrongRuleNode> for WeakRuleNode {
     fn from(node: &'a StrongRuleNode) -> Self {
-        WeakRuleNode {
-            ptr: node.ptr(),
-        }
+        WeakRuleNode::from_ptr(node.ptr())
     }
 }
 
@@ -1340,15 +1334,19 @@ impl WeakRuleNode {
     fn upgrade(&self) -> StrongRuleNode {
         debug!("Upgrading weak node: {:p}", self.ptr());
 
-        let node = unsafe { &*self.ptr };
+        let node = unsafe { &*self.ptr() };
         node.refcount.fetch_add(1, Ordering::Relaxed);
-        StrongRuleNode {
-            ptr: self.ptr,
+        StrongRuleNode::from_ptr(self.ptr())
+    }
+
+    fn from_ptr(ptr: *mut RuleNode) -> Self {
+        WeakRuleNode {
+            p: NonZeroPtrMut::new(ptr)
         }
     }
 
     fn ptr(&self) -> *mut RuleNode {
-        self.ptr
+        self.p.ptr()
     }
 }
diff --git a/tests/unit/script/size_of.rs b/tests/unit/script/size_of.rs
index 3368271d8e1..c7c04b4aac1 100644
--- a/tests/unit/script/size_of.rs
+++ b/tests/unit/script/size_of.rs
@@ -31,10 +31,10 @@ macro_rules! sizeof_checker (
 // Update the sizes here
 sizeof_checker!(size_event_target, EventTarget, 40);
 sizeof_checker!(size_node, Node, 184);
-sizeof_checker!(size_element, Element, 352);
-sizeof_checker!(size_htmlelement, HTMLElement, 368);
-sizeof_checker!(size_div, HTMLDivElement, 368);
-sizeof_checker!(size_span, HTMLSpanElement, 368);
+sizeof_checker!(size_element, Element, 344);
+sizeof_checker!(size_htmlelement, HTMLElement, 360);
+sizeof_checker!(size_div, HTMLDivElement, 360);
+sizeof_checker!(size_span, HTMLSpanElement, 360);
 sizeof_checker!(size_text, Text, 216);
 sizeof_checker!(size_characterdata, CharacterData, 216);
 sizeof_checker!(size_servothreadsafelayoutnode, ServoThreadSafeLayoutNode, 16);
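The updated numbers record the win from the `Option` niche: each handle sheds a word, and the DOM structs embedding them shrink accordingly. The project's `sizeof_checker!`/`size_of_test!` macros boil down to an assertion like this stand-alone sketch (`Handle` is a stand-in for a `NonZeroPtrMut`-backed type):

```rust
use std::mem::size_of;

#[allow(dead_code)]
struct Handle(&'static mut u8); // non-null, so Option gets the niche

#[test]
fn option_handle_is_pointer_sized() {
    // The niche means Option adds no extra discriminant word.
    assert_eq!(size_of::<Option<Handle>>(), size_of::<usize>());
}
```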
diff --git a/tests/unit/stylo/size_of.rs b/tests/unit/stylo/size_of.rs
index 3344a65c075..31b6d70b32f 100644
--- a/tests/unit/stylo/size_of.rs
+++ b/tests/unit/stylo/size_of.rs
@@ -3,9 +3,13 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use selectors::gecko_like_types as dummies;
+use servo_arc::Arc;
 use std::mem::{size_of, align_of};
 use style;
+use style::data::{ComputedStyle, ElementData, ElementStyles};
 use style::gecko::selector_parser as real;
+use style::properties::ComputedValues;
+use style::rule_tree::StrongRuleNode;
 
 #[test]
 fn size_of_selectors_dummy_types() {
@@ -24,6 +28,12 @@ fn size_of_selectors_dummy_types() {
 // selectors (with the inline hashes) with as few cache misses as possible.
 size_of_test!(test_size_of_rule, style::stylist::Rule, 40);
 
+size_of_test!(test_size_of_option_arc_cv, Option<Arc<ComputedValues>>, 8);
+size_of_test!(test_size_of_option_rule_node, Option<StrongRuleNode>, 8);
+size_of_test!(test_size_of_computed_style, ComputedStyle, 32);
+size_of_test!(test_size_of_element_styles, ElementStyles, 48);
+size_of_test!(test_size_of_element_data, ElementData, 56);
+
 size_of_test!(test_size_of_property_declaration, style::properties::PropertyDeclaration, 32);
 
 // This is huge, but we allocate it on the stack and then never move it,