diff --git a/components/style/atomic_refcell.rs b/components/style/atomic_refcell.rs index ee6771bc527..fa6c76bfde6 100644 --- a/components/style/atomic_refcell.rs +++ b/components/style/atomic_refcell.rs @@ -2,121 +2,345 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -#![allow(unsafe_code)] +//! Implements a container type providing RefCell-like semantics for objects +//! shared across threads. +//! +//! RwLock is traditionally considered to be the |Sync| analogue of RefCell. +//! However, for consumers that can guarantee that they will never mutably +//! borrow the contents concurrently with immutable borrows, an RwLock is +//! overkill, and has key disadvantages: +//! * Performance: Even the fastest existing implementation of RwLock (that of +//! parking_lot) performs at least two atomic operations during immutable +//! borrows. This makes mutable borrows significantly cheaper than immutable +//! borrows, leading to weird incentives when writing performance-critical +//! code. +//! * Features: Implementing AtomicRefCell on top of RwLock makes it impossible +//! to implement useful things like AtomicRef{,Mut}::map. +//! +//! As such, we re-implement RefCell semantics from scratch with a single atomic +//! reference count. The primary complication of this scheme relates to keeping +//! things in a consistent state when one thread performs an illegal borrow and +//! panics. Since an AtomicRefCell can be accessed by multiple threads, and since +//! panics are recoverable, we need to ensure that an illegal (panicking) access by +//! one thread does not lead to undefined behavior on other, still-running threads. +//! +//! So we represent things as follows: +//! * Any value with the high bit set (so half the total refcount space) indicates +//! a mutable borrow. +//! * Mutable borrows perform an atomic compare-and-swap, swapping in the high bit +//! if the current value is zero. If the current value is non-zero, the thread +//! panics and the value is left undisturbed. +//! * Immutable borrows perform an atomic increment. If the new value has the high +//! bit set, the thread panics. The incremented refcount is left as-is, since it +//! still represents a valid mutable borrow. When the mutable borrow is released, +//! the refcount is set unconditionally to zero, clearing any stray increments by +//! panicked threads. +//! +//! There are a few additional purely-academic complications to handle overflow, +//! which are documented in the implementation. +//! +//! The rest of this module is mostly derived by copy-pasting the implementation of +//! RefCell and fixing things up as appropriate. Certain non-threadsafe methods +//! have been removed. We segment the concurrency logic from the rest of the code to +//! keep the tricky parts small and easy to audit. -use owning_ref::{OwningRef, StableAddress}; -use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +#![allow(unsafe_code)] +#![deny(missing_docs)] + +use std::cell::UnsafeCell; +use std::cmp; use std::fmt; use std::fmt::Debug; use std::ops::{Deref, DerefMut}; +use std::sync::atomic; +use std::sync::atomic::AtomicUsize; -/// Container type providing RefCell-like semantics for objects shared across -/// threads. -/// -/// RwLock is traditionally considered to be the |Sync| analogue of RefCell. 
-/// However, for consumers that can guarantee that they will never mutably
-/// borrow the contents concurrently with immutable borrows, an RwLock feels
-/// like overkill.
-///
-/// The RwLock in the standard library is indeed heavyweight, since it heap-
-/// allocates an OS-specific primitive and delegates operations to it. However,
-/// parking_lot provides a pure-rust implementation of the standard
-/// synchronization primitives, with uncontended borrows compiling down to a
-/// single atomic operation. This is exactly how we would build an atomic
-/// RefCell, so we newtype it with some API sugar.
-pub struct AtomicRefCell<T>(RwLock<T>);
-
-pub struct AtomicRef<'a, T: 'a>(RwLockReadGuard<'a, T>);
-unsafe impl<'a, T> StableAddress for AtomicRef<'a, T> {}
-
-impl<'a, T> Deref for AtomicRef<'a, T> {
-    type Target = T;
-    fn deref(&self) -> &T {
-        self.0.deref()
-    }
-}
-
-pub struct AtomicRefMut<'a, T: 'a>(RwLockWriteGuard<'a, T>);
-unsafe impl<'a, T> StableAddress for AtomicRefMut<'a, T> {}
-
-impl<'a, T> Deref for AtomicRefMut<'a, T> {
-    type Target = T;
-    fn deref(&self) -> &T {
-        self.0.deref()
-    }
-}
-
-impl<'a, T> DerefMut for AtomicRefMut<'a, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        self.0.deref_mut()
-    }
-}
-
-impl<'a, T: 'a + Debug> Debug for AtomicRef<'a, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{:?}", self.0.deref())
-    }
-}
-
-impl<'a, T: 'a + Debug> Debug for AtomicRefMut<'a, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{:?}", self.0.deref())
-    }
+/// A threadsafe analogue to RefCell.
+pub struct AtomicRefCell<T: ?Sized> {
+    borrow: AtomicUsize,
+    value: UnsafeCell<T>,
 }
 
 impl<T> AtomicRefCell<T> {
-    pub fn new(value: T) -> Self {
-        AtomicRefCell(RwLock::new(value))
+    /// Creates a new `AtomicRefCell` containing `value`.
+    #[inline]
+    pub fn new(value: T) -> AtomicRefCell<T> {
+        AtomicRefCell {
+            borrow: AtomicUsize::new(0),
+            value: UnsafeCell::new(value),
+        }
     }
+
+    /// Consumes the `AtomicRefCell`, returning the wrapped value.
+    #[inline]
+    pub fn into_inner(self) -> T {
+        debug_assert!(self.borrow.load(atomic::Ordering::Acquire) == 0);
+        unsafe { self.value.into_inner() }
+    }
+}
+
+impl<T: ?Sized> AtomicRefCell<T> {
+    /// Immutably borrows the wrapped value.
+    #[inline]
     pub fn borrow(&self) -> AtomicRef<T> {
-        AtomicRef(self.0.try_read().expect("already mutably borrowed"))
+        AtomicRef {
+            value: unsafe { &*self.value.get() },
+            borrow: AtomicBorrowRef::new(&self.borrow),
+        }
     }
+
+    /// Mutably borrows the wrapped value.
+    #[inline]
     pub fn borrow_mut(&self) -> AtomicRefMut<T> {
-        AtomicRefMut(self.0.try_write().expect("already borrowed"))
+        AtomicRefMut {
+            value: unsafe { &mut *self.value.get() },
+            borrow: AtomicBorrowRefMut::new(&self.borrow),
+        }
+    }
+
+    /// Returns a raw pointer to the underlying data in this cell.
+    ///
+    /// External synchronization is needed to avoid data races when dereferencing
+    /// the pointer.
+    #[inline]
+    pub fn as_ptr(&self) -> *mut T {
+        self.value.get()
+    }
+}
+
+//
+// Core synchronization logic. Keep this section small and easy to audit.
+//
+
+const HIGH_BIT: usize = !(::std::usize::MAX >> 1);
+const MAX_FAILED_BORROWS: usize = HIGH_BIT + (HIGH_BIT >> 1);
+
+struct AtomicBorrowRef<'b> {
+    borrow: &'b AtomicUsize,
+}
+
+impl<'b> AtomicBorrowRef<'b> {
+    #[inline]
+    fn new(borrow: &'b AtomicUsize) -> Self {
+        let new = borrow.fetch_add(1, atomic::Ordering::Acquire) + 1;
+
+        // If the new count has the high bit set, panic. The specifics of how
+        // we panic are interesting for soundness, but irrelevant for real programs.
+        //
+        // FIXME(bholley): Counter-intuitively, moving the panic logic below out-of-
+        // line costs about 2ns on the benchmark. My best guess is that we end up
+        // predicting the wrong branch, but there's no way to mark something as
+        // unlikely on stable rust. :-(
+        if new & HIGH_BIT != 0 {
+            if new == HIGH_BIT {
+                // We overflowed into the reserved upper half of the refcount
+                // space. Before panicking, decrement the refcount to leave things
+                // in a consistent immutable-borrow state.
+                //
+                // This can basically only happen if somebody forget()s AtomicRefs
+                // in a tight loop.
+                borrow.fetch_sub(1, atomic::Ordering::Release);
+                panic!("too many immutable borrows");
+            } else if new >= MAX_FAILED_BORROWS {
+                // During the mutable borrow, an absurd number of threads have
+                // incremented the refcount and panicked. To avoid hypothetically
+                // wrapping the refcount, we abort the process once a certain
+                // threshold is reached.
+                //
+                // This requires billions of threads to have panicked already, and
+                // so will never happen in a real program.
+                println!("Too many failed borrows");
+                ::std::process::exit(1);
+            } else {
+                // This is the normal case, and the only one which should happen
+                // in a real program.
+                panic!("already mutably borrowed");
+            }
+        }
+
+        AtomicBorrowRef { borrow: borrow }
+    }
+}
+
+impl<'b> Drop for AtomicBorrowRef<'b> {
+    #[inline]
+    fn drop(&mut self) {
+        let old = self.borrow.fetch_sub(1, atomic::Ordering::Release);
+        // This assertion is technically incorrect in the case where another
+        // thread hits the hypothetical overflow case, since we might observe
+        // the refcount before it fixes it up (and panics). But that will
+        // never happen in a real program, and this is a debug_assert! anyway.
+        debug_assert!(old & HIGH_BIT == 0);
+    }
+}
+
+struct AtomicBorrowRefMut<'b> {
+    borrow: &'b AtomicUsize,
+}
+
+impl<'b> Drop for AtomicBorrowRefMut<'b> {
+    #[inline]
+    fn drop(&mut self) {
+        self.borrow.store(0, atomic::Ordering::Release);
+    }
+}
+
+impl<'b> AtomicBorrowRefMut<'b> {
+    #[inline]
+    fn new(borrow: &'b AtomicUsize) -> AtomicBorrowRefMut<'b> {
+        // Use compare-and-swap to avoid corrupting the immutable borrow count
+        // on illegal mutable borrows.
+        let old = match borrow.compare_exchange(0, HIGH_BIT, atomic::Ordering::Acquire, atomic::Ordering::Relaxed) {
+            Ok(x) => x,
+            Err(x) => x,
+        };
+        assert!(old == 0, "already {} borrowed", if old & HIGH_BIT == 0 { "immutably" } else { "mutably" });
+        AtomicBorrowRefMut {
+            borrow: borrow
+        }
+    }
+}
+
+unsafe impl<T: ?Sized + Send> Send for AtomicRefCell<T> {}
+unsafe impl<T: ?Sized + Send + Sync> Sync for AtomicRefCell<T> {}
+
+//
+// End of core synchronization logic. No tricky thread stuff allowed below
+// this point.
+//
+
+impl<T: Clone> Clone for AtomicRefCell<T> {
+    #[inline]
+    fn clone(&self) -> AtomicRefCell<T> {
+        AtomicRefCell::new(self.borrow().clone())
     }
 }
 
 impl<T: Default> Default for AtomicRefCell<T> {
-    fn default() -> Self {
-        Self::new(T::default())
+    #[inline]
+    fn default() -> AtomicRefCell<T> {
+        AtomicRefCell::new(Default::default())
     }
 }
 
-/*
- * Implement Ref{,Mut}::map()-like semantics for AtomicRef{,Mut}. We can't quite
- * use AtomicRef{,Mut} as the mapped type, but we can use some trait tricks to
- * allow us to pass MappedAtomicRef{,Mut} back into AtomicRef{,Mut}::map().
- *
- * Note: We cannot implement an AtomicRefMut::map() method until we have mutable
- * OwningRef. See https://github.com/Kimundi/owning-ref-rs/pull/16
- */
-pub type MappedAtomicRef<'a, T: 'a, U: 'a> = OwningRef<AtomicRef<'a, T>, U>;
-
-pub trait Map<'a, Base, Curr> {
-    fn map<New, F>(self, f: F) -> MappedAtomicRef<'a, Base, New>
-        where F: FnOnce(&Curr) -> &New;
+impl<T: PartialEq> PartialEq for AtomicRefCell<T> {
+    #[inline]
+    fn eq(&self, other: &AtomicRefCell<T>) -> bool {
+        *self.borrow() == *other.borrow()
+    }
 }
 
-impl<'a, Base> Map<'a, Base, Base> for AtomicRef<'a, Base> {
-    fn map<New, F>(self, f: F) -> MappedAtomicRef<'a, Base, New>
-        where F: FnOnce(&Base) -> &New
+impl<T: Eq> Eq for AtomicRefCell<T> {}
+
+impl<T: PartialOrd> PartialOrd for AtomicRefCell<T> {
+    #[inline]
+    fn partial_cmp(&self, other: &AtomicRefCell<T>) -> Option<cmp::Ordering> {
+        self.borrow().partial_cmp(&*other.borrow())
+    }
+}
+
+impl<T: Ord> Ord for AtomicRefCell<T> {
+    #[inline]
+    fn cmp(&self, other: &AtomicRefCell<T>) -> cmp::Ordering {
+        self.borrow().cmp(&*other.borrow())
+    }
+}
+
+impl<T> From<T> for AtomicRefCell<T> {
+    fn from(t: T) -> AtomicRefCell<T> {
+        AtomicRefCell::new(t)
+    }
+}
+
+impl<'b> Clone for AtomicBorrowRef<'b> {
+    #[inline]
+    fn clone(&self) -> AtomicBorrowRef<'b> {
+        AtomicBorrowRef::new(self.borrow)
+    }
+}
+
+/// A wrapper type for an immutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRef<'b, T: ?Sized + 'b> {
+    value: &'b T,
+    borrow: AtomicBorrowRef<'b>,
+}
+
+impl<'b, T: ?Sized> Deref for AtomicRef<'b, T> {
+    type Target = T;
+
+    #[inline]
+    fn deref(&self) -> &T {
+        self.value
+    }
+}
+
+impl<'b, T: ?Sized> AtomicRef<'b, T> {
+    /// Copies an `AtomicRef`.
+    #[inline]
+    pub fn clone(orig: &AtomicRef<'b, T>) -> AtomicRef<'b, T> {
+        AtomicRef {
+            value: orig.value,
+            borrow: orig.borrow.clone(),
+        }
+    }
+
+    /// Make a new `AtomicRef` for a component of the borrowed data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: AtomicRef<'b, T>, f: F) -> AtomicRef<'b, U>
+        where F: FnOnce(&T) -> &U
     {
-        OwningRef::new(self).map(f)
+        AtomicRef {
+            value: f(orig.value),
+            borrow: orig.borrow,
+        }
     }
 }
 
-impl<'a, Base, Curr> Map<'a, Base, Curr> for MappedAtomicRef<'a, Base, Curr> {
-    fn map<New, F>(self, f: F) -> MappedAtomicRef<'a, Base, New>
-        where F: FnOnce(&Curr) -> &New
+impl<'b, T: ?Sized> AtomicRefMut<'b, T> {
+    /// Make a new `AtomicRefMut` for a component of the borrowed data, e.g. an enum
+    /// variant.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: AtomicRefMut<'b, T>, f: F) -> AtomicRefMut<'b, U>
+        where F: FnOnce(&mut T) -> &mut U
     {
-        self.map(f)
+        AtomicRefMut {
+            value: f(orig.value),
+            borrow: orig.borrow,
+        }
     }
 }
 
-impl<'a, Base> AtomicRef<'a, Base> {
-    pub fn map<Curr, New, F, M>(orig: M, f: F) -> MappedAtomicRef<'a, Base, New>
-        where F: FnOnce(&Curr) -> &New, M: Map<'a, Base, Curr>
-    {
-        orig.map(f)
+/// A wrapper type for a mutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRefMut<'b, T: ?Sized + 'b> { + value: &'b mut T, + borrow: AtomicBorrowRefMut<'b>, +} + +impl<'b, T: ?Sized> Deref for AtomicRefMut<'b, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.value + } +} + +impl<'b, T: ?Sized> DerefMut for AtomicRefMut<'b, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + self.value + } +} + +impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRef<'b, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.value.fmt(f) + } +} + +impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRefMut<'b, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.value.fmt(f) } } diff --git a/components/style/lib.rs b/components/style/lib.rs index e740d6ad385..04bf213d92f 100644 --- a/components/style/lib.rs +++ b/components/style/lib.rs @@ -89,7 +89,6 @@ extern crate time; extern crate unicode_segmentation; pub mod animation; -#[allow(missing_docs)] // TODO: Under a rewrite. pub mod atomic_refcell; #[allow(missing_docs)] // TODO. pub mod attr; diff --git a/tests/unit/style/atomic_refcell.rs b/tests/unit/style/atomic_refcell.rs index 4e6e36d0f8f..8f131085a42 100644 --- a/tests/unit/style/atomic_refcell.rs +++ b/tests/unit/style/atomic_refcell.rs @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use style::atomic_refcell::{AtomicRef, AtomicRefCell}; +use style::atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut}; use test::Bencher; struct Foo { @@ -19,6 +19,60 @@ impl Default for Bar { } } +// FIXME(bholley): Add tests to exercise this in concurrent scenarios. + +#[test] +fn immutable() { + let a = AtomicRefCell::new(Bar::default()); + let _first = a.borrow(); + let _second = a.borrow(); +} + +#[test] +fn mutable() { + let a = AtomicRefCell::new(Bar::default()); + let _ = a.borrow_mut(); +} + +#[test] +fn interleaved() { + let a = AtomicRefCell::new(Bar::default()); + { + let _ = a.borrow_mut(); + } + { + let _first = a.borrow(); + let _second = a.borrow(); + } + { + let _ = a.borrow_mut(); + } +} + +#[test] +#[should_panic(expected = "already immutably borrowed")] +fn immutable_then_mutable() { + let a = AtomicRefCell::new(Bar::default()); + let _first = a.borrow(); + let _second = a.borrow_mut(); +} + +#[test] +#[should_panic(expected = "already mutably borrowed")] +fn mutable_then_immutable() { + let a = AtomicRefCell::new(Bar::default()); + let _first = a.borrow_mut(); + let _second = a.borrow(); +} + +#[test] +#[should_panic(expected = "already mutably borrowed")] +fn double_mutable() { + let a = AtomicRefCell::new(Bar::default()); + let _first = a.borrow_mut(); + let _second = a.borrow_mut(); +} + #[test] fn map() { let a = AtomicRefCell::new(Bar::default()); @@ -57,19 +111,17 @@ fn mutable_borrow(b: &mut Bencher) { b.iter(|| a.borrow_mut()); } -/* FIXME(bholley): Enable once we have AtomicRefMut::map(), which is blocked on - * https://github.com/Kimundi/owning-ref-rs/pull/16 #[test] fn map_mut() { - let a = AtomicRefCell::new(Bar { f: Foo { u: 42 } }); + let a = AtomicRefCell::new(Bar::default()); let mut b = a.borrow_mut(); assert_eq!(b.f.u, 42); b.f.u = 43; - let mut c = AtomicRefMut::map(b, |x| &x.f); + let mut c = AtomicRefMut::map(b, |x| &mut x.f); assert_eq!(c.u, 43); c.u = 44; - let mut d = AtomicRefMut::map(c, |x| &x.u); + let mut d = AtomicRefMut::map(c, |x| &mut x.u); assert_eq!(*d, 44); - *d. = 45; + *d = 45; assert_eq!(*d, 45); -}*/ +}
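---

For reviewers: below is a minimal usage sketch (not part of the patch) of the semantics the new single-refcount implementation provides. It assumes the `style::atomic_refcell` API exactly as added above and mirrors the unit tests; the `demo` function and its data are hypothetical.

```rust
use std::sync::Arc;
use std::thread;

use style::atomic_refcell::{AtomicRef, AtomicRefCell};

// Hypothetical demo exercising the borrow semantics of the new cell.
fn demo() {
    let cell = Arc::new(AtomicRefCell::new(vec![1u32, 2, 3]));

    // Concurrent immutable borrows are legal and cheap: acquiring one is a
    // single fetch_add on the refcount, and releasing it a single fetch_sub.
    let readers: Vec<_> = (0..4)
        .map(|_| {
            let cell = cell.clone();
            thread::spawn(move || assert_eq!(cell.borrow().len(), 3))
        })
        .collect();
    for reader in readers {
        reader.join().unwrap();
    }

    // A mutable borrow compare-and-swaps the refcount from 0 to HIGH_BIT, and
    // panics immediately (rather than blocking, as an RwLock would) if any
    // other borrow is outstanding. The guard resets the count to 0 on drop.
    cell.borrow_mut().push(4);

    // map() narrows an existing borrow to a component of the data without
    // touching the refcount again.
    let last = AtomicRef::map(cell.borrow(), |v| v.last().unwrap());
    assert_eq!(*last, 4);
}
```

The trade-off, as the module docs spell out, is that an illegal borrow fails loudly at the borrow site instead of deadlocking or blocking, which is what keeps the core synchronization logic small enough to audit.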