Revert #18668 - Add mprotect diagnostics for HashMap crash

Bobby Holley 2017-10-03 10:04:43 -07:00
parent e07c6f38a5
commit 15b866d8de
17 changed files with 9 additions and 448 deletions

@@ -1027,12 +1027,6 @@ impl<K, V, S> HashMap<K, V, S>
        self.table.size()
    }
    /// Access to the raw buffer backing this hashmap.
    pub fn raw_buffer(&self) -> (*const (), usize) {
        assert!(self.raw_capacity() != 0);
        self.table.raw_buffer()
    }
    /// Returns true if the map contains no elements.
    ///
    /// # Examples
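The raw_buffer() accessor removed above exists so the protection code further down in this diff can flip page permissions on the map's backing allocation. A rough sketch of what a consumer does with the (pointer, size) pair, using libc::mprotect directly in place of the Gecko_ProtectBuffer FFI hook declared later in this commit (an illustrative assumption, not code from this commit):

// Hypothetical sketch only: Gecko_ProtectBuffer is assumed to do something
// along these lines on the C++ side; libc::mprotect stands in for it here.
extern crate libc;

unsafe fn protect_readonly(buffer: *const (), size: usize) {
    // raw_buffer() is expected to hand back a page-aligned pointer and a
    // size already rounded up to a whole number of pages.
    let rv = libc::mprotect(buffer as *mut libc::c_void, size, libc::PROT_READ);
    debug_assert_eq!(rv, 0, "mprotect failed");
}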

@@ -13,7 +13,6 @@ extern crate heapsize;
pub mod alloc;
pub mod hash_map;
pub mod hash_set;
pub mod protected;
mod shim;
mod table;
@@ -52,6 +51,3 @@ impl fmt::Display for FailedAllocationError {
        self.reason.fmt(f)
    }
}
// The size of memory pages on this system. Set when initializing geckolib.
pub static SYSTEM_PAGE_SIZE: ::std::sync::atomic::AtomicUsize = ::std::sync::atomic::ATOMIC_USIZE_INIT;
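The comment above notes that the page size is recorded during geckolib initialization. A minimal sketch of what that setup could look like, assuming a hypothetical initialize_page_size() hook and libc::sysconf for the query (neither appears in this diff):

// Hypothetical init sketch; the hook name and the libc::sysconf call are
// assumptions for illustration, not code from this commit.
extern crate libc;

use std::sync::atomic::Ordering;

pub fn initialize_page_size() {
    // Ask the OS for its page size (commonly 4096 bytes)...
    let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;
    // ...and publish it so protect()/round_up_to_page_size() can read it.
    SYSTEM_PAGE_SIZE.store(page_size, Ordering::Relaxed);
}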

@@ -1,235 +0,0 @@
use hash_map::{Entry, HashMap, Iter, IterMut, Keys, RandomState, Values};
use std::borrow::Borrow;
use std::hash::{BuildHasher, Hash};
use FailedAllocationError;
#[derive(Clone, Debug)]
pub struct ProtectedHashMap<K, V, S = RandomState>
    where K: Eq + Hash,
          S: BuildHasher
{
    map: HashMap<K, V, S>,
    readonly: bool,
}
impl<K: Hash + Eq, V, S: BuildHasher> ProtectedHashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    #[inline(always)]
    pub fn inner(&self) -> &HashMap<K, V, S> {
        &self.map
    }
    #[inline(always)]
    pub fn begin_mutation(&mut self) {
        assert!(self.readonly);
        self.unprotect();
        self.readonly = false;
    }
    #[inline(always)]
    pub fn end_mutation(&mut self) {
        assert!(!self.readonly);
        self.protect();
        self.readonly = true;
    }
    #[inline(always)]
    pub fn with_hasher(hash_builder: S) -> Self {
        Self {
            map: HashMap::<K, V, S>::with_hasher(hash_builder),
            readonly: true,
        }
    }
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.map.len()
    }
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    #[inline(always)]
    pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.map.contains_key(k)
    }
    #[inline(always)]
    pub fn keys(&self) -> Keys<K, V> {
        self.map.keys()
    }
    #[inline(always)]
    pub fn values(&self) -> Values<K, V> {
        self.map.values()
    }
    #[inline(always)]
    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.map.get(k)
    }
    #[inline(always)]
    pub fn iter(&self) -> Iter<K, V> {
        self.map.iter()
    }
    #[inline(always)]
    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        assert!(!self.readonly);
        self.map.iter_mut()
    }
    #[inline(always)]
    pub fn entry(&mut self, key: K) -> Entry<K, V> {
        assert!(!self.readonly);
        self.map.entry(key)
    }
    #[inline(always)]
    pub fn try_entry(&mut self, key: K) -> Result<Entry<K, V>, FailedAllocationError> {
        assert!(!self.readonly);
        self.map.try_entry(key)
    }
    #[inline(always)]
    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
        assert!(!self.readonly);
        self.map.insert(k, v)
    }
    #[inline(always)]
    pub fn try_insert(&mut self, k: K, v: V) -> Result<Option<V>, FailedAllocationError> {
        assert!(!self.readonly);
        self.map.try_insert(k, v)
    }
    #[inline(always)]
    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        assert!(!self.readonly);
        self.map.remove(k)
    }
    #[inline(always)]
    pub fn clear(&mut self) where K: 'static, V: 'static {
        // We handle scoped mutations for the caller here, since callsites that
        // invoke clear() don't benefit from the coalescing we do around insertion.
        self.begin_mutation();
        self.map.clear();
        self.end_mutation();
    }
    fn protect(&mut self) {
        if self.map.capacity() == 0 {
            return;
        }
        let buff = self.map.raw_buffer();
        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
            // Safely handle weird allocators like ASAN that return
            // non-page-aligned buffers to page-sized allocations.
            return;
        }
        unsafe {
            Gecko_ProtectBuffer(buff.0 as *mut _, buff.1);
        }
    }
    fn unprotect(&mut self) {
        if self.map.capacity() == 0 {
            return;
        }
        let buff = self.map.raw_buffer();
        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
            // Safely handle weird allocators like ASAN that return
            // non-page-aligned buffers to page-sized allocations.
            return;
        }
        unsafe {
            Gecko_UnprotectBuffer(buff.0 as *mut _, buff.1);
        }
    }
}
impl<K, V> ProtectedHashMap<K, V, RandomState>
    where K: Eq + Hash,
{
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
            readonly: true,
        }
    }
    pub fn with_capacity(capacity: usize) -> Self {
        let mut result = Self {
            map: HashMap::with_capacity(capacity),
            readonly: true,
        };
        result.protect();
        result
    }
}
impl<K, V, S> PartialEq for ProtectedHashMap<K, V, S>
    where K: Eq + Hash,
          V: PartialEq,
          S: BuildHasher
{
    fn eq(&self, other: &Self) -> bool {
        self.map.eq(&other.map)
    }
}
impl<K, V, S> Eq for ProtectedHashMap<K, V, S>
    where K: Eq + Hash,
          V: Eq,
          S: BuildHasher
{
}
impl<K, V, S> Default for ProtectedHashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher + Default
{
    fn default() -> Self {
        Self {
            map: HashMap::default(),
            readonly: true,
        }
    }
}
impl<K: Hash + Eq, V, S: BuildHasher> Drop for ProtectedHashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    fn drop(&mut self) {
        debug_assert!(self.readonly, "Dropped while mutating");
        self.unprotect();
    }
}
// Manually declare the FFI functions since we don't depend on the crate with
// the bindings.
extern "C" {
    pub fn Gecko_ProtectBuffer(buffer: *mut ::std::os::raw::c_void,
                               size: usize);
}
extern "C" {
    pub fn Gecko_UnprotectBuffer(buffer: *mut ::std::os::raw::c_void,
                                 size: usize);
}
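For reference, the calling convention of the type removed above: the map's backing buffer stays read-protected except inside an explicit begin_mutation()/end_mutation() window, the idea being that a stray write faults at the offending address instead of silently corrupting the table. A minimal usage sketch (illustrative only; it assumes linking against the Gecko FFI hooks declared above):

// Illustrative usage of the removed ProtectedHashMap API.
use protected::ProtectedHashMap;

fn example() {
    let mut map = ProtectedHashMap::<u32, &'static str>::new();

    // Mutations must be explicitly bracketed; end_mutation() re-protects
    // the backing pages via Gecko_ProtectBuffer.
    map.begin_mutation();
    map.insert(1, "one");
    map.insert(2, "two");
    map.end_mutation();

    // Reads are fine while the map is protected.
    assert_eq!(map.get(&1), Some(&"one"));
    assert_eq!(map.len(), 2);
}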

@@ -777,7 +777,7 @@ impl<K, V> RawTable<K, V> {
        // FORK NOTE: Uses alloc shim instead of Heap.alloc
        let buffer = alloc(round_up_to_page_size(size), alignment);
        let buffer = alloc(size, alignment);
        if buffer.is_null() {
@@ -813,24 +813,6 @@ impl<K, V> RawTable<K, V> {
        }
    }
    /// Access to the raw buffer backing this table.
    pub fn raw_buffer(&self) -> (*const (), usize) {
        debug_assert!(self.capacity() != 0);
        let buffer = self.hashes.ptr() as *const ();
        let size = {
            let hashes_size = self.capacity() * size_of::<HashUint>();
            let pairs_size = self.capacity() * size_of::<(K, V)>();
            let (_, _, size, _) = calculate_allocation(hashes_size,
                                                       align_of::<HashUint>(),
                                                       pairs_size,
                                                       align_of::<(K, V)>());
            round_up_to_page_size(size)
        };
        (buffer, size)
    }
    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
@@ -1219,19 +1201,3 @@ impl<K, V> Drop for RawTable<K, V> {
        }
    }
}
// Force all allocations to fill their pages for the duration of the mprotect
// experiment.
#[inline]
fn round_up_to_page_size(size: usize) -> usize {
    let page_size = ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed);
    debug_assert!(page_size != 0);
    let mut result = size;
    let remainder = size % page_size;
    if remainder != 0 {
        result += page_size - remainder;
    }
    debug_assert!(result % page_size == 0);
    debug_assert!(result - size < page_size);
    result
}
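For concreteness, the helper above pads every allocation out to a whole number of pages. A small worked example of the arithmetic, assuming a 4096-byte page size (a standalone restatement for illustration, not code from this commit):

// Same rounding logic, restated without the global so it runs standalone.
fn round_up(size: usize, page_size: usize) -> usize {
    let remainder = size % page_size;
    if remainder == 0 { size } else { size + (page_size - remainder) }
}

fn main() {
    assert_eq!(round_up(4096, 4096), 4096);   // already a whole page
    assert_eq!(round_up(5000, 4096), 8192);   // 5000 + (4096 - 904)
    assert_eq!(round_up(12288, 4096), 12288); // exactly three pages
}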