Revert "Add canary and journaling."

This reverts commit 58322d0021.
Manish Goregaokar 2017-10-23 13:46:05 -07:00
parent f7ad19f500
commit d6bafde971
3 changed files with 10 additions and 90 deletions
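
For context, the reverted change wrapped every stored value behind a per-entry canary word and kept a journal of hashed operations, so that end_mutation() could detect heap corruption and attach the journal to a crash report. A minimal, self-contained sketch of that idea, using std::collections::HashMap rather than the crate's own table (the constant and the wrapping scheme are taken from the diff below; CanaryMap and its methods are illustrative stand-ins, not types from this repository):

// Illustrative sketch only -- CanaryMap is a hypothetical stand-in.
use std::collections::HashMap;

const CANARY: usize = 0x42cafe99;

struct CanaryMap<K, V> {
    // Each value is stored behind a canary word, as in the reverted DiagnosticHashMap.
    map: HashMap<K, (usize, V)>,
}

impl<K: std::hash::Hash + Eq, V> CanaryMap<K, V> {
    fn new() -> Self {
        CanaryMap { map: HashMap::new() }
    }

    fn insert(&mut self, k: K, v: V) -> Option<V> {
        self.map.insert(k, (CANARY, v)).map(|old| old.1)
    }

    fn get(&self, k: &K) -> Option<&V> {
        self.map.get(k).map(|t| &t.1)
    }

    // Analogue of the reverted end_mutation(): scan every entry and flag any
    // entry whose canary word has been clobbered.
    fn check(&self) {
        for (position, (_, v)) in self.map.iter().enumerate() {
            assert!(v.0 == CANARY, "corrupt canary at position {}", position);
        }
    }
}

fn main() {
    let mut m = CanaryMap::new();
    m.insert("stylo", 1u32);
    m.check();
    assert_eq!(m.get(&"stylo"), Some(&1));
}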


@@ -1,30 +1,15 @@
 use hash_map::HashMap;
 use std::borrow::Borrow;
 use std::hash::{BuildHasher, Hash};
-use table::SafeHash;
 use FailedAllocationError;
 
-#[cfg(target_pointer_width = "32")]
-const CANARY: usize = 0x42cafe99;
-#[cfg(target_pointer_width = "64")]
-const CANARY: usize = 0x42cafe9942cafe99;
-
-#[derive(Clone, Debug)]
-enum JournalEntry {
-    Insert(SafeHash),
-    GetOrInsertWith(SafeHash),
-    Remove(SafeHash),
-    DidClear(usize),
-}
-
 #[derive(Clone, Debug)]
 pub struct DiagnosticHashMap<K, V, S>
     where K: Eq + Hash,
           S: BuildHasher
 {
-    map: HashMap<K, (usize, V), S>,
-    journal: Vec<JournalEntry>,
+    map: HashMap<K, V, S>,
     readonly: bool,
 }
@@ -33,7 +18,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
           S: BuildHasher
 {
     #[inline(always)]
-    pub fn inner(&self) -> &HashMap<K, (usize, V), S> {
+    pub fn inner(&self) -> &HashMap<K, V, S> {
         &self.map
     }
@@ -47,27 +32,12 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
     pub fn end_mutation(&mut self) {
         assert!(!self.readonly);
         self.readonly = true;
-
-        let mut position = 0;
-        let mut bad_canary: Option<(usize, *const usize)> = None;
-        for (_,v) in self.map.iter() {
-            let canary_ref = &v.0;
-            if *canary_ref == CANARY {
-                position += 1;
-                continue;
-            }
-            bad_canary = Some((*canary_ref, canary_ref));
-        }
-        if let Some(c) = bad_canary {
-            self.report_corruption(c.0, c.1, position);
-        }
     }
 
     #[inline(always)]
     pub fn with_hasher(hash_builder: S) -> Self {
         Self {
-            map: HashMap::<K, (usize, V), S>::with_hasher(hash_builder),
-            journal: Vec::new(),
+            map: HashMap::<K, V, S>::with_hasher(hash_builder),
             readonly: true,
         }
     }
@@ -95,7 +65,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         where K: Borrow<Q>,
               Q: Hash + Eq
     {
-        self.map.get(k).map(|v| &v.1)
+        self.map.get(k)
     }
 
     #[inline(always)]
@@ -105,17 +75,14 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         default: F
     ) -> Result<&mut V, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::GetOrInsertWith(self.map.make_hash(&key)));
         let entry = self.map.try_entry(key)?;
-        Ok(&mut entry.or_insert_with(|| (CANARY, default())).1)
+        Ok(entry.or_insert_with(default))
     }
 
     #[inline(always)]
     pub fn try_insert(&mut self, k: K, v: V) -> Result<Option<V>, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Insert(self.map.make_hash(&k)));
-        let old = self.map.try_insert(k, (CANARY, v))?;
-        Ok(old.map(|x| x.1))
+        self.map.try_insert(k, v)
     }
 
     #[inline(always)]
@@ -124,8 +91,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
               Q: Hash + Eq
     {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Remove(self.map.make_hash(k)));
-        self.map.remove(k).map(|x| x.1)
+        self.map.remove(k)
     }
 
     #[inline(always)]
@@ -133,36 +99,9 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         // We handle scoped mutations for the caller here, since callsites that
         // invoke clear() don't benefit from the coalescing we do around insertion.
         self.begin_mutation();
-        self.journal.clear();
-        self.journal.push(JournalEntry::DidClear(self.map.raw_capacity()));
         self.map.clear();
         self.end_mutation();
     }
-
-    #[inline(never)]
-    fn report_corruption(
-        &mut self,
-        canary: usize,
-        canary_addr: *const usize,
-        position: usize
-    ) {
-        unsafe {
-            Gecko_AddBufferToCrashReport(
-                self.journal.as_ptr() as *const _,
-                self.journal.len() * ::std::mem::size_of::<JournalEntry>(),
-            );
-        }
-        panic!(
-            "HashMap Corruption (sz={}, cap={}, pairsz={}, cnry={:#x}, pos={}, base_addr={:?}, cnry_addr={:?})",
-            self.map.len(),
-            self.map.raw_capacity(),
-            ::std::mem::size_of::<(K, (usize, V))>(),
-            canary,
-            position,
-            self.map.raw_buffer(),
-            canary_addr,
-        );
-    }
 }
 
 impl<K, V, S> PartialEq for DiagnosticHashMap<K, V, S>
@@ -189,7 +128,6 @@ impl<K, V, S> Default for DiagnosticHashMap<K, V, S>
     fn default() -> Self {
         Self {
             map: HashMap::default(),
-            journal: Vec::new(),
             readonly: true,
         }
     }
@@ -203,8 +141,3 @@ impl<K: Hash + Eq, V, S: BuildHasher> Drop for DiagnosticHashMap<K, V, S>
         debug_assert!(self.readonly, "Dropped while mutating");
     }
 }
-
-extern "C" {
-    pub fn Gecko_AddBufferToCrashReport(addr: *const ::std::os::raw::c_void,
-                                        bytes: usize);
-}
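
After the revert, the wrapper stores plain V again. A hedged usage sketch of the remaining API, written against the signatures visible in this diff but not compiled against the crate (begin_mutation() is inferred from its call in clear() and from the readonly assertions; RandomState stands in for whatever BuildHasher the caller actually uses):

// Hedged sketch: drives DiagnosticHashMap through one mutation scope.
// Method names and signatures are taken from the diff above.
fn fill(map: &mut DiagnosticHashMap<u32, &'static str, RandomState>)
        -> Result<(), FailedAllocationError> {
    map.begin_mutation();            // readonly -> false; mutating calls assert !readonly
    map.try_insert(1, "block")?;     // plain V again, no (CANARY, v) wrapping
    map.try_insert(2, "inline")?;    // several inserts share one mutation scope
    map.end_mutation();              // readonly -> true; no canary scan any more
    assert_eq!(map.get(&1), Some(&"block"));
    Ok(())
}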


@@ -534,7 +534,7 @@ impl<K, V, S> HashMap<K, V, S>
     where K: Eq + Hash,
           S: BuildHasher
 {
-    pub fn make_hash<X: ?Sized>(&self, x: &X) -> SafeHash
+    fn make_hash<X: ?Sized>(&self, x: &X) -> SafeHash
         where X: Hash
     {
         table::make_hash(&self.hash_builder, x)
@@ -683,17 +683,10 @@ impl<K, V, S> HashMap<K, V, S>
 
     /// Returns the hash map's raw capacity.
     #[inline]
-    pub fn raw_capacity(&self) -> usize {
+    fn raw_capacity(&self) -> usize {
         self.table.capacity()
     }
 
-    /// Returns a raw pointer to the table's buffer.
-    #[inline]
-    pub fn raw_buffer(&self) -> *const u8 {
-        assert!(self.len() != 0);
-        self.table.raw_buffer()
-    }
-
     /// Reserves capacity for at least `additional` more elements to be inserted
     /// in the `HashMap`. The collection may reserve more space to avoid
     /// frequent reallocations.
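
The second file only walks back visibility: make_hash and raw_capacity return to being private and the raw_buffer accessor disappears. Their only visible callers in this diff were the diagnostics removed above; the relevant call sites (quoted, abbreviated, from the first file's removed lines) were:

// Removed call sites in the diagnostic wrapper (quoted from the diff above):
self.journal.push(JournalEntry::Insert(self.map.make_hash(&k)));     // needed pub make_hash
self.journal.push(JournalEntry::DidClear(self.map.raw_capacity()));  // needed pub raw_capacity
self.map.raw_buffer()                                                 // needed pub raw_buffer (panic message)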


@@ -182,7 +182,7 @@ pub struct GapThenFull<K, V, M> {
 
 /// A hash that is not zero, since we use a hash of zero to represent empty
 /// buckets.
-#[derive(PartialEq, Copy, Clone, Debug)]
+#[derive(PartialEq, Copy, Clone)]
 pub struct SafeHash {
     hash: HashUint,
 }
@@ -816,12 +816,6 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
-    /// Returns a raw pointer to the table's buffer.
-    #[inline]
-    pub fn raw_buffer(&self) -> *const u8 {
-        self.hashes.ptr() as *const u8
-    }
-
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
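
The Debug derive comes off SafeHash for a similar reason: within this diff its only consumer appears to be the derived Debug on the reverted JournalEntry, and a derived Debug only compiles when every field type is Debug itself. A small self-contained illustration with hypothetical stand-in types (SafeHashLike and JournalEntryLike are not part of the repository):

// Stand-in types for illustration only.
#[derive(Debug)]
struct SafeHashLike {
    hash: u64, // stands in for SafeHash { hash: HashUint }
}

// Deriving Debug here requires SafeHashLike: Debug -- drop that derive and this
// enum no longer compiles, which mirrors why SafeHash carried Debug while the
// journal existed.
#[derive(Debug)]
enum JournalEntryLike {
    Insert(SafeHashLike),
    DidClear(usize),
}

fn main() {
    println!("{:?}", JournalEntryLike::Insert(SafeHashLike { hash: 0x42cafe99 }));
}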