Revert "Stuff HashMap journal into a string and verify invariants in a few more places."

This reverts commit 4b64390b81.
Manish Goregaokar 2017-10-23 13:45:48 -07:00
parent f98f83ceb3
commit fde96d0ff5
3 changed files with 27 additions and 65 deletions
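Context for the change below: the reverted commit had formatted the journal with format! into a CString and attached it via Gecko_AnnotateCrashReport; this revert restores the typed journal, whose Vec<JournalEntry> backing buffer is handed to the crash reporter directly as a (pointer, byte-length) pair, so nothing is formatted or allocated on the crash path. A minimal sketch of that idea (not part of this commit; SafeHash is stood in by a bare u64, and the Gecko FFI call by a local printing stand-in):

    // Sketch only: SafeHash is approximated by a bare u64; DidClear carries
    // the old capacity, mirroring the enum restored in this diff.
    #[derive(Clone, Copy, Debug)]
    enum JournalEntry {
        Insert(u64),
        GetOrInsertWith(u64),
        Remove(u64),
        DidClear(usize),
    }

    // Local stand-in for Gecko_AddBufferToCrashReport: the real function
    // receives the journal as raw bytes, so no formatting or allocation
    // happens while the heap may already be corrupted.
    unsafe fn add_buffer_to_crash_report(addr: *const u8, bytes: usize) {
        let raw = std::slice::from_raw_parts(addr, bytes);
        println!("attaching {} bytes: {:02x?}", bytes, &raw[..raw.len().min(16)]);
    }

    fn main() {
        let journal = vec![
            JournalEntry::Insert(0x42cafe99),
            JournalEntry::GetOrInsertWith(0x42cafe99),
            JournalEntry::Remove(0x42cafe99),
            JournalEntry::DidClear(16),
        ];
        unsafe {
            add_buffer_to_crash_report(
                journal.as_ptr() as *const u8,
                journal.len() * std::mem::size_of::<JournalEntry>(),
            );
        }
    }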


@@ -1,6 +1,7 @@
 use hash_map::HashMap;
 use std::borrow::Borrow;
 use std::hash::{BuildHasher, Hash};
+use table::SafeHash;
 use FailedAllocationError;
@@ -11,9 +12,9 @@ const CANARY: usize = 0x42cafe9942cafe99;
 #[derive(Clone, Debug)]
 enum JournalEntry {
-    Insert(usize),
-    GOIW(usize),
-    Remove(usize),
+    Insert(SafeHash),
+    GetOrInsertWith(SafeHash),
+    Remove(SafeHash),
     DidClear(usize),
 }
@@ -36,23 +37,17 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         &self.map
     }

-    #[inline(never)]
+    #[inline(always)]
     pub fn begin_mutation(&mut self) {
-        self.map.verify();
         assert!(self.readonly);
         self.readonly = false;
         self.verify();
     }

-    #[inline(never)]
+    #[inline(always)]
     pub fn end_mutation(&mut self) {
-        self.map.verify();
         assert!(!self.readonly);
         self.readonly = true;
         self.verify();
     }

     fn verify(&self) {
         let mut position = 0;
         let mut bad_canary: Option<(usize, *const usize)> = None;
         for (_,v) in self.map.iter() {
@@ -110,7 +105,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         default: F
     ) -> Result<&mut V, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::GOIW(self.map.make_hash(&key).inspect()));
+        self.journal.push(JournalEntry::GetOrInsertWith(self.map.make_hash(&key)));
         let entry = self.map.try_entry(key)?;
         Ok(&mut entry.or_insert_with(|| (CANARY, default())).1)
     }
@@ -118,7 +113,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
     #[inline(always)]
     pub fn try_insert(&mut self, k: K, v: V) -> Result<Option<V>, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Insert(self.map.make_hash(&k).inspect()));
+        self.journal.push(JournalEntry::Insert(self.map.make_hash(&k)));
         let old = self.map.try_insert(k, (CANARY, v))?;
         Ok(old.map(|x| x.1))
     }
@@ -129,7 +124,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
         Q: Hash + Eq
     {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Remove(self.map.make_hash(k).inspect()));
+        self.journal.push(JournalEntry::Remove(self.map.make_hash(k)));
         self.map.remove(k).map(|x| x.1)
     }
@@ -146,22 +141,19 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
     #[inline(never)]
     fn report_corruption(
-        &self,
+        &mut self,
         canary: usize,
         canary_addr: *const usize,
         position: usize
     ) {
-        use ::std::ffi::CString;
-        let key = b"HashMapJournal\0";
-        let value = CString::new(format!("{:?}", self.journal)).unwrap();
         unsafe {
-            Gecko_AnnotateCrashReport(
-                key.as_ptr() as *const ::std::os::raw::c_char,
-                value.as_ptr(),
+            Gecko_AddBufferToCrashReport(
+                self.journal.as_ptr() as *const _,
+                self.journal.len() * ::std::mem::size_of::<JournalEntry>(),
             );
         }
         panic!(
-            "HashMap Corruption (sz={}, cap={}, pairsz={}, cnry={:#x}, pos={}, base_addr={:?}, cnry_addr={:?}, jrnl_len={})",
+            "HashMap Corruption (sz={}, cap={}, pairsz={}, cnry={:#x}, pos={}, base_addr={:?}, cnry_addr={:?})",
             self.map.len(),
             self.map.raw_capacity(),
             ::std::mem::size_of::<(K, (usize, V))>(),
@@ -169,7 +161,6 @@ impl<K: Hash + Eq, V, S: BuildHasher> DiagnosticHashMap<K, V, S>
             position,
             self.map.raw_buffer(),
             canary_addr,
-            self.journal.len(),
         );
     }
 }
@@ -214,6 +205,6 @@ impl<K: Hash + Eq, V, S: BuildHasher> Drop for DiagnosticHashMap<K, V, S>
 }

 extern "C" {
-    pub fn Gecko_AnnotateCrashReport(key_str: *const ::std::os::raw::c_char,
-                                     value_str: *const ::std::os::raw::c_char);
+    pub fn Gecko_AddBufferToCrashReport(addr: *const ::std::os::raw::c_void,
+                                        bytes: usize);
 }


@@ -694,12 +694,6 @@ impl<K, V, S> HashMap<K, V, S>
         self.table.raw_buffer()
     }

-    /// Verify that the table metadata is internally consistent.
-    #[inline]
-    pub fn verify(&self) {
-        self.table.verify();
-    }
-
     /// Reserves capacity for at least `additional` more elements to be inserted
     /// in the `HashMap`. The collection may reserve more space to avoid
     /// frequent reallocations.


@@ -247,21 +247,13 @@ impl<K, V> RawBucket<K, V> {
         (self.hash(), self.pair())
     }

-    fn assert_bounds(&self, bytes_allocated: usize, size: Option<usize>) {
+    fn assert_bounds(&self, bytes_allocated: usize) {
         let base = self.hash_start as *mut u8;
         let (h, p) = unsafe { self.hash_pair() };
         assert!((h as *mut u8) < (p as *mut u8), "HashMap Corruption - hash offset not below pair offset");
         let end = unsafe { p.offset(1) } as *mut u8;
-        assert!(end > base, "HashMap Corruption - end={:?}, base={:?}, idx={}, alloc={}, size={:?}", end, base, self.idx, bytes_allocated, size);
-        assert!(
-            end <= unsafe { base.offset(bytes_allocated as isize) },
-            "HashMap Corruption - end={:?}, base={:?}, idx={}, alloc={}, size={:?}",
-            end,
-            base,
-            self.idx,
-            bytes_allocated,
-            size,
-        );
+        assert!(end > base, "HashMap Corruption - end={:?}, base={:?}", end, base);
+        assert!(end <= unsafe { base.offset(bytes_allocated as isize) }, "HashMap Corruption - end={:?}, base={:?}", end, base);
     }
 }
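
The simplified assert_bounds above is plain pointer arithmetic: the end of the bucket's (hash, pair) slot must lie strictly after the allocation base and no further than base plus bytes_allocated. A self-contained sketch of the same check (not part of this commit), with a Vec standing in for the table's storage:

    // Sketch of the invariant assert_bounds enforces, with a Vec<(u64, u32)>
    // standing in for the table's hash/pair storage.
    fn assert_bounds<T>(buf: &[T], idx: usize) {
        let base = buf.as_ptr() as usize;
        let bytes_allocated = buf.len() * std::mem::size_of::<T>();
        // One element past the slot at `idx`, as in `p.offset(1)` above.
        let end = unsafe { buf.as_ptr().add(idx + 1) } as usize;
        assert!(end > base, "HashMap Corruption - end={:#x}, base={:#x}", end, base);
        assert!(
            end <= base + bytes_allocated,
            "HashMap Corruption - end={:#x}, base={:#x}", end, base
        );
    }

    fn main() {
        let table: Vec<(u64, u32)> = vec![(0, 0); 8];
        assert_bounds(&table, 7);    // last valid slot: passes
        // assert_bounds(&table, 8); // one past the end: would panic
    }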
@@ -439,13 +431,13 @@ impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
     /// Modifies the bucket in place to make it point to the next slot.
     pub fn next(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated, None);
+        self.raw.assert_bounds(self.table.bytes_allocated);
     }

     /// Modifies the bucket in place to make it point to the previous slot.
     pub fn prev(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated, None);
+        self.raw.assert_bounds(self.table.bytes_allocated);
     }
 }
@@ -821,7 +813,6 @@ impl<K, V> RawTable<K, V> {
     }

     fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
-        self.verify();
         let hashes_size = self.capacity() * size_of::<HashUint>();
         let pairs_size = self.capacity() * size_of::<(K, V)>();
@@ -842,7 +833,7 @@ impl<K, V> RawTable<K, V> {
             }
         };

-        bucket.assert_bounds(self.bytes_allocated, Some(self.size));
+        bucket.assert_bounds(self.bytes_allocated);
         bucket
     }
@@ -852,20 +843,6 @@ impl<K, V> RawTable<K, V> {
         self.hashes.ptr() as *const u8
     }

-    /// Verify that the table metadata is internally consistent.
-    #[inline]
-    pub fn verify(&self) {
-        assert!(
-            self.capacity() == 0 || self.capacity().is_power_of_two(),
-            "HashMap Corruption: mask={}, sz={}, alloc={}", self.capacity_mask, self.size, self.bytes_allocated,
-        );
-        assert_eq!(
-            self.capacity() * (size_of::<usize>() + size_of::<(K, V)>()),
-            self.bytes_allocated,
-            "HashMap Corruption: mask={}, sz={}, alloc={}", self.capacity_mask, self.size, self.bytes_allocated,
-        );
-    }
-
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
@@ -956,7 +933,7 @@ impl<K, V> RawTable<K, V> {
             }
         }
         raw.idx = raw.idx.checked_sub(1).unwrap();
-        raw.assert_bounds(self.bytes_allocated, Some(self.size));
+        raw.assert_bounds(self.bytes_allocated);
     }
 }
@@ -1016,12 +993,12 @@ impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
                 self.elems_left = self.elems_left.checked_sub(1).unwrap();
                 if self.elems_left != 0 {
                     self.raw.as_mut().unwrap().idx += 1;
-                    self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated, None);
+                    self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
                 }
                 return Some(item);
             }
             self.raw.as_mut().unwrap().idx += 1;
-            self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated, None);
+            self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
         }
     }
 }
@@ -1230,9 +1207,9 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
             }
             buckets.idx += 1;
-            buckets.assert_bounds(self.bytes_allocated, None);
+            buckets.assert_bounds(self.bytes_allocated);
             new_buckets.idx += 1;
-            new_buckets.assert_bounds(new_ht.bytes_allocated, None);
+            new_buckets.assert_bounds(new_ht.bytes_allocated);
         }
         new_ht.size = self.size();
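
The RawTable::verify removed in the last file encoded the table's layout arithmetic: capacity is zero or a power of two, and a single allocation holds capacity hash words (one usize each) followed by capacity (K, V) pairs. A standalone recomputation of that size (not part of this commit; the figures assume a 64-bit target):

    use std::mem::size_of;

    // Sketch: the allocation size the removed verify() asserted, recomputed
    // for a hypothetical table of (u64 key, u32 value) entries.
    fn expected_bytes<K, V>(capacity: usize) -> usize {
        assert!(
            capacity == 0 || capacity.is_power_of_two(),
            "HashMap Corruption: capacity={}", capacity
        );
        // One hash word (usize) per bucket plus one (K, V) pair per bucket.
        capacity * (size_of::<usize>() + size_of::<(K, V)>())
    }

    fn main() {
        // 16 buckets of (u64, u32): 16 * (8 + 16) = 384 bytes, since
        // (u64, u32) is padded to 16 bytes for alignment.
        assert_eq!(expected_bytes::<u64, u32>(16), 384);
    }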