From f7ad19f50071cc13c6792ff4bef4ce855e60be36 Mon Sep 17 00:00:00 2001
From: Manish Goregaokar
Date: Mon, 23 Oct 2017 13:45:57 -0700
Subject: [PATCH] Revert "Assert more things in hashtables."

This reverts commit e5023a3eb48675a480dd50fe5cf19da2f6b9534c.
---
 components/hashglobe/src/table.rs | 99 ++++++++-----------------------
 1 file changed, 26 insertions(+), 73 deletions(-)

diff --git a/components/hashglobe/src/table.rs b/components/hashglobe/src/table.rs
index 788e98aa5d4..8fc306a39c3 100644
--- a/components/hashglobe/src/table.rs
+++ b/components/hashglobe/src/table.rs
@@ -43,7 +43,7 @@ struct TaggedHashUintPtr(Unique<HashUint>);
 impl TaggedHashUintPtr {
     #[inline]
     unsafe fn new(ptr: *mut HashUint) -> Self {
-        assert!(ptr as usize & 1 == 0 || ptr as usize == EMPTY as usize);
+        debug_assert!(ptr as usize & 1 == 0 || ptr as usize == EMPTY as usize);
         TaggedHashUintPtr(Unique::new_unchecked(ptr))
     }
 
@@ -117,7 +117,6 @@ pub struct RawTable<K, V> {
     capacity_mask: usize,
     size: usize,
     hashes: TaggedHashUintPtr,
-    bytes_allocated: usize,
 
     // Because K/V do not appear directly in any of the types in the struct,
     // inform rustc that in fact instances of K and V are reachable from here.
@@ -246,15 +245,6 @@ impl<K, V> RawBucket<K, V> {
     unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
         (self.hash(), self.pair())
     }
-
-    fn assert_bounds(&self, bytes_allocated: usize) {
-        let base = self.hash_start as *mut u8;
-        let (h, p) = unsafe { self.hash_pair() };
-        assert!((h as *mut u8) < (p as *mut u8), "HashMap Corruption - hash offset not below pair offset");
-        let end = unsafe { p.offset(1) } as *mut u8;
-        assert!(end > base, "HashMap Corruption - end={:?}, base={:?}", end, base);
-        assert!(end <= unsafe { base.offset(bytes_allocated as isize) }, "HashMap Corruption - end={:?}, base={:?}", end, base);
-    }
 }
 
 // Buckets hold references to the table.
@@ -358,7 +348,8 @@ impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
     pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
         // if capacity is 0, then the RawBucket will be populated with bogus pointers.
         // This is an uncommon case though, so avoid it in release builds.
-        assert!(table.capacity() > 0, "HashMap Corruption - Table should have capacity at this point");
+        debug_assert!(table.capacity() > 0,
+                      "Table should have capacity at this point");
         let ib_index = ib_index & table.capacity_mask;
         Bucket {
             raw: table.raw_bucket_at(ib_index),
@@ -431,13 +422,11 @@ impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
     /// Modifies the bucket in place to make it point to the next slot.
     pub fn next(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated);
     }
 
     /// Modifies the bucket in place to make it point to the previous slot.
     pub fn prev(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated);
     }
 }
 
@@ -560,7 +549,7 @@ impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
     /// This works similarly to `put`, building an `EmptyBucket` out of the
     /// taken bucket.
     pub fn take(self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
-        self.table.size = self.table.size.checked_sub(1).unwrap();
+        self.table.size -= 1;
 
         unsafe {
             *self.raw.hash() = EMPTY_BUCKET;
@@ -675,7 +664,7 @@ impl<K, V, M> GapThenFull<K, V, M>
 /// Panics if `target_alignment` is not a power of two.
 #[inline]
 fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
-    assert!(target_alignment.is_power_of_two(), "HashMap Corruption - alignment not power of two");
+    assert!(target_alignment.is_power_of_two());
     (unrounded + target_alignment - 1) & !(target_alignment - 1)
 }
 
@@ -745,11 +734,9 @@ impl<K, V> RawTable<K, V> {
                 size: 0,
                 capacity_mask: capacity.wrapping_sub(1),
                 hashes: TaggedHashUintPtr::new(EMPTY as *mut HashUint),
-                bytes_allocated: 0,
                 marker: marker::PhantomData,
             });
         }
-        assert!(capacity.is_power_of_two(), "HashMap Corruption - capacity not power of two");
 
         // No need for `checked_mul` before a more restrictive check performed
         // later in this method.
@@ -801,13 +788,11 @@ impl<K, V> RawTable<K, V> {
         ptr::write_bytes(buffer, 0xe7, size);
 
         let hashes = buffer.offset(hash_offset as isize) as *mut HashUint;
-        assert!(hashes as *mut u8 == buffer, "HashMap Corruption - Nonzero hash_offset");
 
         Ok(RawTable {
             capacity_mask: capacity.wrapping_sub(1),
             size: 0,
             hashes: TaggedHashUintPtr::new(hashes),
-            bytes_allocated: size,
             marker: marker::PhantomData,
         })
     }
@@ -818,23 +803,17 @@ impl<K, V> RawTable<K, V> {
 
         let (pairs_offset, _, oflo) =
             calculate_offsets(hashes_size, pairs_size, align_of::<(K, V)>());
-        assert!(!oflo, "HashMap Corruption - capacity overflow");
-        assert!(pairs_offset as isize > 0, "HashMap Corruption - pairs offset={}", pairs_offset);
-        assert!(index as isize >= 0, "HashMap Corruption - index={}", index);
-        assert!(index < self.capacity(), "HashMap Corruption - index={}", index);
+        debug_assert!(!oflo, "capacity overflow");
 
         let buffer = self.hashes.ptr() as *mut u8;
-        let bucket = unsafe {
+        unsafe {
             RawBucket {
                 hash_start: buffer as *mut HashUint,
                 pair_start: buffer.offset(pairs_offset as isize) as *const (K, V),
                 idx: index,
                 _marker: marker::PhantomData,
             }
-        };
-
-        bucket.assert_bounds(self.bytes_allocated);
-        bucket
+        }
     }
 
     /// Returns a raw pointer to the table's buffer.
@@ -866,9 +845,8 @@ impl<K, V> RawTable<K, V> {
 
     fn raw_buckets(&self) -> RawBuckets<K, V> {
         RawBuckets {
-            raw: if self.capacity() == 0 { None } else { Some(self.raw_bucket_at(0)) },
+            raw: self.raw_bucket_at(0),
             elems_left: self.size,
-            bytes_allocated: self.bytes_allocated,
             marker: marker::PhantomData,
         }
     }
@@ -887,13 +865,12 @@ impl<K, V> RawTable<K, V> {
     }
 
     pub fn into_iter(self) -> IntoIter<K, V> {
-        let RawBuckets { raw, elems_left, bytes_allocated, .. } = self.raw_buckets();
+        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
         // Replace the marker regardless of lifetime bounds on parameters.
         IntoIter {
             iter: RawBuckets {
                 raw,
                 elems_left,
-                bytes_allocated,
                 marker: marker::PhantomData,
             },
             table: self,
@@ -901,13 +878,12 @@ impl<K, V> RawTable<K, V> {
     }
 
     pub fn drain(&mut self) -> Drain<K, V> {
-        let RawBuckets { raw, elems_left, bytes_allocated, .. } = self.raw_buckets();
+        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
         // Replace the marker regardless of lifetime bounds on parameters.
         Drain {
             iter: RawBuckets {
                 raw,
                 elems_left,
-                bytes_allocated,
                 marker: marker::PhantomData,
             },
             table: Shared::from(self),
@@ -919,21 +895,17 @@ impl<K, V> RawTable<K, V> {
     /// state and should only be used for dropping the table's remaining
     /// entries. It's used in the implementation of Drop.
     unsafe fn rev_drop_buckets(&mut self) {
+        // initialize the raw bucket past the end of the table
+        let mut raw = self.raw_bucket_at(self.capacity());
         let mut elems_left = self.size;
-        if elems_left == 0 {
-            return;
-        }
-        let mut raw = self.raw_bucket_at(self.capacity() - 1);
-        loop {
+
+        while elems_left != 0 {
+            raw.idx -= 1;
+
             if *raw.hash() != EMPTY_BUCKET {
+                elems_left -= 1;
                 ptr::drop_in_place(raw.pair());
-                elems_left = elems_left.checked_sub(1).unwrap();
-                if elems_left == 0 {
-                    return;
-                }
             }
-            raw.idx = raw.idx.checked_sub(1).unwrap();
-            raw.assert_bounds(self.bytes_allocated);
         }
     }
 
@@ -951,11 +923,8 @@ impl<K, V> RawTable<K, V> {
 /// A raw iterator. The basis for some other iterators in this module. Although
 /// this interface is safe, it's not used outside this module.
 struct RawBuckets<'a, K, V> {
-    // We use an Option here to avoid ever constructing a RawBucket for
-    // invalid memory.
-    raw: Option<RawBucket<K, V>>,
+    raw: RawBucket<K, V>,
     elems_left: usize,
-    bytes_allocated: usize,
 
     // Strictly speaking, this should be &'a (K,V), but that would
     // require that K:'a, and we often use RawBuckets<'static...> for
@@ -971,7 +940,6 @@ impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
         RawBuckets {
             raw: self.raw,
             elems_left: self.elems_left,
-            bytes_allocated: self.bytes_allocated,
             marker: marker::PhantomData,
         }
     }
@@ -988,17 +956,12 @@ impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
 
         loop {
             unsafe {
-                let item = self.raw.unwrap();
+                let item = self.raw;
+                self.raw.idx += 1;
                 if *item.hash() != EMPTY_BUCKET {
-                    self.elems_left = self.elems_left.checked_sub(1).unwrap();
-                    if self.elems_left != 0 {
-                        self.raw.as_mut().unwrap().idx += 1;
-                        self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
-                    }
+                    self.elems_left -= 1;
                     return Some(item);
                 }
-                self.raw.as_mut().unwrap().idx += 1;
-                self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
             }
         }
     }
@@ -1133,7 +1096,7 @@ impl<K, V> Iterator for IntoIter<K, V> {
 
     fn next(&mut self) -> Option<(SafeHash, K, V)> {
         self.iter.next().map(|raw| {
-            self.table.size = self.table.size.checked_sub(1).unwrap();
+            self.table.size -= 1;
             unsafe {
                 let (k, v) = ptr::read(raw.pair());
                 (SafeHash { hash: *raw.hash() }, k, v)
@@ -1159,7 +1122,7 @@ impl<'a, K, V> Iterator for Drain<'a, K, V> {
     fn next(&mut self) -> Option<(SafeHash, K, V)> {
         self.iter.next().map(|raw| {
             unsafe {
-                self.table.as_mut().size = self.table.as_mut().size.checked_sub(1).unwrap();
+                self.table.as_mut().size -= 1;
                 let (k, v) = ptr::read(raw.pair());
                 (SafeHash { hash: ptr::replace(&mut *raw.hash(), EMPTY_BUCKET) }, k, v)
             }
@@ -1188,28 +1151,18 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
         unsafe {
             let cap = self.capacity();
             let mut new_ht = RawTable::new_uninitialized(cap);
-            if cap == 0 {
-                return new_ht;
-            }
 
             let mut new_buckets = new_ht.raw_bucket_at(0);
             let mut buckets = self.raw_bucket_at(0);
-            loop {
+            while buckets.idx < cap {
                 *new_buckets.hash() = *buckets.hash();
                 if *new_buckets.hash() != EMPTY_BUCKET {
                     let pair_ptr = buckets.pair();
                     let kv = ((*pair_ptr).0.clone(), (*pair_ptr).1.clone());
                     ptr::write(new_buckets.pair(), kv);
                 }
-
-                if buckets.idx == cap - 1 {
-                    break;
-                }
-
                 buckets.idx += 1;
-                buckets.assert_bounds(self.bytes_allocated);
                 new_buckets.idx += 1;
-                new_buckets.assert_bounds(new_ht.bytes_allocated);
             }
 
             new_ht.size = self.size();
@@ -1248,7 +1201,7 @@ impl<K, V> Drop for RawTable<K, V> {
                                                        align_of::<HashUint>(),
                                                        pairs_size,
                                                        align_of::<(K, V)>());
-        assert!(!oflo, "HashMap Corruption - should be impossible");
+        debug_assert!(!oflo, "should be impossible");
         unsafe {
             dealloc(self.hashes.ptr() as *mut u8, align);
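
A note on the assertion downgrade above: `assert!` is evaluated in every build, while `debug_assert!` is evaluated only when debug assertions are enabled (the default for debug builds) and compiles to nothing in release builds. A minimal standalone Rust sketch of that difference, reusing two of the conditions touched by this revert (the values and the `main` wrapper here are illustrative, not taken from hashglobe):

    fn main() {
        let capacity: usize = 8;

        // Checked in both debug and release builds.
        assert!(capacity.is_power_of_two());

        // Checked only when debug assertions are enabled (e.g. `cargo build`);
        // compiled out entirely under `cargo build --release`.
        debug_assert!(capacity > 0, "Table should have capacity at this point");

        println!("capacity = {}", capacity);
    }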