Move to components/hashglobe

commit 5d3115fa8e (parent ed0fa304fc)
11 changed files with 0 additions and 0 deletions (the files were moved, so git records no net added or deleted lines)
components/hashglobe/src/alloc.rs (new file, 152 lines)
@@ -0,0 +1,152 @@
// FORK NOTE: Copied from liballoc_system, removed unnecessary APIs,
// APIs take size/align directly instead of Layout


// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
#[cfg(all(any(target_arch = "x86",
              target_arch = "arm",
              target_arch = "mips",
              target_arch = "powerpc",
              target_arch = "powerpc64",
              target_arch = "asmjs",
              target_arch = "wasm32")))]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(target_arch = "x86_64",
              target_arch = "aarch64",
              target_arch = "mips64",
              target_arch = "s390x",
              target_arch = "sparc64")))]
const MIN_ALIGN: usize = 16;

pub use self::platform::{alloc, dealloc};

#[cfg(any(unix, target_os = "redox"))]
mod platform {
    extern crate libc;

    use std::ptr;

    use super::MIN_ALIGN;

    #[inline]
    pub unsafe fn alloc(size: usize, align: usize) -> *mut u8 {
        let ptr = if align <= MIN_ALIGN {
            libc::malloc(size) as *mut u8
        } else {
            aligned_malloc(size, align)
        };
        ptr
    }

    #[inline]
    pub unsafe fn dealloc(ptr: *mut u8, _align: usize) {
        libc::free(ptr as *mut libc::c_void)
    }

    #[cfg(any(target_os = "android", target_os = "redox"))]
    #[inline]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        // On android we currently target API level 9 which unfortunately
        // doesn't have the `posix_memalign` API used below. Instead we use
        // `memalign`, but this unfortunately has the property on some systems
        // where the memory returned cannot be deallocated by `free`!
        //
        // Upon closer inspection, however, this appears to work just fine with
        // Android, so for this platform we should be fine to call `memalign`
        // (which is present in API level 9). Some helpful references could
        // possibly be chromium using memalign [1], attempts at documenting that
        // memalign + free is ok [2] [3], or the current source of chromium
        // which still uses memalign on android [4].
        //
        // [1]: https://codereview.chromium.org/10796020/
        // [2]: https://code.google.com/p/android/issues/detail?id=35391
        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
        //      /memory/aligned_memory.cc
        libc::memalign(align, size) as *mut u8
    }

    #[cfg(not(any(target_os = "android", target_os = "redox")))]
    #[inline]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        let mut out = ptr::null_mut();
        let ret = libc::posix_memalign(&mut out, align, size);
        if ret != 0 {
            ptr::null_mut()
        } else {
            out as *mut u8
        }
    }
}

#[cfg(windows)]
#[allow(bad_style)]
mod platform {

    use super::MIN_ALIGN;
    type LPVOID = *mut u8;
    type HANDLE = LPVOID;
    type SIZE_T = usize;
    type DWORD = u32;
    type BOOL = i32;


    extern "system" {
        fn GetProcessHeap() -> HANDLE;
        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
        fn GetLastError() -> DWORD;
    }

    #[repr(C)]
    struct Header(*mut u8);

    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
        &mut *(ptr as *mut Header).offset(-1)
    }

    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
        let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
        *get_header(aligned) = Header(ptr);
        aligned
    }

    #[inline]
    unsafe fn allocate_with_flags(size: usize, align: usize, flags: DWORD) -> *mut u8 {
        if align <= MIN_ALIGN {
            HeapAlloc(GetProcessHeap(), flags, size)
        } else {
            let size = size + align;
            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
            if ptr.is_null() {
                ptr
            } else {
                align_ptr(ptr, align)
            }
        }
    }

    #[inline]
    pub unsafe fn alloc(size: usize, align: usize) -> *mut u8 {
        allocate_with_flags(size, align, 0)
    }

    #[inline]
    pub unsafe fn dealloc(ptr: *mut u8, align: usize) {
        if align <= MIN_ALIGN {
            let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}",
                          GetLastError());
        } else {
            let header = get_header(ptr);
            let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}",
                          GetLastError());
        }
    }
}
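The non-obvious part of this file is the Windows over-alignment trick: when align exceeds MIN_ALIGN, allocate_with_flags over-allocates by align bytes, rounds the pointer up, and stashes the original pointer in a Header one slot below the aligned address so dealloc can recover it; the unix path instead defers to posix_memalign (or memalign on Android/Redox). A minimal sketch of the caller-side contract these two functions establish (illustrative only, not part of this commit; alloc is a private module in this crate, so such code would live in-crate):

    // Sketch only: exercises the over-aligned path (align > MIN_ALIGN).
    unsafe {
        let (size, align) = (4096, 64);
        let p = alloc(size, align);
        assert!(!p.is_null(), "allocation failed");
        assert_eq!(p as usize % align, 0); // pointer honors the requested alignment
        dealloc(p, align); // same align must be passed back for the Windows path
    }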
components/hashglobe/src/fake.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
//! This module contains shims around the stdlib HashMap
//! that add fallible methods
//!
//! These methods are a lie. They are not actually fallible. This is just to make
//! it smooth to switch between hashmap impls in a codebase.

use std::hash::{BuildHasher, Hash};
use std::collections::HashMap as StdMap;
use std::collections::HashSet as StdSet;
use std::ops::{Deref, DerefMut};

pub use std::collections::hash_map::{Entry, RandomState};

pub struct HashMap<K, V, S = RandomState>(StdMap<K, V, S>);


use FailedAllocationError;

impl<K, V, S> Deref for HashMap<K, V, S> {
    type Target = StdMap<K, V, S>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<K, V, S> DerefMut for HashMap<K, V, S> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<K: Hash + Eq, V> HashMap<K, V, RandomState> {

    #[inline]
    pub fn new() -> HashMap<K, V, RandomState> {
        HashMap(StdMap::new())
    }

    #[inline]
    pub fn with_capacity(capacity: usize) -> HashMap<K, V, RandomState> {
        HashMap(StdMap::with_capacity(capacity))
    }

    #[inline]
    pub fn try_with_capacity(capacity: usize) -> Result<HashMap<K, V, RandomState>, FailedAllocationError> {
        Ok(HashMap(StdMap::with_capacity(capacity)))
    }
}


impl<K, V, S> HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    #[inline]
    pub fn try_with_hasher(hash_builder: S) -> Result<HashMap<K, V, S>, FailedAllocationError> {
        Ok(HashMap(StdMap::with_hasher(hash_builder)))
    }

    #[inline]
    pub fn try_with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Result<HashMap<K, V, S>, FailedAllocationError> {
        Ok(HashMap(StdMap::with_capacity_and_hasher(capacity, hash_builder)))
    }

    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
        HashMap(StdMap::with_capacity_and_hasher(capacity, hash_builder))
    }


    #[inline]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), FailedAllocationError> {
        Ok(self.reserve(additional))
    }

    pub fn try_shrink_to_fit(&mut self) -> Result<(), FailedAllocationError> {
        Ok(self.shrink_to_fit())
    }

    pub fn try_entry(&mut self, key: K) -> Result<Entry<K, V>, FailedAllocationError> {
        Ok(self.entry(key))
    }

    #[inline]
    pub fn try_insert(&mut self, k: K, v: V) -> Result<Option<V>, FailedAllocationError> {
        Ok(self.insert(k, v))
    }
}

pub struct HashSet<T, S = RandomState>(StdSet<T, S>);


impl<T, S> Deref for HashSet<T, S> {
    type Target = StdSet<T, S>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T, S> DerefMut for HashSet<T, S> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<T: Hash + Eq> HashSet<T, RandomState> {

    #[inline]
    pub fn new() -> HashSet<T, RandomState> {
        HashSet(StdSet::new())
    }

    #[inline]
    pub fn with_capacity(capacity: usize) -> HashSet<T, RandomState> {
        HashSet(StdSet::with_capacity(capacity))
    }
}


impl<T, S> HashSet<T, S>
    where T: Eq + Hash,
          S: BuildHasher
{

    #[inline]
    pub fn with_hasher(hasher: S) -> HashSet<T, S> {
        HashSet(StdSet::with_hasher(hasher))
    }


    #[inline]
    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> HashSet<T, S> {
        HashSet(StdSet::with_capacity_and_hasher(capacity, hasher))
    }

    #[inline]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), FailedAllocationError> {
        Ok(self.reserve(additional))
    }

    #[inline]
    pub fn try_shrink_to_fit(&mut self) -> Result<(), FailedAllocationError> {
        Ok(self.shrink_to_fit())
    }

    #[inline]
    pub fn try_insert(&mut self, value: T) -> Result<bool, FailedAllocationError> {
        Ok(self.insert(value))
    }
}
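The point of this always-Ok try_* surface is that call sites can adopt Result plumbing now and later swap in a genuinely fallible map without changing. A hypothetical caller written against the shim (sketch only; the crate path hashglobe and the helper insert_all are assumptions, not part of this commit):

    // Sketch only, not part of this commit.
    use hashglobe::fake::HashMap;
    use hashglobe::FailedAllocationError;

    fn insert_all(pairs: Vec<(String, u32)>)
                  -> Result<HashMap<String, u32>, FailedAllocationError> {
        let mut map = HashMap::try_with_capacity(pairs.len())?;
        for (k, v) in pairs {
            map.try_insert(k, v)?; // always Ok today; `?` future-proofs the call
        }
        Ok(map) // reads go through Deref straight to the inner std HashMap
    }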
components/hashglobe/src/hash_map.rs (new file, 3095 lines)
File diff suppressed because it is too large.
components/hashglobe/src/hash_set.rs (new file, 1623 lines)
File diff suppressed because it is too large.
components/hashglobe/src/lib.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
pub use std::*;

mod table;
mod shim;
mod alloc;
pub mod hash_map;
pub mod hash_set;

pub mod fake;

use std::{error, fmt};

trait Recover<Q: ?Sized> {
    type Key;

    fn get(&self, key: &Q) -> Option<&Self::Key>;
    fn take(&mut self, key: &Q) -> Option<Self::Key>;
    fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}

#[derive(Debug)]
pub struct FailedAllocationError {
    reason: &'static str,
}

impl error::Error for FailedAllocationError {
    fn description(&self) -> &str {
        self.reason
    }
}

impl fmt::Display for FailedAllocationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.reason.fmt(f)
    }
}
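Because FailedAllocationError implements both error::Error and fmt::Display, it boxes and prints like any other error. For instance (sketch only; grow_map is a hypothetical helper, not part of this commit):

    // Sketch only, not part of this commit.
    fn grow_map(map: &mut fake::HashMap<u32, u32>) -> Result<(), Box<error::Error>> {
        map.try_reserve(1024)?; // `?` boxes FailedAllocationError via its Error impl
        Ok(())
    }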
components/hashglobe/src/shim.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
use std::marker::PhantomData;

pub struct NonZeroPtr<T: 'static>(&'static T);

impl<T: 'static> NonZeroPtr<T> {
    pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
        NonZeroPtr(&*ptr)
    }
    pub fn as_ptr(&self) -> *mut T {
        self.0 as *const T as *mut T
    }
}

pub struct Unique<T: 'static> {
    ptr: NonZeroPtr<T>,
    _marker: PhantomData<T>,
}

impl<T: 'static> Unique<T> {
    pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Unique {
            ptr: NonZeroPtr::new_unchecked(ptr),
            _marker: PhantomData,
        }
    }
    pub fn as_ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }
}

unsafe impl<T: Send + 'static> Send for Unique<T> { }

unsafe impl<T: Sync + 'static> Sync for Unique<T> { }

pub struct Shared<T: 'static> {
    ptr: NonZeroPtr<T>,
    _marker: PhantomData<T>,
    // force it to be !Send/!Sync
    _marker2: PhantomData<*const u8>,
}

impl<T: 'static> Shared<T> {
    pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Shared {
            ptr: NonZeroPtr::new_unchecked(ptr),
            _marker: PhantomData,
            _marker2: PhantomData,
        }
    }

    pub unsafe fn as_mut(&self) -> &mut T {
        &mut *self.ptr.as_ptr()
    }
}

impl<'a, T> From<&'a mut T> for Shared<T> {
    fn from(reference: &'a mut T) -> Self {
        unsafe { Shared::new_unchecked(reference) }
    }
}
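NonZeroPtr launders a raw pointer through a &'static T, which hands the compiler a non-null guarantee (so Option<NonZeroPtr<T>> stays pointer-sized) without relying on the then-unstable NonZero/Unique/Shared types in core. A sketch of the intended round-trip through an owned allocation (illustrative only; the Box round-trip is an assumption, not code from this commit):

    // Sketch only, not part of this commit.
    let raw: *mut u32 = Box::into_raw(Box::new(5));
    let unique = unsafe { Unique::new_unchecked(raw) };
    // ... the table stores `unique`; eventually the owner frees it:
    unsafe { drop(Box::from_raw(unique.as_ptr())); }

Shared is the !Send/!Sync counterpart (its PhantomData<*const u8> field opts out of both auto traits), and can also be built safely from a &mut T through its From impl.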
components/hashglobe/src/table.rs (new file, 1204 lines)
File diff suppressed because it is too large.