Update rustc to revision 2cfb5acb5a2751c759627377e602bac4f88f2d19.
This commit is contained in:
parent cf616b90a2
commit 16c7060bc8
153 changed files with 2095 additions and 1298 deletions
@@ -69,14 +69,12 @@ impl<K,V> HashCache<K,V> where K: Clone + PartialEq + Eq + Hash, V: Clone {
 #[test]
 fn test_hashcache() {
     let mut cache: HashCache<uint, Cell<&str>> = HashCache::new();
-    let one = Cell::new("one");
-    let two = Cell::new("two");

-    cache.insert(1, one);
+    cache.insert(1, Cell::new("one"));
     assert!(cache.find(&1).is_some());
     assert!(cache.find(&2).is_none());

-    cache.find_or_create(&2, |_v| { two });
+    cache.find_or_create(&2, |_v| { Cell::new("two") });
     assert!(cache.find(&1).is_some());
     assert!(cache.find(&2).is_some());
 }
@@ -233,7 +231,7 @@ fn test_lru_cache() {
     assert!(cache.find(&4).is_some()); // (2, 4) (no change)

     // Test find_or_create.
-    cache.find_or_create(&1, |_| { one }); // (4, 1)
+    cache.find_or_create(&1, |_| { Cell::new("one") }); // (4, 1)

     assert!(cache.find(&1).is_some()); // (4, 1) (no change)
     assert!(cache.find(&2).is_none()); // (4, 1) (no change)

@@ -11,7 +11,7 @@ use text_writer::TextWriter;

 macro_rules! define_cursor {
     ($( $css: expr => $variant: ident = $value: expr, )+) => {
-        #[deriving(Clone, PartialEq, Eq, FromPrimitive, Show)]
+        #[deriving(Clone, Copy, PartialEq, Eq, FromPrimitive, Show)]
         #[repr(u8)]
         pub enum Cursor {
             $( $variant = $value ),+

components/util/deque/mod.rs — new file (660 lines):

// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A (mostly) lock-free concurrent work-stealing deque
//!
//! This module contains an implementation of the Chase-Lev work stealing deque
//! described in "Dynamic Circular Work-Stealing Deque". The implementation is
//! heavily based on the pseudocode found in the paper.
//!
//! This implementation does not want to have the restriction of a garbage
//! collector for reclamation of buffers, and instead it uses a shared pool of
//! buffers. This shared pool is required for correctness in this
//! implementation.
//!
//! The only lock-synchronized portions of this deque are the buffer allocation
//! and deallocation portions. Otherwise all operations are lock-free.
//!
//! # Example
//!
//!     use util::deque::BufferPool;
//!
//!     let mut pool = BufferPool::new();
//!     let (mut worker, mut stealer) = pool.deque();
//!
//!     // Only the worker may push/pop
//!     worker.push(1i);
//!     worker.pop();
//!
//!     // Stealers take data from the other end of the deque
//!     worker.push(1i);
//!     stealer.steal();
//!
//!     // Stealers can be cloned to have many stealers stealing in parallel
//!     worker.push(1i);
//!     let mut stealer2 = stealer.clone();
//!     stealer2.steal();

#![experimental]

// NB: the "buffer pool" strategy is not done for speed, but rather for
// correctness. For more info, see the comment on `swap_buffer`

// FIXME: all atomic operations in this module use a SeqCst ordering. That is
// probably overkill

pub use self::Stolen::{Empty, Abort, Data};

use alloc::arc::Arc;
use alloc::heap::{allocate, deallocate};
use std::kinds::marker;
use std::mem::{forget, min_align_of, size_of, transmute};
use std::ptr;

use std::sync::Mutex;
use std::sync::atomic::{AtomicInt, AtomicPtr, SeqCst};

// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
static K: int = 4;

// Minimum number of bits that a buffer size should be. No buffer will resize to
// under this value, and all deques will initially contain a buffer of this
// size.
//
// The size in question is 1 << MIN_BITS
static MIN_BITS: uint = 7;

struct Deque<T> {
    bottom: AtomicInt,
    top: AtomicInt,
    array: AtomicPtr<Buffer<T>>,
    pool: BufferPool<T>,
}

/// Worker half of the work-stealing deque. This worker has exclusive access to
/// one side of the deque, and uses `push` and `pop` method to manipulate it.
///
/// There may only be one worker per deque.
pub struct Worker<T> {
    deque: Arc<Deque<T>>,
    _noshare: marker::NoSync,
}

/// The stealing half of the work-stealing deque. Stealers have access to the
/// opposite end of the deque from the worker, and they only have access to the
/// `steal` method.
pub struct Stealer<T> {
    deque: Arc<Deque<T>>,
    _noshare: marker::NoSync,
}

/// When stealing some data, this is an enumeration of the possible outcomes.
#[deriving(PartialEq, Show)]
pub enum Stolen<T> {
    /// The deque was empty at the time of stealing
    Empty,
    /// The stealer lost the race for stealing data, and a retry may return more
    /// data.
    Abort,
    /// The stealer has successfully stolen some data.
    Data(T),
}

/// The allocation pool for buffers used by work-stealing deques. Right now this
/// structure is used for reclamation of memory after it is no longer in use by
/// deques.
///
/// This data structure is protected by a mutex, but it is rarely used. Deques
/// will only use this structure when allocating a new buffer or deallocating a
/// previous one.
pub struct BufferPool<T> {
    // FIXME: This entire file was copied from std::sync::deque before it was removed,
    // I converted `Exclusive` to `Mutex` here, but that might not be ideal
    pool: Arc<Mutex<Vec<Box<Buffer<T>>>>>,
}

/// An internal buffer used by the chase-lev deque. This structure is actually
/// implemented as a circular buffer, and is used as the intermediate storage of
/// the data in the deque.
///
/// This type is implemented with *T instead of Vec<T> for two reasons:
///
/// 1. There is nothing safe about using this buffer. This easily allows the
///    same value to be read twice in to rust, and there is nothing to
///    prevent this. The usage by the deque must ensure that one of the
///    values is forgotten. Furthermore, we only ever want to manually run
///    destructors for values in this buffer (on drop) because the bounds
///    are defined by the deque it's owned by.
///
/// 2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
///    LLVM is probably pretty good at doing this already.
struct Buffer<T> {
    storage: *const T,
    log_size: uint,
}

impl<T: Send> BufferPool<T> {
    /// Allocates a new buffer pool which in turn can be used to allocate new
    /// deques.
    pub fn new() -> BufferPool<T> {
        BufferPool { pool: Arc::new(Mutex::new(Vec::new())) }
    }

    /// Allocates a new work-stealing deque which will send/receiving memory to
    /// and from this buffer pool.
    pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
        let a = Arc::new(Deque::new(self.clone()));
        let b = a.clone();
        (Worker { deque: a, _noshare: marker::NoSync },
         Stealer { deque: b, _noshare: marker::NoSync })
    }

    fn alloc(&mut self, bits: uint) -> Box<Buffer<T>> {
        unsafe {
            let mut pool = self.pool.lock();
            match pool.iter().position(|x| x.size() >= (1 << bits)) {
                Some(i) => pool.remove(i).unwrap(),
                None => box Buffer::new(bits)
            }
        }
    }

    fn free(&self, buf: Box<Buffer<T>>) {
        unsafe {
            let mut pool = self.pool.lock();
            match pool.iter().position(|v| v.size() > buf.size()) {
                Some(i) => pool.insert(i, buf),
                None => pool.push(buf),
            }
        }
    }
}

impl<T: Send> Clone for BufferPool<T> {
    fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
}

impl<T: Send> Worker<T> {
    /// Pushes data onto the front of this work queue.
    pub fn push(&self, t: T) {
        unsafe { self.deque.push(t) }
    }
    /// Pops data off the front of the work queue, returning `None` on an empty
    /// queue.
    pub fn pop(&self) -> Option<T> {
        unsafe { self.deque.pop() }
    }

    /// Gets access to the buffer pool that this worker is attached to. This can
    /// be used to create more deques which share the same buffer pool as this
    /// deque.
    pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
        &self.deque.pool
    }
}

impl<T: Send> Stealer<T> {
    /// Steals work off the end of the queue (opposite of the worker's end)
    pub fn steal(&self) -> Stolen<T> {
        unsafe { self.deque.steal() }
    }

    /// Gets access to the buffer pool that this stealer is attached to. This
    /// can be used to create more deques which share the same buffer pool as
    /// this deque.
    pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
        &self.deque.pool
    }
}

impl<T: Send> Clone for Stealer<T> {
    fn clone(&self) -> Stealer<T> {
        Stealer { deque: self.deque.clone(), _noshare: marker::NoSync }
    }
}

// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.

impl<T: Send> Deque<T> {
    fn new(mut pool: BufferPool<T>) -> Deque<T> {
        let buf = pool.alloc(MIN_BITS);
        Deque {
            bottom: AtomicInt::new(0),
            top: AtomicInt::new(0),
            array: AtomicPtr::new(unsafe { transmute(buf) }),
            pool: pool,
        }
    }

    unsafe fn push(&self, data: T) {
        let mut b = self.bottom.load(SeqCst);
        let t = self.top.load(SeqCst);
        let mut a = self.array.load(SeqCst);
        let size = b - t;
        if size >= (*a).size() - 1 {
            // You won't find this code in the chase-lev deque paper. This is
            // alluded to in a small footnote, however. We always free a buffer
            // when growing in order to prevent leaks.
            a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
            b = self.bottom.load(SeqCst);
        }
        (*a).put(b, data);
        self.bottom.store(b + 1, SeqCst);
    }

    unsafe fn pop(&self) -> Option<T> {
        let b = self.bottom.load(SeqCst);
        let a = self.array.load(SeqCst);
        let b = b - 1;
        self.bottom.store(b, SeqCst);
        let t = self.top.load(SeqCst);
        let size = b - t;
        if size < 0 {
            self.bottom.store(t, SeqCst);
            return None;
        }
        let data = (*a).get(b);
        if size > 0 {
            self.maybe_shrink(b, t);
            return Some(data);
        }
        if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
            self.bottom.store(t + 1, SeqCst);
            return Some(data);
        } else {
            self.bottom.store(t + 1, SeqCst);
            forget(data); // someone else stole this value
            return None;
        }
    }

    unsafe fn steal(&self) -> Stolen<T> {
        let t = self.top.load(SeqCst);
        let old = self.array.load(SeqCst);
        let b = self.bottom.load(SeqCst);
        let a = self.array.load(SeqCst);
        let size = b - t;
        if size <= 0 { return Empty }
        if size % (*a).size() == 0 {
            if a == old && t == self.top.load(SeqCst) {
                return Empty
            }
            return Abort
        }
        let data = (*a).get(t);
        if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
            Data(data)
        } else {
            forget(data); // someone else stole this value
            Abort
        }
    }

    unsafe fn maybe_shrink(&self, b: int, t: int) {
        let a = self.array.load(SeqCst);
        if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
            self.swap_buffer(b, a, (*a).resize(b, t, -1));
        }
    }

    // Helper routine not mentioned in the paper which is used in growing and
    // shrinking buffers to swap in a new buffer into place. As a bit of a
    // recap, the whole point that we need a buffer pool rather than just
    // calling malloc/free directly is that stealers can continue using buffers
    // after this method has called 'free' on it. The continued usage is simply
    // a read followed by a forget, but we must make sure that the memory can
    // continue to be read after we flag this buffer for reclamation.
    unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
                          buf: Buffer<T>) -> *mut Buffer<T> {
        let newbuf: *mut Buffer<T> = transmute(box buf);
        self.array.store(newbuf, SeqCst);
        let ss = (*newbuf).size();
        self.bottom.store(b + ss, SeqCst);
        let t = self.top.load(SeqCst);
        if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
            self.bottom.store(b, SeqCst);
        }
        self.pool.free(transmute(old));
        return newbuf;
    }
}


#[unsafe_destructor]
impl<T: Send> Drop for Deque<T> {
    fn drop(&mut self) {
        let t = self.top.load(SeqCst);
        let b = self.bottom.load(SeqCst);
        let a = self.array.load(SeqCst);
        // Free whatever is leftover in the dequeue, and then move the buffer
        // back into the pool.
        for i in range(t, b) {
            let _: T = unsafe { (*a).get(i) };
        }
        self.pool.free(unsafe { transmute(a) });
    }
}

#[inline]
fn buffer_alloc_size<T>(log_size: uint) -> uint {
    (1 << log_size) * size_of::<T>()
}

impl<T: Send> Buffer<T> {
    unsafe fn new(log_size: uint) -> Buffer<T> {
        let size = buffer_alloc_size::<T>(log_size);
        let buffer = allocate(size, min_align_of::<T>());
        if buffer.is_null() { ::alloc::oom() }
        Buffer {
            storage: buffer as *const T,
            log_size: log_size,
        }
    }

    fn size(&self) -> int { 1 << self.log_size }

    // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
    fn mask(&self) -> int { (1 << self.log_size) - 1 }

    unsafe fn elem(&self, i: int) -> *const T {
        self.storage.offset(i & self.mask())
    }

    // This does not protect against loading duplicate values of the same cell,
    // nor does this clear out the contents contained within. Hence, this is a
    // very unsafe method which the caller needs to treat specially in case a
    // race is lost.
    unsafe fn get(&self, i: int) -> T {
        ptr::read(self.elem(i))
    }

    // Unsafe because this unsafely overwrites possibly uninitialized or
    // initialized data.
    unsafe fn put(&self, i: int, t: T) {
        ptr::write(self.elem(i) as *mut T, t);
    }

    // Again, unsafe because this has incredibly dubious ownership violations.
    // It is assumed that this buffer is immediately dropped.
    unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
        // NB: not entirely obvious, but thanks to 2's complement,
        // casting delta to uint and then adding gives the desired
        // effect.
        let buf = Buffer::new(self.log_size + delta as uint);
        for i in range(t, b) {
            buf.put(i, self.get(i));
        }
        return buf;
    }
}

#[unsafe_destructor]
impl<T: Send> Drop for Buffer<T> {
    fn drop(&mut self) {
        // It is assumed that all buffers are empty on drop.
        let size = buffer_alloc_size::<T>(self.log_size);
        unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
    }
}

#[cfg(test)]
mod tests {
    use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};

    use std::mem;
    use rustrt::thread::Thread;
    use std::rand;
    use std::rand::Rng;
    use std::sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
                            AtomicUint, INIT_ATOMIC_UINT};
    use std::vec;

    #[test]
    fn smoke() {
        let pool = BufferPool::new();
        let (w, s) = pool.deque();
        assert_eq!(w.pop(), None);
        assert_eq!(s.steal(), Empty);
        w.push(1i);
        assert_eq!(w.pop(), Some(1));
        w.push(1);
        assert_eq!(s.steal(), Data(1));
        w.push(1);
        assert_eq!(s.clone().steal(), Data(1));
    }

    #[test]
    fn stealpush() {
        static AMT: int = 100000;
        let pool = BufferPool::<int>::new();
        let (w, s) = pool.deque();
        let t = Thread::start(proc() {
            let mut left = AMT;
            while left > 0 {
                match s.steal() {
                    Data(i) => {
                        assert_eq!(i, 1);
                        left -= 1;
                    }
                    Abort | Empty => {}
                }
            }
        });

        for _ in range(0, AMT) {
            w.push(1);
        }

        t.join();
    }

    #[test]
    fn stealpush_large() {
        static AMT: int = 100000;
        let pool = BufferPool::<(int, int)>::new();
        let (w, s) = pool.deque();
        let t = Thread::start(proc() {
            let mut left = AMT;
            while left > 0 {
                match s.steal() {
                    Data((1, 10)) => { left -= 1; }
                    Data(..) => panic!(),
                    Abort | Empty => {}
                }
            }
        });

        for _ in range(0, AMT) {
            w.push((1, 10));
        }

        t.join();
    }

    fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
                nthreads: int, amt: uint) {
        for _ in range(0, amt) {
            w.push(box 20);
        }
        let mut remaining = AtomicUint::new(amt);
        let unsafe_remaining: *mut AtomicUint = &mut remaining;

        let threads = range(0, nthreads).map(|_| {
            let s = s.clone();
            Thread::start(proc() {
                unsafe {
                    while (*unsafe_remaining).load(SeqCst) > 0 {
                        match s.steal() {
                            Data(box 20) => {
                                (*unsafe_remaining).fetch_sub(1, SeqCst);
                            }
                            Data(..) => panic!(),
                            Abort | Empty => {}
                        }
                    }
                }
            })
        }).collect::<Vec<Thread<()>>>();

        while remaining.load(SeqCst) > 0 {
            match w.pop() {
                Some(box 20) => { remaining.fetch_sub(1, SeqCst); }
                Some(..) => panic!(),
                None => {}
            }
        }

        for thread in threads.into_iter() {
            thread.join();
        }
    }

    #[test]
    fn run_stampede() {
        let pool = BufferPool::<Box<int>>::new();
        let (w, s) = pool.deque();
        stampede(w, s, 8, 10000);
    }

    #[test]
    fn many_stampede() {
        static AMT: uint = 4;
        let pool = BufferPool::<Box<int>>::new();
        let threads = range(0, AMT).map(|_| {
            let (w, s) = pool.deque();
            Thread::start(proc() {
                stampede(w, s, 4, 10000);
            })
        }).collect::<Vec<Thread<()>>>();

        for thread in threads.into_iter() {
            thread.join();
        }
    }

    #[test]
    fn stress() {
        static AMT: int = 100000;
        static NTHREADS: int = 8;
        static DONE: AtomicBool = INIT_ATOMIC_BOOL;
        static HITS: AtomicUint = INIT_ATOMIC_UINT;
        let pool = BufferPool::<int>::new();
        let (w, s) = pool.deque();

        let threads = range(0, NTHREADS).map(|_| {
            let s = s.clone();
            Thread::start(proc() {
                loop {
                    match s.steal() {
                        Data(2) => { HITS.fetch_add(1, SeqCst); }
                        Data(..) => panic!(),
                        _ if DONE.load(SeqCst) => break,
                        _ => {}
                    }
                }
            })
        }).collect::<Vec<Thread<()>>>();

        let mut rng = rand::task_rng();
        let mut expected = 0;
        while expected < AMT {
            if rng.gen_range(0i, 3) == 2 {
                match w.pop() {
                    None => {}
                    Some(2) => { HITS.fetch_add(1, SeqCst); },
                    Some(_) => panic!(),
                }
            } else {
                expected += 1;
                w.push(2);
            }
        }

        while HITS.load(SeqCst) < AMT as uint {
            match w.pop() {
                None => {}
                Some(2) => { HITS.fetch_add(1, SeqCst); },
                Some(_) => panic!(),
            }
        }
        DONE.store(true, SeqCst);

        for thread in threads.into_iter() {
            thread.join();
        }

        assert_eq!(HITS.load(SeqCst), expected as uint);
    }

    #[test]
    #[cfg_attr(windows, ignore)] // apparently windows scheduling is weird?
    fn no_starvation() {
        static AMT: int = 10000;
        static NTHREADS: int = 4;
        static DONE: AtomicBool = INIT_ATOMIC_BOOL;
        let pool = BufferPool::<(int, uint)>::new();
        let (w, s) = pool.deque();

        let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
            let s = s.clone();
            let unique_box = box AtomicUint::new(0);
            let thread_box = unsafe {
                *mem::transmute::<&Box<AtomicUint>,
                                  *const *mut AtomicUint>(&unique_box)
            };
            (Thread::start(proc() {
                unsafe {
                    loop {
                        match s.steal() {
                            Data((1, 2)) => {
                                (*thread_box).fetch_add(1, SeqCst);
                            }
                            Data(..) => panic!(),
                            _ if DONE.load(SeqCst) => break,
                            _ => {}
                        }
                    }
                }
            }), unique_box)
        }));

        let mut rng = rand::task_rng();
        let mut myhit = false;
        'outer: loop {
            for _ in range(0, rng.gen_range(0, AMT)) {
                if !myhit && rng.gen_range(0i, 3) == 2 {
                    match w.pop() {
                        None => {}
                        Some((1, 2)) => myhit = true,
                        Some(_) => panic!(),
                    }
                } else {
                    w.push((1, 2));
                }
            }

            for slot in hits.iter() {
                let amt = slot.load(SeqCst);
                if amt == 0 { continue 'outer; }
            }
            if myhit {
                break
            }
        }

        DONE.store(true, SeqCst);

        for thread in threads.into_iter() {
            thread.join();
        }
    }
}

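Note: the module docs above show the single-task API; in practice (see the workqueue change at the end of this commit) one task owns the Worker and each stealing task loops on steal(), handling all three Stolen outcomes. A minimal sketch in the same pre-1.0 dialect as the file above, modeled on the stealpush test; the thread setup and counts are illustrative only:

    // Sketch only: one worker pushes, one stealer drains from the opposite end.
    use util::deque::{BufferPool, Data, Abort, Empty};
    use rustrt::thread::Thread;

    fn demo() {
        let pool = BufferPool::<int>::new();
        let (worker, stealer) = pool.deque();

        let t = Thread::start(proc() {
            let mut taken = 0i;
            while taken < 100 {
                match stealer.steal() {
                    Data(i) => { assert_eq!(i, 1); taken += 1; } // got one unit of work
                    Abort | Empty => {}  // lost a race or queue empty: just retry
                }
            }
        });

        for _ in range(0i, 100) {
            worker.push(1i); // only the worker half may push/pop
        }
        t.join();
    }
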
@@ -29,7 +29,7 @@ use std::fmt;
 ///
 /// The ratio between ScreenPx and DevicePixel for a given display be found by calling
 /// `servo::windowing::WindowMethods::hidpi_factor`.
-#[deriving(Show)]
+#[deriving(Show, Copy)]
 pub enum ScreenPx {}

 /// One CSS "px" in the coordinate system of the "initial viewport":
@@ -41,7 +41,7 @@ pub enum ScreenPx {}
 ///
 /// At the default zoom level of 100%, one PagePx is equal to one ScreenPx. However, if the
 /// document is zoomed in or out then this scale may be larger or smaller.
-#[deriving(Encodable, Show)]
+#[deriving(Encodable, Show, Copy)]
 pub enum ViewportPx {}

 /// One CSS "px" in the root coordinate system for the content document.
@@ -50,7 +50,7 @@ pub enum ViewportPx {}
 /// This is the mobile-style "pinch zoom" that enlarges content without reflowing it. When the
 /// viewport zoom is not equal to 1.0, then the layout viewport is no longer the same physical size
 /// as the viewable area.
-#[deriving(Encodable, Show)]
+#[deriving(Encodable, Show, Copy)]
 pub enum PagePx {}

 // In summary, the hierarchy of pixel units and the factors to convert from one to the next:
@@ -65,7 +65,7 @@ pub enum PagePx {}
 // See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info.
 //
 // FIXME: Implement Au using Length and ScaleFactor instead of a custom type.
-#[deriving(Clone, Hash, PartialEq, PartialOrd, Eq, Ord, Zero)]
+#[deriving(Clone, Copy, Hash, PartialEq, PartialOrd, Eq, Ord)]
 pub struct Au(pub i32);

 impl Default for Au {

@@ -21,7 +21,6 @@ extern crate libc;
 extern crate rand;
 extern crate rustrt;
 extern crate serialize;
-extern crate sync;
 #[cfg(target_os="macos")]
 extern crate task_info;
 extern crate "time" as std_time;
@@ -40,6 +39,7 @@ pub mod bloom;
 pub mod cache;
 pub mod cursor;
 pub mod debug_utils;
+pub mod deque;
 pub mod dlist;
 pub mod fnv;
 pub mod geometry;
@@ -50,7 +50,8 @@ pub mod opts;
 pub mod persistent_list;
 pub mod range;
 pub mod resource_files;
-pub mod rtinstrument;
+// FIXME: Find replacement for this post-runtime removal
+// pub mod rtinstrument;
 pub mod smallvec;
 pub mod sort;
 pub mod str;

@@ -7,10 +7,10 @@
 use geom::{Size2D, Point2D, SideOffsets2D, Rect};
 use geom::num::Zero;
 use std::cmp::{min, max};
-use std::fmt::{Show, Formatter, FormatError};
+use std::fmt::{Show, Formatter, Error};

 bitflags!(
-    #[deriving(Encodable)]
+    #[deriving(Encodable, Copy)]
     flags WritingMode: u8 {
         const FLAG_RTL = 1 << 0,
         const FLAG_VERTICAL = 1 << 1,
@@ -49,7 +49,7 @@ impl WritingMode {
 }

 impl Show for WritingMode {
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         if self.is_vertical() {
             try!(write!(formatter, "V"));
             if self.is_vertical_lr() {
@@ -79,11 +79,11 @@ impl Show for WritingMode {
 /// (in addition to taking it as a parameter to methods) and check it.
 /// In non-debug builds, make this storage zero-size and the checks no-ops.
 #[cfg(ndebug)]
-#[deriving(Encodable, PartialEq, Eq, Clone)]
+#[deriving(Encodable, PartialEq, Eq, Clone, Copy)]
 struct DebugWritingMode;

 #[cfg(not(ndebug))]
-#[deriving(Encodable, PartialEq, Eq, Clone)]
+#[deriving(Encodable, PartialEq, Eq, Clone, Copy)]
 struct DebugWritingMode {
     mode: WritingMode
 }
@@ -122,19 +122,19 @@ impl DebugWritingMode {

 impl Show for DebugWritingMode {
     #[cfg(ndebug)]
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         write!(formatter, "?")
     }

     #[cfg(not(ndebug))]
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         self.mode.fmt(formatter)
     }
 }


 /// A 2D size in flow-relative dimensions
-#[deriving(Encodable, PartialEq, Eq, Clone)]
+#[deriving(Encodable, PartialEq, Eq, Clone, Copy)]
 pub struct LogicalSize<T> {
     pub inline: T, // inline-size, a.k.a. logical width, a.k.a. measure
     pub block: T, // block-size, a.k.a. logical height, a.k.a. extent
@@ -142,7 +142,7 @@ pub struct LogicalSize<T> {
 }

 impl<T: Show> Show for LogicalSize<T> {
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         write!(formatter, "LogicalSize({}, i{}×b{})",
                self.debug_writing_mode, self.inline, self.block)
     }
@@ -266,7 +266,7 @@ impl<T: Sub<T, T>> Sub<LogicalSize<T>, LogicalSize<T>> for LogicalSize<T> {


 /// A 2D point in flow-relative dimensions
-#[deriving(PartialEq, Encodable, Eq, Clone)]
+#[deriving(PartialEq, Encodable, Eq, Clone, Copy)]
 pub struct LogicalPoint<T> {
     pub i: T, /// inline-axis coordinate
     pub b: T, /// block-axis coordinate
@@ -274,7 +274,7 @@ pub struct LogicalPoint<T> {
 }

 impl<T: Show> Show for LogicalPoint<T> {
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         write!(formatter, "LogicalPoint({} (i{}, b{}))",
                self.debug_writing_mode, self.i, self.b)
     }
@@ -434,7 +434,7 @@ impl<T: Sub<T,T>> Sub<LogicalSize<T>, LogicalPoint<T>> for LogicalPoint<T> {
 /// Represents the four sides of the margins, borders, or padding of a CSS box,
 /// or a combination of those.
 /// A positive "margin" can be added to a rectangle to obtain a bigger rectangle.
-#[deriving(Encodable, PartialEq, Eq, Clone)]
+#[deriving(Encodable, PartialEq, Eq, Clone, Copy)]
 pub struct LogicalMargin<T> {
     pub block_start: T,
     pub inline_end: T,
@@ -444,7 +444,7 @@ pub struct LogicalMargin<T> {
 }

 impl<T: Show> Show for LogicalMargin<T> {
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         write!(formatter,
                "LogicalMargin({}, inline: {}..{} block: {}..{})",
                self.debug_writing_mode,
@@ -718,7 +718,7 @@ impl<T: Sub<T, T>> Sub<LogicalMargin<T>, LogicalMargin<T>> for LogicalMargin<T>


 /// A rectangle in flow-relative dimensions
-#[deriving(Encodable, PartialEq, Eq, Clone)]
+#[deriving(Encodable, PartialEq, Eq, Clone, Copy)]
 pub struct LogicalRect<T> {
     pub start: LogicalPoint<T>,
     pub size: LogicalSize<T>,
@@ -726,7 +726,7 @@ pub struct LogicalRect<T> {
 }

 impl<T: Show> Show for LogicalRect<T> {
-    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FormatError> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
         write!(formatter,
                "LogicalRect({}, i{}×b{}, @ (i{},b{}))",
                self.debug_writing_mode,

@@ -19,7 +19,7 @@ use std::os;
 use std::ptr;
 use std::rt;

-#[deriving(Clone)]
+#[deriving(Clone, Copy)]
 pub enum RenderApi {
     OpenGL,
     Mesa,

@@ -5,7 +5,7 @@
 //! A persistent, thread-safe singly-linked list.

 use std::mem;
-use sync::Arc;
+use std::sync::Arc;

 pub struct PersistentList<T> {
     head: PersistentListLink<T>,

@@ -26,7 +26,7 @@ impl RangeIndex<int> for int {
 #[macro_export]
 macro_rules! int_range_index {
     ($(#[$attr:meta])* struct $Self:ident($T:ty)) => (
-        #[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Show)]
+        #[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Show, Copy)]
         $(#[$attr])*
         pub struct $Self(pub $T);

@@ -175,7 +175,7 @@ macro_rules! int_range_index {
 }

 /// A range of indices
-#[deriving(Clone, Encodable)]
+#[deriving(Clone, Encodable, Copy)]
 pub struct Range<I> {
     begin: I,
     length: I,

@@ -13,7 +13,7 @@ use std::rt::local::Local;
 //use std::rt::rtio;
 use std::rt::task::{Task, TaskOpts, BlockedTask};
 use std_time;
-use sync::Mutex;
+use std::sync::Mutex;
 #[cfg(not(test))]
 use serialize::json;

@@ -57,11 +57,11 @@ pub fn is_whitespace(s: &str) -> bool {
 ///
 /// http://www.whatwg.org/specs/web-apps/current-work/multipage/common-microsyntaxes.html#space-character
 pub static HTML_SPACE_CHARACTERS: StaticCharVec = &[
-    '\u0020',
-    '\u0009',
-    '\u000a',
-    '\u000c',
-    '\u000d',
+    '\u{0020}',
+    '\u{0009}',
+    '\u{000a}',
+    '\u{000c}',
+    '\u{000d}',
 ];

 pub fn split_html_space_chars<'a>(s: &'a str)
@@ -131,6 +131,7 @@ pub fn parse_unsigned_integer<T: Iterator<char>>(input: T) -> Option<u32> {
     })
 }

+#[deriving(Copy)]
 pub enum LengthOrPercentageOrAuto {
     Auto,
     Percentage(f64),

@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

-use std::str::IntoMaybeOwned;
 use std::task;
 use std::comm::Sender;
 use std::task::TaskBuilder;
-use rtinstrument;
+// use rtinstrument;
 use task_state;

-pub fn spawn_named<S: IntoMaybeOwned<'static>>(name: S, f: proc():Send) {
+pub fn spawn_named<S: IntoCow<'static, String, str>>(name: S, f: proc():Send) {
     let builder = task::TaskBuilder::new().named(name);
     builder.spawn(proc() {
-        rtinstrument::instrument(f);
+        // rtinstrument::instrument(f);
+        f();
     });
 }

@@ -24,13 +24,15 @@ pub fn spawn_named_with_send_on_failure<T: Send>(name: &'static str,
                                                  dest: Sender<T>) {
     let future_result = TaskBuilder::new().named(name).try_future(proc() {
         task_state::initialize(state);
-        rtinstrument::instrument(f);
+        // FIXME: Find replacement for this post-runtime removal
+        // rtinstrument::instrument(f);
+        f();
     });

     let watched_name = name.into_string();
     let watcher_name = format!("{}Watcher", watched_name);
     TaskBuilder::new().named(watcher_name).spawn(proc() {
-        rtinstrument::instrument(proc() {
+        //rtinstrument::instrument(proc() {
             match future_result.unwrap() {
                 Ok(()) => (),
                 Err(..) => {
@@ -38,6 +40,6 @@ pub fn spawn_named_with_send_on_failure<T: Send>(name: &'static str,
                     dest.send(msg);
                 }
             }
-        });
+        //});
     });
 }

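For context on the signature change above: with the IntoCow bound, callers can keep passing a &'static str or hand over an owned String (assuming the usual IntoCow impls for those two types). A hypothetical call site, with the task name and body invented for illustration:

    // Illustrative only; "ExampleTask" and the closure bodies are made up.
    spawn_named("ExampleTask", proc() {
        // ... task body ...
    });
    spawn_named(format!("ExampleTask-{}", 42i), proc() {
        // ... task body ...
    });
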
@@ -11,7 +11,7 @@
 pub use self::imp::{initialize, get, enter, exit};

 bitflags! {
-    #[deriving(Show)]
+    #[deriving(Show, Copy)]
     flags TaskState: u32 {
         const SCRIPT = 0x01,
         const LAYOUT = 0x02,
@@ -46,22 +46,28 @@ task_types! {
 #[cfg(not(ndebug))]
 mod imp {
     use super::{TaskState, TYPES};
+    use std::cell::RefCell;

-    local_data_key!(STATE: TaskState)
+    thread_local!(static STATE: RefCell<Option<TaskState>> = RefCell::new(None))

     pub fn initialize(x: TaskState) {
-        match STATE.replace(Some(x)) {
-            None => (),
-            Some(s) => panic!("Task state already initialized as {}", s),
-        };
+        STATE.with(|ref k| {
+            match *k.borrow() {
+                Some(s) => panic!("Task state already initialized as {}", s),
+                None => ()
+            };
+            *k.borrow_mut() = Some(x);
+        });
         get(); // check the assertion below
     }

     pub fn get() -> TaskState {
-        let state = match STATE.get() {
-            None => panic!("Task state not initialized"),
-            Some(s) => *s,
-        };
+        let state = STATE.with(|ref k| {
+            match *k.borrow() {
+                None => panic!("Task state not initialized"),
+                Some(s) => s,
+            }
+        });

         // Exactly one of the task type flags should be set.
         assert_eq!(1, TYPES.iter().filter(|&&ty| state.contains(ty)).count());
@@ -71,13 +77,17 @@ mod imp {
     pub fn enter(x: TaskState) {
         let state = get();
         assert!(!state.intersects(x));
-        STATE.replace(Some(state | x));
+        STATE.with(|ref k| {
+            *k.borrow_mut() = Some(state | x);
+        })
     }

     pub fn exit(x: TaskState) {
         let state = get();
         assert!(state.contains(x));
-        STATE.replace(Some(state & !x));
+        STATE.with(|ref k| {
+            *k.borrow_mut() = Some(state & !x);
+        })
     }
 }

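The local_data_key! to thread_local! migration above (and in the tid() change just below) follows one pattern: the task-local slot becomes a thread-local RefCell<Option<...>> that is read via .with() plus .borrow() and updated through .borrow_mut(). A stripped-down sketch of that pattern, with names invented for illustration:

    use std::cell::RefCell;

    // Illustrative only: COUNTER and bump() are not part of this commit.
    thread_local!(static COUNTER: RefCell<Option<uint>> = RefCell::new(None))

    fn bump() -> uint {
        COUNTER.with(|k| {
            // Read the current value (None on first use on this thread).
            let next = match *k.borrow() {
                None => 0,
                Some(n) => n + 1,
            };
            // Write the updated value back into the thread-local slot.
            *k.borrow_mut() = Some(next);
            next
        })
    }
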
@@ -3,20 +3,24 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

 use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, SeqCst};
+use std::rc::Rc;
+use std::cell::RefCell;

 static mut next_tid: AtomicUint = INIT_ATOMIC_UINT;

-local_data_key!(task_local_tid: uint)
+thread_local!(static task_local_tid: Rc<RefCell<Option<uint>>> = Rc::new(RefCell::new(None)))

 /// Every task gets one, that's unique.
 pub fn tid() -> uint {
-    let ret =
-        match task_local_tid.replace(None) {
-            None => unsafe { next_tid.fetch_add(1, SeqCst) },
-            Some(x) => x,
-        };
+    task_local_tid.with(|ref k| {
+        let ret =
+            match *k.borrow() {
+                None => unsafe { next_tid.fetch_add(1, SeqCst) },
+                Some(x) => x,
+            };

-    task_local_tid.replace(Some(ret));
+        *k.borrow_mut() = Some(ret);

-    ret
+        ret
+    })
 }

@@ -101,12 +101,12 @@ fn should_find_all_elements() {
     let arr_two = [3044u32, 8393];
     let arr_three = [12u32, 23, 34];

-    test_find_all_elems(arr_odd);
-    test_find_all_elems(arr_even);
-    test_find_all_elems(arr_double);
-    test_find_all_elems(arr_one);
-    test_find_all_elems(arr_two);
-    test_find_all_elems(arr_three);
+    test_find_all_elems(&arr_odd);
+    test_find_all_elems(&arr_even);
+    test_find_all_elems(&arr_double);
+    test_find_all_elems(&arr_one);
+    test_find_all_elems(&arr_two);
+    test_find_all_elems(&arr_three);
 }

 #[test]
@@ -118,10 +118,10 @@ fn should_not_find_missing_elements() {
     let arr_two = [3044u32, 8393];
     let arr_three = [12u32, 23, 34];

-    test_miss_all_elems(arr_odd, [-22, 0, 3, 5, 34938, 10, 11, 12]);
-    test_miss_all_elems(arr_even, [-1, 0, 3, 34938, 10, 11, 12]);
-    test_miss_all_elems(arr_double, [-1, 0, 3, 4, 34938, 10, 11, 12, 234, 234, 33]);
-    test_miss_all_elems(arr_one, [-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
-    test_miss_all_elems(arr_two, [-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
-    test_miss_all_elems(arr_three, [-2, 0, 1, 2, 3, 34938, 10, 11, 234, 234, 33]);
+    test_miss_all_elems(&arr_odd, &[-22, 0, 3, 5, 34938, 10, 11, 12]);
+    test_miss_all_elems(&arr_even, &[-1, 0, 3, 34938, 10, 11, 12]);
+    test_miss_all_elems(&arr_double, &[-1, 0, 3, 4, 34938, 10, 11, 12, 234, 234, 33]);
+    test_miss_all_elems(&arr_one, &[-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
+    test_miss_all_elems(&arr_two, &[-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
+    test_miss_all_elems(&arr_three, &[-2, 0, 1, 2, 3, 34938, 10, 11, 234, 234, 33]);
 }

@@ -15,7 +15,7 @@ use rand::{Rng, XorShiftRng};
 use std::mem;
 use std::rand::weak_rng;
 use std::sync::atomic::{AtomicUint, SeqCst};
-use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
+use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};

 /// A unit of work.
 ///