auto merge of #2347 : Ms2ger/servo/vec, r=jdm

bors-servo 2014-05-06 17:19:25 -04:00
commit c013d2a211
10 changed files with 118 additions and 112 deletions

View file

@@ -18,7 +18,7 @@ pub type FontFamilyMap = HashMap<~str, FontFamily>;
 trait FontListHandleMethods {
     fn get_available_families(&self, fctx: &FontContextHandle) -> FontFamilyMap;
     fn load_variations_for_family(&self, family: &mut FontFamily);
-    fn get_last_resort_font_families() -> ~[~str];
+    fn get_last_resort_font_families() -> Vec<~str>;
 }
 
 /// The platform-independent font list abstraction.
@@ -75,9 +75,8 @@ impl FontList {
         }
     }
 
-    pub fn get_last_resort_font_families() -> ~[~str] {
-        let last_resort = FontListHandle::get_last_resort_font_families();
-        last_resort
+    pub fn get_last_resort_font_families() -> Vec<~str> {
+        FontListHandle::get_last_resort_font_families()
     }
 }

View file

@@ -129,8 +129,8 @@ impl FontListHandle {
         }
     }
 
-    pub fn get_last_resort_font_families() -> ~[~str] {
-        ~["Roboto".to_owned()]
+    pub fn get_last_resort_font_families() -> Vec<~str> {
+        vec!("Roboto".to_owned())
     }
 }

View file

@@ -131,8 +131,8 @@ impl FontListHandle {
         }
     }
 
-    pub fn get_last_resort_font_families() -> ~[~str] {
-        ~["Arial".to_owned()]
+    pub fn get_last_resort_font_families() -> Vec<~str> {
+        vec!("Arial".to_owned())
     }
 }

View file

@@ -58,7 +58,7 @@ impl FontListHandle {
         }
     }
 
-    pub fn get_last_resort_font_families() -> ~[~str] {
-        ~["Arial Unicode MS".to_owned(),"Arial".to_owned()]
+    pub fn get_last_resort_font_families() -> Vec<~str> {
+        vec!("Arial Unicode MS".to_owned(), "Arial".to_owned())
     }
 }
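
The four hunks above all make the same change: each platform's get_last_resort_font_families() now returns a growable Vec<~str> built with vec!(...) instead of an owned slice ~[~str]. A minimal sketch of the same idea in today's Rust, for illustration only (String and vec![] stand in for the tree's pre-1.0 ~str and vec!(...)):

fn get_last_resort_font_families() -> Vec<String> {
    // A growable vector of fallback family names, analogous to Vec<~str> above.
    vec!["Arial Unicode MS".to_string(), "Arial".to_string()]
}

fn main() {
    for family in get_last_resort_font_families() {
        println!("fallback family: {}", family);
    }
}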

View file

@@ -138,49 +138,53 @@ fn test_true_type_tag() {
 #[test]
 fn test_transform_compress_none() {
-    let test_strs : ~[~str] = ~[" foo bar".to_owned(),
-                                "foo bar ".to_owned(),
-                                "foo\n bar".to_owned(),
-                                "foo \nbar".to_owned(),
-                                " foo bar \nbaz".to_owned(),
-                                "foo bar baz".to_owned(),
-                                "foobarbaz\n\n".to_owned()];
+    let test_strs = vec!(
+        " foo bar",
+        "foo bar ",
+        "foo\n bar",
+        "foo \nbar",
+        " foo bar \nbaz",
+        "foo bar baz",
+        "foobarbaz\n\n"
+    );
 
     let mode = CompressNone;
-    for i in range(0, test_strs.len()) {
+    for test in test_strs.iter() {
         let mut new_line_pos = vec!();
-        let (trimmed_str, _out) = transform_text(test_strs[i], mode, true, &mut new_line_pos);
-        assert_eq!(&trimmed_str, &test_strs[i])
+        let (trimmed_str, _out) = transform_text(*test, mode, true, &mut new_line_pos);
+        assert_eq!(trimmed_str.as_slice(), *test)
     }
 }
 
 #[test]
 fn test_transform_discard_newline() {
-    let test_strs : ~[~str] = ~[" foo bar".to_owned(),
-                                "foo bar ".to_owned(),
-                                "foo\n bar".to_owned(),
-                                "foo \nbar".to_owned(),
-                                " foo bar \nbaz".to_owned(),
-                                "foo bar baz".to_owned(),
-                                "foobarbaz\n\n".to_owned()];
-
-    let oracle_strs : ~[~str] = ~[" foo bar".to_owned(),
-                                  "foo bar ".to_owned(),
-                                  "foo bar".to_owned(),
-                                  "foo bar".to_owned(),
-                                  " foo bar baz".to_owned(),
-                                  "foo bar baz".to_owned(),
-                                  "foobarbaz".to_owned()];
+    let test_strs = vec!(
+        " foo bar",
+        "foo bar ",
+        "foo\n bar",
+        "foo \nbar",
+        " foo bar \nbaz",
+        "foo bar baz",
+        "foobarbaz\n\n"
+    );
+
+    let oracle_strs = vec!(
+        " foo bar",
+        "foo bar ",
+        "foo bar",
+        "foo bar",
+        " foo bar baz",
+        "foo bar baz",
+        "foobarbaz"
+    );
 
     assert_eq!(test_strs.len(), oracle_strs.len());
 
     let mode = DiscardNewline;
-    for i in range(0, test_strs.len()) {
+    for (test, oracle) in test_strs.iter().zip(oracle_strs.iter()) {
         let mut new_line_pos = vec!();
-        let (trimmed_str, _out) = transform_text(test_strs[i], mode, true, &mut new_line_pos);
-        assert_eq!(&trimmed_str, &oracle_strs[i])
+        let (trimmed_str, _out) = transform_text(*test, mode, true, &mut new_line_pos);
+        assert_eq!(trimmed_str.as_slice(), *oracle)
     }
 }
@@ -244,30 +248,34 @@ fn test_transform_compress_whitespace_newline() {
 #[test]
 fn test_transform_compress_whitespace_newline_no_incoming() {
-    let test_strs : ~[~str] = ~[" foo bar".to_owned(),
-                                "\nfoo bar".to_owned(),
-                                "foo bar ".to_owned(),
-                                "foo\n bar".to_owned(),
-                                "foo \nbar".to_owned(),
-                                " foo bar \nbaz".to_owned(),
-                                "foo bar baz".to_owned(),
-                                "foobarbaz\n\n".to_owned()];
+    let test_strs = vec!(
+        " foo bar",
+        "\nfoo bar",
+        "foo bar ",
+        "foo\n bar",
+        "foo \nbar",
+        " foo bar \nbaz",
+        "foo bar baz",
+        "foobarbaz\n\n"
+    );
 
-    let oracle_strs : ~[~str] = ~[" foo bar".to_owned(),
-                                  " foo bar".to_owned(),
-                                  "foo bar ".to_owned(),
-                                  "foo bar".to_owned(),
-                                  "foo bar".to_owned(),
-                                  " foo bar baz".to_owned(),
-                                  "foo bar baz".to_owned(),
-                                  "foobarbaz ".to_owned()];
+    let oracle_strs = vec!(
+        " foo bar",
+        " foo bar",
+        "foo bar ",
+        "foo bar",
+        "foo bar",
+        " foo bar baz",
+        "foo bar baz",
+        "foobarbaz "
+    );
 
     assert_eq!(test_strs.len(), oracle_strs.len());
 
     let mode = CompressWhitespaceNewline;
-    for i in range(0, test_strs.len()) {
+    for (test, oracle) in test_strs.iter().zip(oracle_strs.iter()) {
         let mut new_line_pos = vec!();
-        let (trimmed_str, _out) = transform_text(test_strs[i], mode, false, &mut new_line_pos);
-        assert_eq!(&trimmed_str, &oracle_strs[i])
+        let (trimmed_str, _out) = transform_text(*test, mode, false, &mut new_line_pos);
+        assert_eq!(trimmed_str.as_slice(), *oracle)
     }
 }
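
Besides switching the fixtures from ~[~str] to vec!(...) of string slices, these test hunks replace the index-based loops with paired iteration over the input and oracle vectors. A small runnable sketch of that pattern in modern Rust; compress_spaces here is a made-up stand-in for transform_text, not the real function:

// Toy whitespace compressor used only to demonstrate the zip-based test loop.
fn compress_spaces(s: &str) -> String {
    s.split_whitespace().collect::<Vec<_>>().join(" ")
}

fn main() {
    let test_strs = vec![" foo bar", "foo\n bar", "foo  bar  baz"];
    let oracle_strs = vec!["foo bar", "foo bar", "foo bar baz"];
    assert_eq!(test_strs.len(), oracle_strs.len());

    // Iterating the two vectors in lockstep avoids the manual indexing of the
    // old `for i in range(0, test_strs.len())` loops.
    for (test, oracle) in test_strs.iter().zip(oracle_strs.iter()) {
        assert_eq!(compress_spaces(test), *oracle);
    }
    println!("all cases match");
}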

View file

@ -81,7 +81,7 @@ impl RenderListener for CompositorChan {
fn initialize_layers_for_pipeline(&self, fn initialize_layers_for_pipeline(&self,
pipeline_id: PipelineId, pipeline_id: PipelineId,
metadata: ~[LayerMetadata], metadata: Vec<LayerMetadata>,
epoch: Epoch) { epoch: Epoch) {
// FIXME(#2004, pcwalton): This assumes that the first layer determines the page size, and // FIXME(#2004, pcwalton): This assumes that the first layer determines the page size, and
// that all other layers are immediate children of it. This is sufficient to handle // that all other layers are immediate children of it. This is sufficient to handle

View file

@@ -126,7 +126,7 @@ pub trait RenderListener {
     /// creating and/or destroying render layers as necessary.
     fn initialize_layers_for_pipeline(&self,
                                       pipeline_id: PipelineId,
-                                      metadata: ~[LayerMetadata],
+                                      metadata: Vec<LayerMetadata>,
                                       epoch: Epoch);
     fn set_layer_clip_rect(&self,

View file

@@ -126,15 +126,15 @@ type LoaderTaskFactory = extern "Rust" fn() -> LoaderTask;
 /// Create a ResourceTask with the default loaders
 pub fn ResourceTask() -> ResourceTask {
-    let loaders = ~[
+    let loaders = vec!(
         ("file".to_owned(), file_loader::factory),
         ("http".to_owned(), http_loader::factory),
         ("data".to_owned(), data_loader::factory),
-    ];
+    );
     create_resource_task_with_loaders(loaders)
 }
 
-fn create_resource_task_with_loaders(loaders: ~[(~str, LoaderTaskFactory)]) -> ResourceTask {
+fn create_resource_task_with_loaders(loaders: Vec<(~str, LoaderTaskFactory)>) -> ResourceTask {
     let (setup_chan, setup_port) = channel();
     let builder = task::task().named("ResourceManager");
     builder.spawn(proc() {
@@ -148,12 +148,12 @@ fn create_resource_task_with_loaders(loaders: ~[(~str, LoaderTaskFactory)]) -> ResourceTask {
 struct ResourceManager {
     from_client: Receiver<ControlMsg>,
     /// Per-scheme resource loaders
-    loaders: ~[(~str, LoaderTaskFactory)],
+    loaders: Vec<(~str, LoaderTaskFactory)>,
 }
 
 fn ResourceManager(from_client: Receiver<ControlMsg>,
-                   loaders: ~[(~str, LoaderTaskFactory)]) -> ResourceManager {
+                   loaders: Vec<(~str, LoaderTaskFactory)>) -> ResourceManager {
     ResourceManager {
         from_client : from_client,
         loaders : loaders,
@@ -236,7 +236,7 @@ fn snicklefritz_loader_factory() -> LoaderTask {
 #[test]
 fn should_delegate_to_scheme_loader() {
-    let loader_factories = ~[("snicklefritz".to_owned(), snicklefritz_loader_factory)];
+    let loader_factories = vec!(("snicklefritz".to_owned(), snicklefritz_loader_factory));
     let resource_task = create_resource_task_with_loaders(loader_factories);
     let (start_chan, start) = channel();
     resource_task.send(Load(FromStr::from_str("snicklefritz://heya").unwrap(), start_chan));
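
The resource task now keeps its per-scheme loader table as Vec<(~str, LoaderTaskFactory)>. For illustration, a hedged modern-Rust sketch of such a scheme-to-factory table and its lookup; the LoaderFactory type and find_loader helper are assumptions made up for this example, not Servo's actual API:

type LoaderFactory = fn(&str) -> String;

fn file_loader(url: &str) -> String {
    format!("file contents of {}", url)
}

fn http_loader(url: &str) -> String {
    format!("HTTP response for {}", url)
}

// Linear scan over the (scheme, factory) pairs, as a small table allows.
fn find_loader(loaders: &[(String, LoaderFactory)], scheme: &str) -> Option<LoaderFactory> {
    loaders
        .iter()
        .find(|(s, _)| s.as_str() == scheme)
        .map(|(_, f)| *f)
}

fn main() {
    // vec! plays the role of the old ~[...] literal when building the table.
    let loaders: Vec<(String, LoaderFactory)> = vec![
        ("file".to_string(), file_loader as LoaderFactory),
        ("http".to_string(), http_loader as LoaderFactory),
    ];
    match find_loader(&loaders, "http") {
        Some(load) => println!("{}", load("http://example.org")),
        None => println!("no loader registered for scheme"),
    }
}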

View file

@@ -12,7 +12,6 @@ use std::ptr;
 use std::sync::atomics::{AtomicUint, Relaxed, SeqCst};
 use std::unstable::mutex::StaticNativeMutex;
 use std::mem;
-use std::slice;
 
 /// When the size exceeds (number of buckets * LOAD_NUMERATOR/LOAD_DENOMINATOR), the hash table
 /// grows.
@@ -38,9 +37,9 @@ pub struct ConcurrentHashMap<K,V> {
     /// The number of elements in this hash table.
     size: AtomicUint,
     /// The striped locks.
-    locks: ~[StaticNativeMutex],
+    locks: Vec<StaticNativeMutex>,
     /// The buckets.
-    buckets: ~[Option<Bucket<K,V>>],
+    buckets: Vec<Option<Bucket<K,V>>>,
 }
 
 impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
@@ -57,12 +56,12 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             k0: rand.gen(),
             k1: rand.gen(),
             size: AtomicUint::new(0),
-            locks: slice::from_fn(lock_count, |_| {
+            locks: Vec::from_fn(lock_count, |_| {
                 unsafe {
                     StaticNativeMutex::new()
                 }
            }),
-            buckets: slice::from_fn(lock_count * buckets_per_lock, |_| None),
+            buckets: Vec::from_fn(lock_count * buckets_per_lock, |_| None),
         }
     }
@@ -75,7 +74,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
         loop {
             let (bucket_index, lock_index) = self.bucket_and_lock_indices(&key);
             if this.overloaded() {
-                this.locks[lock_index].unlock_noguard();
+                this.locks.get(lock_index).unlock_noguard();
                 this.try_resize(self.buckets_per_lock() * 2);
 
                 // Have to retry because the bucket and lock indices will have shifted.
@@ -83,7 +82,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             }
 
             this.insert_unlocked(key, value, Some(bucket_index));
-            this.locks[lock_index].unlock_noguard();
+            this.locks.get(lock_index).unlock_noguard();
             break
         }
     }
@@ -98,16 +97,9 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             None => self.bucket_index_unlocked(&key),
         };
 
-        match this.buckets[bucket_index] {
-            None => {
-                this.buckets[bucket_index] = Some(Bucket {
-                    next: None,
-                    key: key,
-                    value: value,
-                });
-                drop(this.size.fetch_add(1, SeqCst));
-            }
-            Some(ref mut bucket) => {
+        match this.buckets.get_mut(bucket_index) {
+            &None => {}
+            &Some(ref mut bucket) => {
                 // Search to try to find a value.
                 let mut bucket: *mut Bucket<K,V> = bucket;
                 loop {
@@ -132,8 +124,15 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
                         drop(this.size.fetch_add(1, SeqCst));
                         break
                     }
+                    return;
                 }
             }
+
+        *this.buckets.get_mut(bucket_index) = Some(Bucket {
+            next: None,
+            key: key,
+            value: value,
+        });
+        drop(this.size.fetch_add(1, SeqCst));
     }
 
     /// Removes the given key from the hash table.
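
The most involved change in this file is in insert_unlocked: instead of indexing this.buckets[bucket_index] and writing the new head bucket inside the None arm, the code matches on buckets.get_mut(bucket_index), handles the occupied chain inside the match (ending with an early return), and installs the new head bucket after the match. A single-threaded toy of that control flow in modern Rust, without the striped locks or the raw-pointer chain walk; recursion stands in for the pointer loop and all names here are illustrative:

struct Bucket<K, V> {
    key: K,
    value: V,
    next: Option<Box<Bucket<K, V>>>,
}

// Insert into one chained slot: the occupied case is handled inside the match
// and returns early, the empty-slot write happens after the match, mirroring
// the control-flow reshuffle in the diff above.
fn insert_into_slot<K: PartialEq, V>(slot: &mut Option<Box<Bucket<K, V>>>, key: K, value: V) {
    match slot {
        None => {}
        Some(bucket) => {
            if bucket.key == key {
                // Key already present: overwrite its value.
                bucket.value = value;
            } else {
                // Walk further down the chain.
                insert_into_slot(&mut bucket.next, key, value);
            }
            return;
        }
    }
    // Empty slot: install the new head bucket.
    *slot = Some(Box::new(Bucket { key, value, next: None }));
}

fn main() {
    let mut buckets: Vec<Option<Box<Bucket<&str, u32>>>> = (0..4).map(|_| None).collect();
    insert_into_slot(&mut buckets[1], "a", 1);
    insert_into_slot(&mut buckets[1], "b", 2);
    insert_into_slot(&mut buckets[1], "a", 3);
    assert_eq!(buckets[1].as_ref().unwrap().value, 3);
    println!("chained insert ok");
}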
@@ -146,9 +145,9 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
         // Rebuild the bucket.
         let mut nuke_bucket = false;
-        match this.buckets[bucket_index] {
-            None => {}
-            Some(ref mut bucket) if bucket.key == *key => {
+        match this.buckets.get_mut(bucket_index) {
+            &None => {}
+            &Some(ref mut bucket) if bucket.key == *key => {
                 // Common case (assuming a sparse table): If the key is the first one in the
                 // chain, just copy the next fields over.
                 let next_opt = mem::replace(&mut bucket.next, None);
@@ -158,7 +157,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
                 }
                 drop(this.size.fetch_sub(1, SeqCst))
             }
-            Some(ref mut bucket) => {
+            &Some(ref mut bucket) => {
                 // Rarer case: If the key is elsewhere in the chain (or nowhere), then search for
                 // it and just stitch up pointers.
                 let mut prev: *mut Bucket<K,V> = bucket;
@@ -188,11 +187,11 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             }
         }
         if nuke_bucket {
-            this.buckets[bucket_index] = None
+            *this.buckets.get_mut(bucket_index) = None
         }
 
         unsafe {
-            this.locks[lock_index].unlock_noguard()
+            this.locks.get(lock_index).unlock_noguard()
         }
     }
@@ -214,9 +213,9 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
         let (bucket_index, lock_index) = this.bucket_and_lock_indices(key);
 
         let result;
-        match this.buckets[bucket_index] {
-            None => result = false,
-            Some(ref bucket) => {
+        match this.buckets.get(bucket_index) {
+            &None => result = false,
+            &Some(ref bucket) => {
                 // Search to try to find a value.
                 let mut bucket = bucket;
                 loop {
@@ -236,7 +235,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
         }
 
         unsafe {
-            this.locks[lock_index].unlock_noguard()
+            this.locks.get(lock_index).unlock_noguard()
        }
 
         result
@@ -256,7 +255,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
                 stripe_index += 1;
                 if stripe_index == buckets_per_lock {
                     unsafe {
-                        this.locks[lock_index].unlock_noguard();
+                        this.locks.get(lock_index).unlock_noguard();
                     }
 
                     stripe_index = 0;
@@ -264,7 +263,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             }
             if stripe_index == 0 {
                 unsafe {
-                    this.locks[lock_index].lock_noguard()
+                    this.locks.get(lock_index).lock_noguard()
                 }
             }
@@ -295,7 +294,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
         let new_bucket_count = lock_count * new_buckets_per_lock;
         if new_bucket_count > this.buckets.len() {
             // Create a new set of buckets.
-            let mut buckets = slice::from_fn(new_bucket_count, |_| None);
+            let mut buckets = Vec::from_fn(new_bucket_count, |_| None);
             mem::swap(&mut this.buckets, &mut buckets);
             this.size.store(0, Relaxed);
@@ -360,7 +359,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
             bucket_index = hash as uint % bucket_count;
             lock_index = bucket_index / buckets_per_lock;
             unsafe {
-                this.locks[lock_index].lock_noguard();
+                this.locks.get(lock_index).lock_noguard();
             }
             let new_bucket_count = this.buckets.len();
             if bucket_count == new_bucket_count {
@@ -369,7 +368,7 @@ impl<K:Hash + Eq,V> ConcurrentHashMap<K,V> {
 
             // If we got here, the hash table resized from under us: try again.
             unsafe {
-                this.locks[lock_index].unlock_noguard()
+                this.locks.get(lock_index).unlock_noguard()
             }
         }
@@ -447,12 +446,12 @@ impl<'a,K,V> Iterator<(&'a K, &'a V)> for ConcurrentHashMapIterator<'a,K,V> {
                 // necessary and acquire the new one, if necessary.
                 if bucket_index != -1 {
                     unsafe {
-                        map.locks[lock_index as uint].unlock_noguard()
+                        map.locks.get(lock_index as uint).unlock_noguard()
                     }
                 }
                 if bucket_index != (bucket_count as int) - 1 {
                     unsafe {
-                        map.locks[(lock_index + 1) as uint].lock_noguard()
+                        map.locks.get((lock_index + 1) as uint).lock_noguard()
                     }
                 }
             }
@@ -464,9 +463,9 @@ impl<'a,K,V> Iterator<(&'a K, &'a V)> for ConcurrentHashMapIterator<'a,K,V> {
             self.bucket_index += 1;
 
-            self.current_bucket = match map.buckets[self.bucket_index as uint] {
-                None => ptr::null(),
-                Some(ref bucket) => {
+            self.current_bucket = match map.buckets.get(self.bucket_index as uint) {
+                &None => ptr::null(),
+                &Some(ref bucket) => {
                     let bucket: *Bucket<K,V> = bucket;
                     bucket
                 }

View file

@@ -68,7 +68,7 @@ struct WorkerThread<QUD,WUD> {
     /// The communication channel on which messages are sent to the supervisor.
     chan: Sender<SupervisorMsg<QUD,WUD>>,
     /// The thief end of the work-stealing deque for all other workers.
-    other_deques: ~[Stealer<WorkUnit<QUD,WUD>>],
+    other_deques: Vec<Stealer<WorkUnit<QUD,WUD>>>,
     /// The random number generator for this worker.
     rng: XorShiftRng,
 }
@@ -104,7 +104,7 @@ impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
                 let mut should_continue = true;
                 loop {
                     let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
-                    match self.other_deques[victim].steal() {
+                    match self.other_deques.get_mut(victim).steal() {
                         Empty | Abort => {
                             // Continue.
                         }
@@ -189,7 +189,7 @@ impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
 /// A work queue on which units of work can be submitted.
 pub struct WorkQueue<QUD,WUD> {
     /// Information about each of the workers.
-    workers: ~[WorkerInfo<QUD,WUD>],
+    workers: Vec<WorkerInfo<QUD,WUD>>,
     /// A port on which deques can be received from the workers.
     port: Receiver<SupervisorMsg<QUD,WUD>>,
     /// The amount of work that has been enqueued.
@@ -204,7 +204,7 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
     pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
         // Set up data structures.
         let (supervisor_chan, supervisor_port) = channel();
-        let (mut infos, mut threads) = (~[], ~[]);
+        let (mut infos, mut threads) = (vec!(), vec!());
         for i in range(0, thread_count) {
             let (worker_chan, worker_port) = channel();
             let mut pool = BufferPool::new();
@@ -219,7 +219,7 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
                 index: i,
                 port: worker_port,
                 chan: supervisor_chan.clone(),
-                other_deques: ~[],
+                other_deques: vec!(),
                 rng: rand::weak_rng(),
             });
         }
@@ -228,10 +228,10 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
         for i in range(0, thread_count) {
             for j in range(0, thread_count) {
                 if i != j {
-                    threads[i].other_deques.push(infos[j].thief.clone())
+                    threads.get_mut(i).other_deques.push(infos.get(j).thief.clone())
                 }
             }
-            assert!(threads[i].other_deques.len() == thread_count - 1)
+            assert!(threads.get(i).other_deques.len() == thread_count - 1)
         }
 
         // Spawn threads.
@@ -255,7 +255,7 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
     /// Enqueues a block into the work queue.
     #[inline]
     pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
-        match self.workers[0].deque {
+        match self.workers.get_mut(0).deque {
             None => {
                 fail!("tried to push a block but we don't have the deque?!")
             }
@@ -284,7 +284,7 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
         // Get our deques back.
         for _ in range(0, self.workers.len()) {
            match self.port.recv() {
-                ReturnDequeMsg(index, deque) => self.workers[index].deque = Some(deque),
+                ReturnDequeMsg(index, deque) => self.workers.get_mut(index).deque = Some(deque),
                 FinishedMsg => fail!("unexpected finished message!"),
             }
         }
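
In WorkQueue::new, the deque-distribution loop now pushes into threads.get_mut(i).other_deques instead of indexing. For readers unfamiliar with the wiring, a simplified std-only sketch of the same distribution step: an Arc<Mutex<VecDeque<_>>> stands in for Servo's work-stealing Stealer, so this is an assumption-laden illustration rather than the actual BufferPool API. Each worker ends up with a handle to every other worker's queue so it can steal when its own runs dry.

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

struct Worker {
    own: Arc<Mutex<VecDeque<u32>>>,
    others: Vec<Arc<Mutex<VecDeque<u32>>>>,
}

fn main() {
    let thread_count = 3;
    let queues: Vec<Arc<Mutex<VecDeque<u32>>>> = (0..thread_count)
        .map(|_| Arc::new(Mutex::new(VecDeque::new())))
        .collect();

    let workers: Vec<Worker> = (0..thread_count)
        .map(|i| Worker {
            own: queues[i].clone(),
            // Hand each worker a handle to every queue except its own, mirroring
            // the `if i != j { ... push(infos.get(j).thief.clone()) }` loop above.
            others: (0..thread_count)
                .filter(|&j| j != i)
                .map(|j| queues[j].clone())
                .collect(),
        })
        .collect();

    for (i, worker) in workers.iter().enumerate() {
        assert_eq!(worker.others.len(), thread_count - 1);
        worker.own.lock().unwrap().push_back(i as u32);
    }
    println!("each worker can steal from {} other deques", thread_count - 1);
}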