diff --git a/components/util/cache.rs b/components/util/cache.rs
index b3f2aaf55c4..757a6d60c20 100644
--- a/components/util/cache.rs
+++ b/components/util/cache.rs
@@ -59,7 +59,7 @@ impl HashCache
 #[test]
 fn test_hashcache() {
-    let mut cache: HashCache<uint, Cell<&str>> = HashCache::new();
+    let mut cache: HashCache<usize, Cell<&str>> = HashCache::new();
     cache.insert(1, Cell::new("one"));
     assert!(cache.find(&1).is_some());
@@ -72,11 +72,11 @@ fn test_hashcache() {
 pub struct LRUCache {
     entries: Vec<(K, V)>,
-    cache_size: uint,
+    cache_size: usize,
 }
 impl LRUCache {
-    pub fn new(size: uint) -> LRUCache {
+    pub fn new(size: usize) -> LRUCache {
         LRUCache {
             entries: vec!(),
             cache_size: size,
@@ -84,7 +84,7 @@ impl LRUCache {
     }
     #[inline]
-    pub fn touch(&mut self, pos: uint) -> V {
+    pub fn touch(&mut self, pos: usize) -> V {
         let last_index = self.entries.len() - 1;
         if pos != last_index {
             let entry = self.entries.remove(pos);
@@ -134,7 +134,7 @@ pub struct SimpleHashCache {
 }
 impl,V:Clone> SimpleHashCache {
-    pub fn new(cache_size: uint) -> SimpleHashCache {
+    pub fn new(cache_size: usize) -> SimpleHashCache {
         let mut r = rand::thread_rng();
         SimpleHashCache {
             entries: repeat(None).take(cache_size).collect(),
@@ -144,15 +144,15 @@ impl,V:Clone> SimpleHashCache {
     }
     #[inline]
-    fn to_bucket(&self, h: uint) -> uint {
+    fn to_bucket(&self, h: usize) -> usize {
         h % self.entries.len()
     }
     #[inline]
-    fn bucket_for_key>(&self, key: &Q) -> uint {
+    fn bucket_for_key>(&self, key: &Q) -> usize {
         let mut hasher = SipHasher::new_with_keys(self.k0, self.k1);
         key.hash(&mut hasher);
-        self.to_bucket(hasher.finish() as uint)
+        self.to_bucket(hasher.finish() as usize)
     }
     pub fn insert(&mut self, key: K, value: V) {
diff --git a/components/util/debug_utils.rs b/components/util/debug_utils.rs
index 8d9789de029..f7c02b8068b 100644
--- a/components/util/debug_utils.rs
+++ b/components/util/debug_utils.rs
@@ -12,7 +12,7 @@ fn hexdump_slice(buf: &[u8]) {
     let mut stderr = io::stderr();
     stderr.write_all(b" ").unwrap();
     for (i, &v) in buf.iter().enumerate() {
-        let output = format!("{:02X} ", v as uint);
+        let output = format!("{:02X} ", v);
         stderr.write_all(output.as_bytes()).unwrap();
         match i % 16 {
            15 => { stderr.write_all(b"\n ").unwrap(); },
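A note on the LRUCache::touch hunk above: the rename from uint to usize is mechanical, but the index it takes is the position of a cache hit, which the cache then moves to the back of its Vec so that eviction from the front always removes the least-recently-used entry. The standalone sketch below illustrates that pattern; the push back to the end is implied by the elided tail of the hunk, so treat it as an assumption rather than the exact Servo code.

// Sketch of the "touch" pattern with usize indices: move the entry at `pos`
// to the end of the Vec so the front always holds the least-recently-used item.
fn touch<K, V>(entries: &mut Vec<(K, V)>, pos: usize) {
    let last_index = entries.len() - 1;
    if pos != last_index {
        let entry = entries.remove(pos); // shifts everything after `pos` left
        entries.push(entry);             // re-append as most-recently-used
    }
}

fn main() {
    let mut entries = vec![(1, "a"), (2, "b"), (3, "c")];
    touch(&mut entries, 0);
    assert_eq!(entries, vec![(2, "b"), (3, "c"), (1, "a")]);
}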
diff --git a/components/util/deque/mod.rs b/components/util/deque/mod.rs
index 6dce728679b..089edf6bd43 100644
--- a/components/util/deque/mod.rs
+++ b/components/util/deque/mod.rs
@@ -57,23 +57,23 @@
 use std::mem::{forget, min_align_of, size_of, transmute};
 use std::ptr;
 use std::sync::Mutex;
-use std::sync::atomic::{AtomicInt, AtomicPtr};
+use std::sync::atomic::{AtomicIsize, AtomicPtr};
 use std::sync::atomic::Ordering::SeqCst;
 // Once the queue is less than 1/K full, then it will be downsized. Note that
 // the deque requires that this number be less than 2.
-static K: int = 4;
+static K: isize = 4;
 // Minimum number of bits that a buffer size should be. No buffer will resize to
 // under this value, and all deques will initially contain a buffer of this
 // size.
 //
 // The size in question is 1 << MIN_BITS
-static MIN_BITS: uint = 7;
+static MIN_BITS: usize = 7;
 struct Deque {
-    bottom: AtomicInt,
-    top: AtomicInt,
+    bottom: AtomicIsize,
+    top: AtomicIsize,
     array: AtomicPtr>,
     pool: BufferPool,
 }
@@ -139,7 +139,7 @@ pub struct BufferPool {
 /// LLVM is probably pretty good at doing this already.
 struct Buffer {
     storage: *const T,
-    log_size: uint,
+    log_size: usize,
 }
 unsafe impl Send for Buffer { }
@@ -159,7 +159,7 @@ impl BufferPool {
         (Worker { deque: a }, Stealer { deque: b })
     }
-    fn alloc(&mut self, bits: uint) -> Box> {
+    fn alloc(&mut self, bits: usize) -> Box> {
         unsafe {
             let mut pool = self.pool.lock().unwrap();
             match pool.iter().position(|x| x.size() >= (1 << bits)) {
@@ -228,8 +228,8 @@ impl Deque {
     fn new(mut pool: BufferPool) -> Deque {
         let buf = pool.alloc(MIN_BITS);
         Deque {
-            bottom: AtomicInt::new(0),
-            top: AtomicInt::new(0),
+            bottom: AtomicIsize::new(0),
+            top: AtomicIsize::new(0),
             array: AtomicPtr::new(unsafe { transmute(buf) }),
             pool: pool,
         }
@@ -299,7 +299,7 @@ impl Deque {
         }
     }
-    unsafe fn maybe_shrink(&self, b: int, t: int) {
+    unsafe fn maybe_shrink(&self, b: isize, t: isize) {
         let a = self.array.load(SeqCst);
         if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
             self.swap_buffer(b, a, (*a).resize(b, t, -1));
@@ -313,7 +313,7 @@ impl Deque {
     // after this method has called 'free' on it. The continued usage is simply
     // a read followed by a forget, but we must make sure that the memory can
    // continue to be read after we flag this buffer for reclamation.
-    unsafe fn swap_buffer(&self, b: int, old: *mut Buffer,
+    unsafe fn swap_buffer(&self, b: isize, old: *mut Buffer,
                           buf: Buffer) -> *mut Buffer {
         let newbuf: *mut Buffer = transmute(box buf);
         self.array.store(newbuf, SeqCst);
@@ -345,12 +345,12 @@ impl Drop for Deque {
     }
 }
 #[inline]
-fn buffer_alloc_size(log_size: uint) -> uint {
+fn buffer_alloc_size(log_size: usize) -> usize {
     (1 << log_size) * size_of::()
 }
 impl Buffer {
-    unsafe fn new(log_size: uint) -> Buffer {
+    unsafe fn new(log_size: usize) -> Buffer {
         let size = buffer_alloc_size::(log_size);
         let buffer = allocate(size, min_align_of::());
         if buffer.is_null() { ::alloc::oom() }
@@ -360,12 +360,12 @@ impl Buffer {
         }
     }
-    fn size(&self) -> int { 1 << self.log_size }
+    fn size(&self) -> isize { 1 << self.log_size }
     // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
-    fn mask(&self) -> int { (1 << self.log_size) - 1 }
+    fn mask(&self) -> isize { (1 << self.log_size) - 1 }
-    unsafe fn elem(&self, i: int) -> *const T {
+    unsafe fn elem(&self, i: isize) -> *const T {
         self.storage.offset(i & self.mask())
    }
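The mask() comment above ("LLVM cannot optimize (foo % (1 << bar)) into this implicitly") is why the deque wraps indices by hand. The sketch below, written in present-day Rust rather than the 2015 code, shows that for a power-of-two capacity the bitwise AND is exactly the non-negative remainder the ring buffer needs, which Rust's % operator does not give for negative isize values; rem_euclid is used here only as a modern reference point, not something the original code calls.

// For a power-of-two capacity, `i & (capacity - 1)` equals the non-negative
// remainder `i mod capacity` for every isize, which is what a ring buffer
// wants; plain `%` would return a negative remainder for negative i.
fn main() {
    let log_size: usize = 7;            // MIN_BITS above: capacity 1 << 7 = 128
    let size: isize = 1 << log_size;
    let mask: isize = size - 1;
    for &i in &[0isize, 1, 127, 128, 300, -1] {
        assert_eq!(i & mask, i.rem_euclid(size));
    }
    assert_eq!(-1 % 128, -1);           // why plain `%` would be the wrong wrap
    assert_eq!(-1 & 127, 127);
}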
@@ -373,23 +373,23 @@ impl Buffer {
     // nor does this clear out the contents contained within. Hence, this is a
     // very unsafe method which the caller needs to treat specially in case a
     // race is lost.
-    unsafe fn get(&self, i: int) -> T {
+    unsafe fn get(&self, i: isize) -> T {
         ptr::read(self.elem(i))
     }
     // Unsafe because this unsafely overwrites possibly uninitialized or
     // initialized data.
-    unsafe fn put(&self, i: int, t: T) {
+    unsafe fn put(&self, i: isize, t: T) {
         ptr::write(self.elem(i) as *mut T, t);
     }
     // Again, unsafe because this has incredibly dubious ownership violations.
     // It is assumed that this buffer is immediately dropped.
-    unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer {
+    unsafe fn resize(&self, b: isize, t: isize, delta: isize) -> Buffer {
         // NB: not entirely obvious, but thanks to 2's complement,
-        // casting delta to uint and then adding gives the desired
+        // casting delta to usize and then adding gives the desired
         // effect.
-        let buf = Buffer::new(self.log_size + delta as uint);
+        let buf = Buffer::new(self.log_size + delta as usize);
         for i in range(t, b) {
             buf.put(i, self.get(i));
         }
diff --git a/components/util/geometry.rs b/components/util/geometry.rs
index 59f61c5afc0..3a165facb5c 100644
--- a/components/util/geometry.rs
+++ b/components/util/geometry.rs
@@ -236,7 +236,7 @@ impl Au {
     }
     #[inline]
-    pub fn from_px(px: int) -> Au {
+    pub fn from_px(px: isize) -> Au {
         NumCast::from(px * 60).unwrap()
     }
@@ -246,9 +246,9 @@ impl Au {
     }
     #[inline]
-    pub fn to_nearest_px(&self) -> int {
+    pub fn to_nearest_px(&self) -> isize {
         let Au(s) = *self;
-        ((s as f64) / 60f64).round() as int
+        ((s as f64) / 60f64).round() as isize
     }
     #[inline]
@@ -309,13 +309,13 @@ pub fn from_frac_px(px: f64) -> Au {
     Au((px * 60f64) as i32)
 }
-pub fn from_px(px: int) -> Au {
+pub fn from_px(px: isize) -> Au {
     NumCast::from(px * 60).unwrap()
 }
-pub fn to_px(au: Au) -> int {
+pub fn to_px(au: Au) -> isize {
     let Au(a) = au;
-    (a / 60) as int
+    (a / 60) as isize
 }
 pub fn to_frac_px(au: Au) -> f64 {
@@ -325,7 +325,7 @@ pub fn to_frac_px(au: Au) -> f64 {
 // assumes 72 points per inch, and 96 px per inch
 pub fn from_pt(pt: f64) -> Au {
-    from_px((pt / 72f64 * 96f64) as int)
+    from_px((pt / 72f64 * 96f64) as isize)
 }
 // assumes 72 points per inch, and 96 px per inch
diff --git a/components/util/memory.rs b/components/util/memory.rs
index f895c129fea..e5e20d4cc94 100644
--- a/components/util/memory.rs
+++ b/components/util/memory.rs
@@ -241,7 +241,7 @@ macro_rules! option_try(
 );
 #[cfg(target_os="linux")]
-fn get_proc_self_statm_field(field: uint) -> Option {
+fn get_proc_self_statm_field(field: usize) -> Option {
     let mut f = File::open(&Path::new("/proc/self/statm"));
     match f.read_to_string() {
         Ok(contents) => {
diff --git a/components/util/opts.rs b/components/util/opts.rs
index b64fe4e8f46..6bcbf694940 100644
--- a/components/util/opts.rs
+++ b/components/util/opts.rs
@@ -28,14 +28,14 @@ pub struct Opts {
     /// How many threads to use for CPU painting (`-t`).
     ///
     /// Note that painting is sequentialized when using GPU painting.
-    pub paint_threads: uint,
+    pub paint_threads: usize,
     /// True to use GPU painting via Skia-GL, false to use CPU painting via Skia (`-g`). Note that
     /// compositing is always done on the GPU.
     pub gpu_painting: bool,
     /// The maximum size of each tile in pixels (`-s`).
-    pub tile_size: uint,
+    pub tile_size: usize,
     /// The ratio of device pixels per px at the default scale. If unspecified, will use the
     /// platform default setting.
@@ -54,7 +54,7 @@ pub struct Opts {
     /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
     /// sequential algorithm.
-    pub layout_threads: uint,
+    pub layout_threads: usize,
     pub nonincremental_layout: bool,
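For reference, the arithmetic behind the geometry.rs signatures above: an Au is 1/60 of a px, and from_pt assumes 72 points and 96 px per inch. A small worked sketch, with plain integers standing in for the Au newtype shown in the diff:

// Plain-integer stand-ins for the Au conversions above: 60 app units per px,
// and 96 px per 72 pt for the point conversion.
fn from_px(px: isize) -> i32 { (px * 60) as i32 }
fn to_nearest_px(au: i32) -> isize { ((au as f64) / 60f64).round() as isize }
fn from_pt(pt: f64) -> i32 { from_px((pt / 72f64 * 96f64) as isize) }

fn main() {
    assert_eq!(from_px(10), 600);        // 10 px -> 600 Au
    assert_eq!(to_nearest_px(629), 10);  // 629 Au is ~10.48 px, rounds to 10
    assert_eq!(from_pt(12.0), 960);      // 12 pt = 16 px = 960 Au
}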
@@ -102,7 +102,7 @@ pub struct Opts {
     pub devtools_port: Option,
     /// The initial requested size of the window.
-    pub initial_window_size: TypedSize2D,
+    pub initial_window_size: TypedSize2D,
     /// An optional string allowing the user agent to be set for testing.
     pub user_agent: Option,
@@ -256,7 +256,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
         opt_match.free.clone()
     };
-    let tile_size: uint = match opt_match.opt_str("s") {
+    let tile_size: usize = match opt_match.opt_str("s") {
         Some(tile_size_str) => tile_size_str.parse().unwrap(),
         None => 512,
     };
@@ -265,7 +265,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
         ScaleFactor(dppx_str.parse().unwrap())
     );
-    let mut paint_threads: uint = match opt_match.opt_str("t") {
+    let mut paint_threads: usize = match opt_match.opt_str("t") {
         Some(paint_threads_str) => paint_threads_str.parse().unwrap(),
         None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
     };
@@ -280,7 +280,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
     let gpu_painting = !FORCE_CPU_PAINTING && opt_match.opt_present("g");
-    let mut layout_threads: uint = match opt_match.opt_str("y") {
+    let mut layout_threads: usize = match opt_match.opt_str("y") {
         Some(layout_threads_str) => layout_threads_str.parse().unwrap(),
         None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
     };
@@ -301,7 +301,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
     let initial_window_size = match opt_match.opt_str("resolution") {
         Some(res_string) => {
-            let res: Vec = res_string.split('x').map(|r| r.parse().unwrap()).collect();
+            let res: Vec = res_string.split('x').map(|r| r.parse().unwrap()).collect();
             TypedSize2D(res[0], res[1])
         }
         None => {
diff --git a/components/util/persistent_list.rs b/components/util/persistent_list.rs
index f20edff3d38..e78dc3daa1e 100644
--- a/components/util/persistent_list.rs
+++ b/components/util/persistent_list.rs
@@ -9,7 +9,7 @@ use std::sync::Arc;
 pub struct PersistentList {
     head: PersistentListLink,
-    length: uint,
+    length: usize,
 }
 struct PersistentListEntry {
@@ -29,7 +29,7 @@ impl PersistentList where T: Send + Sync {
     }
     #[inline]
-    pub fn len(&self) -> uint {
+    pub fn len(&self) -> usize {
         self.length
     }
diff --git a/components/util/taskpool.rs b/components/util/taskpool.rs
index 5572395fc98..d3f52c5018b 100644
--- a/components/util/taskpool.rs
+++ b/components/util/taskpool.rs
@@ -25,7 +25,7 @@ pub struct TaskPool {
 }
 impl TaskPool {
-    pub fn new(tasks: uint) -> TaskPool {
+    pub fn new(tasks: u32) -> TaskPool {
         assert!(tasks > 0);
         let (tx, rx) = channel();
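The `--resolution` handling above splits a "WIDTHxHEIGHT" string on 'x' and parses each half into the window-size integer type before building the TypedSize2D. Below is a hedged, dependency-free sketch of that parse: the u32 element type is an assumption (the Vec's generic parameter is missing from the hunk), it returns Option instead of unwrapping, and a plain tuple stands in for TypedSize2D.

// Sketch of the --resolution parse: "WIDTHxHEIGHT" split on 'x'. The u32
// element type is an assumption; the real code collects into a Vec and
// unwraps, so it panics on malformed input instead of returning None.
fn parse_resolution(res_string: &str) -> Option<(u32, u32)> {
    let mut parts = res_string.split('x').map(|r| r.parse::<u32>().ok());
    match (parts.next(), parts.next(), parts.next()) {
        (Some(Some(width)), Some(Some(height)), None) => Some((width, height)),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_resolution("1024x740"), Some((1024, 740)));
    assert_eq!(parse_resolution("1024"), None);
    assert_eq!(parse_resolution("800x600x32"), None);
}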
diff --git a/components/util/tid.rs b/components/util/tid.rs
index 7351f85f085..9a01a19f6ce 100644
--- a/components/util/tid.rs
+++ b/components/util/tid.rs
@@ -8,10 +8,10 @@ use std::cell::RefCell;
 static mut next_tid: AtomicUsize = ATOMIC_USIZE_INIT;
-thread_local!(static TASK_LOCAL_TID: Rc<RefCell<Option<uint>>> = Rc::new(RefCell::new(None)));
+thread_local!(static TASK_LOCAL_TID: Rc<RefCell<Option<usize>>> = Rc::new(RefCell::new(None)));
 /// Every task gets one, that's unique.
-pub fn tid() -> uint {
+pub fn tid() -> usize {
     TASK_LOCAL_TID.with(|ref k| {
         let ret = match *k.borrow() {
diff --git a/components/util/vec.rs b/components/util/vec.rs
index 960e7e8db1a..a1b07126e06 100644
--- a/components/util/vec.rs
+++ b/components/util/vec.rs
@@ -16,11 +16,11 @@ pub trait Comparator {
 pub trait BinarySearchMethods<'a, T: Ord + PartialOrd + PartialEq> {
     fn binary_search_(&self, key: &T) -> Option<&'a T>;
-    fn binary_search_index(&self, key: &T) -> Option<uint>;
+    fn binary_search_index(&self, key: &T) -> Option<usize>;
 }
 pub trait FullBinarySearchMethods {
-    fn binary_search_index_by>(&self, key: &K, cmp: C) -> Option<uint>;
+    fn binary_search_index_by>(&self, key: &K, cmp: C) -> Option<usize>;
 }
 impl<'a, T: Ord + PartialOrd + PartialEq> BinarySearchMethods<'a, T> for &'a [T] {
@@ -28,28 +28,28 @@ impl<'a, T: Ord + PartialOrd + PartialEq> BinarySearchMethods<'a, T> for &'a [T]
         self.binary_search_index(key).map(|i| &self[i])
     }
-    fn binary_search_index(&self, key: &T) -> Option<uint> {
+    fn binary_search_index(&self, key: &T) -> Option<usize> {
         self.binary_search_index_by(key, DefaultComparator)
     }
 }
 impl<'a, T> FullBinarySearchMethods for &'a [T] {
-    fn binary_search_index_by>(&self, key: &K, cmp: C) -> Option<uint> {
+    fn binary_search_index_by>(&self, key: &K, cmp: C) -> Option<usize> {
         if self.len() == 0 { return None; }
-        let mut low : int = 0;
-        let mut high : int = (self.len() as int) - 1;
+        let mut low : isize = 0;
+        let mut high : isize = (self.len() as isize) - 1;
         while low <= high {
             // http://googleresearch.blogspot.com/2006/06/extra-extra-read-all-about-it-nearly.html
-            let mid = ((low as uint) + (high as uint)) >> 1;
+            let mid = ((low as usize) + (high as usize)) >> 1;
             let midv = &self[mid];
             match cmp.compare(key, midv) {
-                Ordering::Greater => low = (mid as int) + 1,
-                Ordering::Less => high = (mid as int) - 1,
+                Ordering::Greater => low = (mid as isize) + 1,
+                Ordering::Less => high = (mid as isize) - 1,
                 Ordering::Equal => return Some(mid),
             }
         }
diff --git a/components/util/workqueue.rs b/components/util/workqueue.rs
index ad39b1e349f..c24f40602f9 100644
--- a/components/util/workqueue.rs
+++ b/components/util/workqueue.rs
@@ -13,7 +13,7 @@ use task_state;
 use libc::funcs::posix88::unistd::usleep;
 use std::mem;
 use rand::{Rng, weak_rng, XorShiftRng};
-use std::sync::atomic::{AtomicUint, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::mpsc::{channel, Sender, Receiver};
 use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
@@ -33,7 +33,7 @@ pub struct WorkUnit {
 /// Messages from the supervisor to the worker.
 enum WorkerMsg {
     /// Tells the worker to start work.
-    Start(Worker>, *mut AtomicUint, *const QueueData),
+    Start(Worker>, *mut AtomicUsize, *const QueueData),
     /// Tells the worker to stop. It can be restarted again with a `WorkerMsg::Start`.
     Stop,
     /// Tells the worker thread to terminate.
@@ -45,7 +45,7 @@ unsafe impl Send for WorkerMsg {
     Finished,
-    ReturnDeque(uint, Worker>),
+    ReturnDeque(usize, Worker>),
 }
 unsafe impl Send for SupervisorMsg {}
@@ -63,7 +63,7 @@ struct WorkerInfo {
 /// Information specific to each worker thread that the thread keeps.
 struct WorkerThread {
     /// The index of this worker.
-    index: uint,
+    index: usize,
     /// The communication port from the supervisor.
     port: Receiver>,
     /// The communication channel on which messages are sent to the supervisor.
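One change worth calling out from vec.rs above: the midpoint is still computed as ((low as usize) + (high as usize)) >> 1, per the linked Google Research post on the classic (low + high) / 2 overflow bug. The toy demonstration below uses deliberately small integer types (i8/u8 standing in for isize/usize) purely so the overflow is visible; it is an illustration of the idea, not how the slice code is exercised in practice.

// i8/u8 stand in for isize/usize so the overflow in (low + high) / 2 is easy
// to see; the unsigned-sum midpoint used in vec.rs stays correct.
fn main() {
    let low: i8 = 100;
    let high: i8 = 120;
    // Naive signed midpoint: 100 + 120 = 220 wraps to -36 in i8.
    assert_eq!(low.wrapping_add(high) / 2, -18);
    // The vec.rs approach: do the sum on the unsigned type, then shift.
    assert_eq!(((low as u8 as usize) + (high as u8 as usize)) >> 1, 110);
    // An equivalent overflow-free form often used instead.
    assert_eq!(low + (high - low) / 2, 110);
}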
@@ -110,7 +110,7 @@ impl WorkerThread {
         let mut i = 0;
         let mut should_continue = true;
         loop {
-            let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
+            let victim = (self.rng.next_u32() as usize) % self.other_deques.len();
             match self.other_deques[victim].steal() {
                 Empty | Abort => {
                     // Continue.
@@ -179,7 +179,7 @@ impl WorkerThread {
 /// A handle to the work queue that individual work units have.
 pub struct WorkerProxy<'a, QueueData: 'a, WorkData: 'a> {
     worker: &'a mut Worker>,
-    ref_count: *mut AtomicUint,
+    ref_count: *mut AtomicUsize,
     queue_data: *const QueueData,
     worker_index: u8,
 }
@@ -216,7 +216,7 @@ pub struct WorkQueue {
     /// A port on which deques can be received from the workers.
     port: Receiver>,
     /// The amount of work that has been enqueued.
-    work_count: uint,
+    work_count: usize,
     /// Arbitrary user data.
     pub data: QueueData,
 }
@@ -226,7 +226,7 @@ impl WorkQueue {
     /// it.
     pub fn new(task_name: &'static str,
                state: task_state::TaskState,
-               thread_count: uint,
+               thread_count: usize,
                user_data: QueueData) -> WorkQueue {
         // Set up data structures.
         let (supervisor_chan, supervisor_port) = channel();
@@ -295,7 +295,7 @@ impl WorkQueue {
     /// Synchronously runs all the enqueued tasks and waits for them to complete.
     pub fn run(&mut self) {
         // Tell the workers to start.
-        let mut work_count = AtomicUint::new(self.work_count);
+        let mut work_count = AtomicUsize::new(self.work_count);
         for worker in self.workers.iter_mut() {
             worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(), &mut work_count, &self.data)).unwrap()
         }
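The run() hunk above now hands each worker a *mut AtomicUsize holding the number of enqueued units. The sketch below shows only the general counting pattern such a shared counter supports (workers decrement as they finish, and the one that drains the count reports back); it is not the crate's actual termination protocol, and it uses Arc plus fixed per-worker workloads purely to stay self-contained and deterministic.

// Hedged sketch: an AtomicUsize tracks outstanding work; whichever worker
// observes the count reaching zero sends the "finished" message, analogous
// to SupervisorMsg::Finished in the diff above.
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    // 8 outstanding units, 4 workers that each complete 2 of them.
    let work_count = Arc::new(AtomicUsize::new(8));
    let (done_tx, done_rx) = channel::<()>();

    for _ in 0..4 {
        let work_count = Arc::clone(&work_count);
        let done_tx = done_tx.clone();
        thread::spawn(move || {
            for _ in 0..2 {
                // fetch_sub returns the previous value, so seeing 1 means this
                // worker just finished the last outstanding unit.
                if work_count.fetch_sub(1, Ordering::SeqCst) == 1 {
                    let _ = done_tx.send(());
                }
            }
        });
    }

    done_rx.recv().unwrap(); // the supervisor side blocks until the count drains
}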