Cargoify servo

This commit is contained in:
Jack Moffitt 2014-08-28 09:34:23 -06:00
parent db2f642c32
commit c6ab60dbfc
1761 changed files with 8423 additions and 2294 deletions

View file

@@ -0,0 +1,20 @@
# Cargo manifest for the `util` crate: shared utility code used across Servo.
[package]
name = "util"
version = "0.0.1"
authors = ["The Servo Project Developers"]

[lib]
name = "util"
path = "lib.rs"

# Git dependencies on Servo-maintained support crates.
[dependencies.azure]
git = "https://github.com/servo/rust-azure"

[dependencies.geom]
git = "https://github.com/servo/rust-geom"

# Local path dependency; only compiled in on macOS (see lib.rs cfg).
[dependencies.task_info]
path = "../../support/rust-task_info"

[dependencies.string_cache]
git = "https://github.com/servo/string-cache"

43
components/util/atom.rs Normal file
View file

@@ -0,0 +1,43 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Provides a wrapper around the Atom type in the string cache
//! crate. It's needed so that it can implement the Encodable
//! trait which is required by Servo.
use serialize::{Encoder, Encodable};
use std::fmt;
use std::hash::Hash;
use string_cache::atom;
/// Newtype wrapper around the string-cache crate's interned-string `Atom`.
#[deriving(Clone, Eq, Hash, PartialEq)]
pub struct Atom {
    // The wrapped interned string.
    atom: atom::Atom,
}
impl Atom {
    /// Interns `slice` and wraps the resulting atom.
    #[inline(always)]
    pub fn from_slice(slice: &str) -> Atom {
        Atom {
            atom: atom::Atom::from_slice(slice)
        }
    }

    /// Borrows the interned string's contents.
    #[inline(always)]
    pub fn as_slice<'t>(&'t self) -> &'t str {
        self.atom.as_slice()
    }
}
// Formats the atom as its plain string contents.
impl fmt::Show for Atom {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:s}", self.atom.as_slice())
    }
}
// Intentionally a no-op encoder: nothing is written to the encoder and the
// atom's contents are not serialized. This exists only so types containing
// `Atom` can derive/implement `Encodable` (see module docs above).
impl<E, S: Encoder<E>> Encodable<S, E> for Atom {
    fn encode(&self, _s: &mut S) -> Result<(), E> {
        Ok(())
    }
}

279
components/util/cache.rs Normal file
View file

@@ -0,0 +1,279 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::hashmap::HashMap;
use rand::Rng;
use std::hash::{Hash, sip};
use std::rand::task_rng;
use std::slice::Items;
#[cfg(test)]
use std::cell::Cell;
/// Common interface implemented by every cache type in this module.
pub trait Cache<K: PartialEq, V: Clone> {
    /// Stores `value` under `key`, possibly evicting other entries.
    fn insert(&mut self, key: K, value: V);
    /// Returns a clone of the cached value for `key`, if present.
    fn find(&mut self, key: &K) -> Option<V>;
    /// Returns the value for `key`, computing and caching it via `blk` on a miss.
    fn find_or_create(&mut self, key: &K, blk: |&K| -> V) -> V;
    /// Removes every entry from the cache.
    fn evict_all(&mut self);
}
/// A cache that holds at most one key/value pair.
pub struct MonoCache<K, V> {
    // The single cached entry, if any.
    entry: Option<(K,V)>,
}
impl<K: Clone + PartialEq, V: Clone> MonoCache<K,V> {
    /// Creates an empty cache. `_size` is ignored — capacity is always one;
    /// the parameter only mirrors the constructors of the other cache types.
    pub fn new(_size: uint) -> MonoCache<K,V> {
        MonoCache { entry: None }
    }
}
impl<K: Clone + PartialEq, V: Clone> Cache<K,V> for MonoCache<K,V> {
    /// Replaces the single cached entry unconditionally.
    fn insert(&mut self, key: K, value: V) {
        self.entry = Some((key, value));
    }

    /// Hits only when `key` equals the one currently cached key.
    fn find(&mut self, key: &K) -> Option<V> {
        match self.entry {
            None => None,
            Some((ref k, ref v)) => if *k == *key { Some(v.clone()) } else { None }
        }
    }

    /// On a miss the computed value displaces whatever was cached before.
    fn find_or_create(&mut self, key: &K, blk: |&K| -> V) -> V {
        match self.find(key) {
            Some(value) => value,
            None => {
                let value = blk(key);
                self.entry = Some((key.clone(), value.clone()));
                value
            }
        }
    }

    fn evict_all(&mut self) {
        self.entry = None;
    }
}
// Inserting a second key must displace the first (capacity is one).
#[test]
fn test_monocache() {
    let mut cache: MonoCache<uint,Cell<&str>> = MonoCache::new(10);
    let one = Cell::new("one");
    let two = Cell::new("two");
    cache.insert(1, one);
    assert!(cache.find(&1).is_some());
    assert!(cache.find(&2).is_none());
    cache.find_or_create(&2, |_v| { two });
    assert!(cache.find(&2).is_some());
    assert!(cache.find(&1).is_none());
}
/// An unbounded cache backed by a `HashMap` (no eviction policy).
pub struct HashCache<K, V> {
    entries: HashMap<K, V>,
}
impl<K: Clone + PartialEq + Eq + Hash, V: Clone> HashCache<K,V> {
    /// Creates an empty, unbounded cache.
    pub fn new() -> HashCache<K, V> {
        HashCache {
            entries: HashMap::new(),
        }
    }
}
impl<K: Clone + PartialEq + Eq + Hash, V: Clone> Cache<K,V> for HashCache<K,V> {
    fn insert(&mut self, key: K, value: V) {
        self.entries.insert(key, value);
    }

    fn find(&mut self, key: &K) -> Option<V> {
        match self.entries.find(key) {
            Some(v) => Some(v.clone()),
            None => None,
        }
    }

    /// Uses the map's find-or-insert primitive; `blk` runs only on a miss.
    fn find_or_create(&mut self, key: &K, blk: |&K| -> V) -> V {
        self.entries.find_or_insert_with(key.clone(), blk).clone()
    }

    fn evict_all(&mut self) {
        self.entries.clear();
    }
}
// Unlike MonoCache, both keys remain present after the second insertion.
#[test]
fn test_hashcache() {
    let mut cache: HashCache<uint, Cell<&str>> = HashCache::new();
    let one = Cell::new("one");
    let two = Cell::new("two");
    cache.insert(1, one);
    assert!(cache.find(&1).is_some());
    assert!(cache.find(&2).is_none());
    cache.find_or_create(&2, |_v| { two });
    assert!(cache.find(&1).is_some());
    assert!(cache.find(&2).is_some());
}
/// A fixed-capacity least-recently-used cache. Entries are kept in a vector
/// ordered from least- (front) to most-recently used (back).
pub struct LRUCache<K, V> {
    entries: Vec<(K, V)>,
    // Maximum number of entries before the LRU entry is evicted.
    cache_size: uint,
}
impl<K: Clone + PartialEq, V: Clone> LRUCache<K,V> {
    /// Creates an empty cache holding at most `size` entries.
    pub fn new(size: uint) -> LRUCache<K, V> {
        LRUCache {
            entries: vec!(),
            cache_size: size,
        }
    }

    /// Marks the entry at `pos` as most-recently used by moving it to the
    /// back, then returns a clone of its value. `pos` must be a valid index
    /// (callers obtain it from `position()` on the entries).
    #[inline]
    pub fn touch(&mut self, pos: uint) -> V {
        let last_index = self.entries.len() - 1;
        if pos != last_index {
            let entry = self.entries.remove(pos);
            self.entries.push(entry.unwrap());
        }
        // After the move the touched entry sits at `last_index` (the back).
        self.entries[last_index].ref1().clone()
    }

    /// Iterates over entries from least- to most-recently used.
    pub fn iter<'a>(&'a self) -> Items<'a,(K,V)> {
        self.entries.iter()
    }
}
impl<K: Clone + PartialEq, V: Clone> Cache<K,V> for LRUCache<K,V> {
    /// Appends as most-recently used, evicting the front (LRU) entry when full.
    fn insert(&mut self, key: K, val: V) {
        if self.entries.len() == self.cache_size {
            self.entries.remove(0);
        }
        self.entries.push((key, val));
    }

    /// A hit also promotes the entry to most-recently used (via `touch`).
    fn find(&mut self, key: &K) -> Option<V> {
        match self.entries.iter().position(|&(ref k, _)| *k == *key) {
            Some(pos) => Some(self.touch(pos)),
            None => None,
        }
    }

    fn find_or_create(&mut self, key: &K, blk: |&K| -> V) -> V {
        match self.entries.iter().position(|&(ref k, _)| *k == *key) {
            Some(pos) => self.touch(pos),
            None => {
                let val = blk(key);
                self.insert(key.clone(), val.clone());
                val
            }
        }
    }

    fn evict_all(&mut self) {
        self.entries.clear();
    }
}
/// A fixed-size, direct-mapped cache: each key hashes to exactly one bucket,
/// so colliding keys overwrite one another.
pub struct SimpleHashCache<K,V> {
    entries: Vec<Option<(K,V)>>,
    // Per-instance random SipHash keys.
    k0: u64,
    k1: u64,
}
impl<K:Clone+PartialEq+Hash,V:Clone> SimpleHashCache<K,V> {
    /// Creates a cache with `cache_size` buckets and randomized hash keys.
    /// NOTE(review): `cache_size == 0` would make `to_bucket` divide by zero;
    /// presumably callers always pass a positive size — confirm.
    pub fn new(cache_size: uint) -> SimpleHashCache<K,V> {
        let mut r = task_rng();
        SimpleHashCache {
            entries: Vec::from_elem(cache_size, None),
            k0: r.gen(),
            k1: r.gen(),
        }
    }

    /// Maps a hash value to a bucket index.
    #[inline]
    fn to_bucket(&self, h: uint) -> uint {
        h % self.entries.len()
    }

    /// Buckets `key` using this cache's SipHash keys.
    #[inline]
    fn bucket_for_key<Q:Hash>(&self, key: &Q) -> uint {
        self.to_bucket(sip::hash_with_keys(self.k0, self.k1, key) as uint)
    }

    /// Borrowing lookup accepting any type equivalent to `K`, so callers can
    /// probe without constructing an owned key.
    #[inline]
    pub fn find_equiv<'a,Q:Hash+Equiv<K>>(&'a self, key: &Q) -> Option<&'a V> {
        let bucket_index = self.bucket_for_key(key);
        match self.entries[bucket_index] {
            Some((ref existing_key, ref value)) if key.equiv(existing_key) => Some(value),
            _ => None,
        }
    }
}
impl<K:Clone+PartialEq+Hash,V:Clone> Cache<K,V> for SimpleHashCache<K,V> {
    /// Overwrites whatever currently occupies the key's bucket (direct-mapped).
    fn insert(&mut self, key: K, value: V) {
        let bucket_index = self.bucket_for_key(&key);
        *self.entries.get_mut(bucket_index) = Some((key, value));
    }

    fn find(&mut self, key: &K) -> Option<V> {
        let bucket_index = self.bucket_for_key(key);
        match self.entries[bucket_index] {
            Some((ref existing_key, ref value)) if existing_key == key => Some((*value).clone()),
            _ => None,
        }
    }

    fn find_or_create(&mut self, key: &K, blk: |&K| -> V) -> V {
        match self.find(key) {
            Some(value) => return value,
            None => {}
        }
        let value = blk(key);
        self.insert((*key).clone(), value.clone());
        value
    }

    fn evict_all(&mut self) {
        for slot in self.entries.mut_iter() {
            *slot = None
        }
    }
}
// Exercises insertion, LRU promotion on hit, eviction order, and
// find_or_create. The trailing comments trace the (LRU, MRU) state.
#[test]
fn test_lru_cache() {
    let one = Cell::new("one");
    let two = Cell::new("two");
    let three = Cell::new("three");
    let four = Cell::new("four");

    // Test normal insertion.
    let mut cache: LRUCache<uint,Cell<&str>> = LRUCache::new(2); // (_, _) (cache is empty)
    cache.insert(1, one);    // (1, _)
    cache.insert(2, two);    // (1, 2)
    cache.insert(3, three);  // (2, 3)

    assert!(cache.find(&1).is_none());  // (2, 3) (no change)
    assert!(cache.find(&3).is_some());  // (2, 3)
    assert!(cache.find(&2).is_some());  // (3, 2)

    // Test that LRU works (this insertion should replace 3, not 2).
    cache.insert(4, four);  // (2, 4)

    assert!(cache.find(&1).is_none());  // (2, 4) (no change)
    assert!(cache.find(&2).is_some());  // (4, 2)
    assert!(cache.find(&3).is_none());  // (4, 2) (no change)
    assert!(cache.find(&4).is_some());  // (2, 4) (4 promoted to MRU)

    // Test find_or_create.
    cache.find_or_create(&1, |_| { one });  // (4, 1)

    assert!(cache.find(&1).is_some());  // (4, 1) (no change)
    assert!(cache.find(&2).is_none());  // (4, 1) (no change)
    assert!(cache.find(&3).is_none());  // (4, 1) (no change)
    assert!(cache.find(&4).is_some());  // (1, 4)
}

View file

@@ -0,0 +1,33 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::io;
use std::io::Writer;
use std::mem;
use std::mem::size_of;
use std::slice::raw::buf_as_slice;
/// Writes a hex dump of `buf` to stderr: two hex digits per byte, a wider gap
/// after the 8th column, and a newline every 16 bytes.
fn hexdump_slice(buf: &[u8]) {
    let mut stderr = io::stderr();
    stderr.write(b" ").unwrap();
    for (i, &v) in buf.iter().enumerate() {
        let output = format!("{:02X} ", v as uint);
        stderr.write(output.as_bytes()).unwrap();
        match i % 16 {
            15 => { stderr.write(b"\n ").unwrap(); },
            7 => { stderr.write(b" ").unwrap(); },
            _ => ()
        }
    }
    stderr.write(b"\n").unwrap();
    // Flush once at the end rather than after every byte, as the original did:
    // the emitted bytes are identical, without a flush per dumped byte.
    stderr.flush().unwrap();
}
/// Hex-dumps the raw in-memory bytes of `*obj` (`size_of::<T>()` bytes) to
/// stderr, for debugging. Internally unsafe: reinterprets the reference as a
/// byte pointer and views it as a slice.
pub fn hexdump<T>(obj: &T) {
    unsafe {
        let buf: *const u8 = mem::transmute(obj);
        debug!("dumping at {:p}", buf);
        buf_as_slice(buf, size_of::<T>(), hexdump_slice);
    }
}

304
components/util/geometry.rs Normal file
View file

@@ -0,0 +1,304 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use geom::length::Length;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::size::Size2D;
use serialize::{Encodable, Encoder};
use std::default::Default;
use std::num::{NumCast, One, Zero};
use std::fmt;
// Units for use with geom::length and geom::scale_factor.
/// A normalized "pixel" at the default resolution for the display.
///
/// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it
/// should approximate a device-independent reference length. This unit corresponds to Android's
/// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel."
///
/// The relationship between DevicePixel and ScreenPx is defined by the OS. On most low-dpi
/// screens, one ScreenPx is equal to one DevicePixel. But on high-density screens it can be
/// some larger number. For example, by default on Apple "retina" displays, one ScreenPx equals
/// two DevicePixels. On Android "HDPI" displays, one ScreenPx equals 1.5 device pixels.
///
/// The ratio between ScreenPx and DevicePixel for a given display can be found by calling
/// `servo::windowing::WindowMethods::hidpi_factor`.
pub enum ScreenPx {}
/// One CSS "px" in the coordinate system of the "initial viewport":
/// http://www.w3.org/TR/css-device-adapt/#initial-viewport
///
/// ViewportPx is equal to ScreenPx times a "page zoom" factor controlled by the user. This is
/// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport
/// so it still exactly fits the visible area.
///
/// At the default zoom level of 100%, one ViewportPx is equal to one ScreenPx. However, if the
/// document is zoomed in or out then this scale may be larger or smaller.
#[deriving(Encodable)]
pub enum ViewportPx {}
/// One CSS "px" in the root coordinate system for the content document.
///
/// PagePx is equal to ViewportPx multiplied by a "viewport zoom" factor controlled by the user.
/// This is the mobile-style "pinch zoom" that enlarges content without reflowing it. When the
/// viewport zoom is not equal to 1.0, then the layout viewport is no longer the same physical size
/// as the viewable area.
#[deriving(Encodable)]
pub enum PagePx {}
// In summary, the hierarchy of pixel units and the factors to convert from one to the next:
//
// DevicePixel
//   / hidpi_ratio => ScreenPx
//     / desktop_zoom => ViewportPx
//       / pinch_zoom => PagePx

// An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was
// originally proposed in 2002 as a standard unit of measure in Gecko.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info.
//
// FIXME: Implement Au using Length and ScaleFactor instead of a custom type.
#[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Zero)]
pub struct Au(pub i32);
// The default app-unit value is zero length.
impl Default for Au {
    #[inline]
    fn default() -> Au {
        Au(0)
    }
}
// Serializes an Au as its fractional CSS-px value (an f64), not the raw
// app-unit integer.
impl<E, S: Encoder<E>> Encodable<S, E> for Au {
    fn encode(&self, e: &mut S) -> Result<(), E> {
        e.emit_f64(to_frac_px(*self))
    }
}
// Displays the value in CSS px, e.g. "1.5px".
impl fmt::Show for Au {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}px", to_frac_px(*self))
    }
}
// Addition of the underlying app-unit counts.
impl Add<Au,Au> for Au {
    #[inline]
    fn add(&self, other: &Au) -> Au {
        let &Au(lhs) = self;
        let &Au(rhs) = other;
        Au(lhs + rhs)
    }
}
// Subtraction of the underlying app-unit counts.
impl Sub<Au,Au> for Au {
    #[inline]
    fn sub(&self, other: &Au) -> Au {
        let &Au(lhs) = self;
        let &Au(rhs) = other;
        Au(lhs - rhs)
    }
}
// NOTE(review): multiplying two Au values multiplies raw app-unit counts, so
// the result's unit is effectively au² / 1 — callers presumably use this for
// scaling by dimensionless values; confirm.
impl Mul<Au,Au> for Au {
    #[inline]
    fn mul(&self, other: &Au) -> Au {
        let Au(s) = *self;
        let Au(o) = *other;
        Au(s * o)
    }
}
// Integer division of the raw app-unit counts (truncates).
impl Div<Au,Au> for Au {
    #[inline]
    fn div(&self, other: &Au) -> Au {
        let Au(s) = *self;
        let Au(o) = *other;
        Au(s / o)
    }
}
// Remainder of the raw app-unit counts.
impl Rem<Au,Au> for Au {
    #[inline]
    fn rem(&self, other: &Au) -> Au {
        let Au(s) = *self;
        let Au(o) = *other;
        Au(s % o)
    }
}
// Negation of the underlying app-unit count.
impl Neg<Au> for Au {
    #[inline]
    fn neg(&self) -> Au {
        let &Au(value) = self;
        Au(-value)
    }
}
// The multiplicative identity is one raw app unit (1/60 px), not one px.
impl One for Au {
    #[inline]
    fn one() -> Au { Au(1) }
}
// Marker impl: Au satisfies Num via the arithmetic impls above.
impl Num for Au {}
/// Minimum of two app-unit values (duplicated as `Au::min`).
#[inline]
pub fn min(x: Au, y: Au) -> Au { if x < y { x } else { y } }
/// Maximum of two app-unit values (duplicated as `Au::max`).
#[inline]
pub fn max(x: Au, y: Au) -> Au { if x > y { x } else { y } }
impl NumCast for Au {
    /// Interprets `n` as a raw app-unit count. The `unwrap` fails (task
    /// failure) when `n` has no i32 representation.
    #[inline]
    fn from<T:ToPrimitive>(n: T) -> Option<Au> {
        Some(Au(n.to_i32().unwrap()))
    }
}
// Exposes the raw app-unit count (not px) as primitive numbers.
impl ToPrimitive for Au {
    #[inline]
    fn to_i64(&self) -> Option<i64> {
        let Au(s) = *self;
        Some(s as i64)
    }

    // Note: a negative count converts via `as`, i.e. it wraps to a large u64.
    #[inline]
    fn to_u64(&self) -> Option<u64> {
        let Au(s) = *self;
        Some(s as u64)
    }

    #[inline]
    fn to_f32(&self) -> Option<f32> {
        let Au(s) = *self;
        s.to_f32()
    }

    #[inline]
    fn to_f64(&self) -> Option<f64> {
        let Au(s) = *self;
        s.to_f64()
    }
}
impl Au {
/// FIXME(pcwalton): Workaround for lack of cross crate inlining of newtype structs!
#[inline]
pub fn new(value: i32) -> Au {
Au(value)
}
#[inline]
pub fn scale_by(self, factor: f64) -> Au {
let Au(s) = self;
Au(((s as f64) * factor) as i32)
}
#[inline]
pub fn from_px(px: int) -> Au {
NumCast::from(px * 60).unwrap()
}
#[inline]
pub fn from_page_px(px: Length<PagePx, f32>) -> Au {
NumCast::from(px.get() * 60f32).unwrap()
}
#[inline]
pub fn to_nearest_px(&self) -> int {
let Au(s) = *self;
((s as f64) / 60f64).round() as int
}
#[inline]
pub fn to_snapped(&self) -> Au {
let Au(s) = *self;
let res = s % 60i32;
return if res >= 30i32 { return Au(s - res + 60i32) }
else { return Au(s - res) };
}
#[inline]
pub fn from_frac32_px(px: f32) -> Au {
Au((px * 60f32) as i32)
}
#[inline]
pub fn from_pt(pt: f64) -> Au {
from_frac_px(pt_to_px(pt))
}
#[inline]
pub fn from_frac_px(px: f64) -> Au {
Au((px * 60f64) as i32)
}
#[inline]
pub fn min(x: Au, y: Au) -> Au {
let Au(xi) = x;
let Au(yi) = y;
if xi < yi { x } else { y }
}
#[inline]
pub fn max(x: Au, y: Au) -> Au {
let Au(xi) = x;
let Au(yi) = y;
if xi > yi { x } else { y }
}
}
// assumes 72 points per inch, and 96 px per inch
pub fn pt_to_px(pt: f64) -> f64 {
    pt / 72f64 * 96f64
}
// assumes 72 points per inch, and 96 px per inch
pub fn px_to_pt(px: f64) -> f64 {
    px / 96f64 * 72f64
}
/// Converts fractional px to app units (1 px == 60 au), truncating.
pub fn from_frac_px(px: f64) -> Au {
    Au((px * 60f64) as i32)
}
/// Converts whole px to app units; fails if the result exceeds i32 range.
pub fn from_px(px: int) -> Au {
    NumCast::from(px * 60).unwrap()
}
/// Converts app units to whole px, truncating toward zero.
pub fn to_px(au: Au) -> int {
    let Au(a) = au;
    (a / 60) as int
}
/// Converts app units to fractional px.
pub fn to_frac_px(au: Au) -> f64 {
    let Au(a) = au;
    (a as f64) / 60f64
}
// assumes 72 points per inch, and 96 px per inch
// NOTE(review): unlike `Au::from_pt`, this truncates to whole px (via `int`)
// before converting, losing the fractional part — confirm intended.
pub fn from_pt(pt: f64) -> Au {
    from_px((pt / 72f64 * 96f64) as int)
}
// assumes 72 points per inch, and 96 px per inch
pub fn to_pt(au: Au) -> f64 {
    let Au(a) = au;
    (a as f64) / 60f64 * 72f64 / 96f64
}
/// Returns true if the rect contains the given point. Points on the top or left sides of the rect
/// are considered inside the rectangle, while points on the right or bottom sides of the rect are
/// not considered inside the rectangle.
pub fn rect_contains_point<T:PartialOrd + Add<T,T>>(rect: Rect<T>, point: Point2D<T>) -> bool {
    point.x >= rect.origin.x && point.x < rect.origin.x + rect.size.width &&
        point.y >= rect.origin.y && point.y < rect.origin.y + rect.size.height
}
/// A helper function to convert a rect of `f32` pixels to a rect of app units.
pub fn f32_rect_to_au_rect(rect: Rect<f32>) -> Rect<Au> {
    Rect(Point2D(Au::from_frac32_px(rect.origin.x), Au::from_frac32_px(rect.origin.y)),
         Size2D(Au::from_frac32_px(rect.size.width), Au::from_frac32_px(rect.size.height)))
}

44
components/util/lib.rs Normal file
View file

@@ -0,0 +1,44 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(macro_rules,unsafe_destructor)]
#![feature(phase)]
#[phase(plugin, link)]
extern crate log;
extern crate debug;
extern crate alloc;
extern crate azure;
extern crate collections;
extern crate geom;
extern crate getopts;
extern crate layers;
extern crate libc;
extern crate native;
extern crate rand;
extern crate rustrt;
extern crate serialize;
extern crate sync;
#[cfg(target_os="macos")]
extern crate task_info;
extern crate std_time = "time";
extern crate string_cache;
pub mod atom;
pub mod cache;
pub mod debug_utils;
pub mod geometry;
pub mod logical_geometry;
pub mod memory;
pub mod namespace;
pub mod opts;
pub mod range;
pub mod smallvec;
pub mod sort;
pub mod str;
pub mod task;
pub mod time;
pub mod vec;
pub mod workqueue;

File diff suppressed because it is too large Load diff

209
components/util/memory.rs Normal file
View file

@ -0,0 +1,209 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use libc::{c_char,c_int,c_void,size_t};
use std::io::timer::sleep;
#[cfg(target_os="linux")]
use std::io::File;
use std::mem::size_of;
#[cfg(target_os="linux")]
use std::os::page_size;
use std::ptr::mut_null;
use task::spawn_named;
#[cfg(target_os="macos")]
use task_info::task_basic_info::{virtual_size,resident_size};
/// Newtype over the sending half of the memory profiler's channel.
pub struct MemoryProfilerChan(pub Sender<MemoryProfilerMsg>);
impl MemoryProfilerChan {
    /// Sends `msg` to the profiler task (fails if the receiver is gone).
    pub fn send(&self, msg: MemoryProfilerMsg) {
        let MemoryProfilerChan(ref c) = *self;
        c.send(msg);
    }
}
/// Messages understood by the memory profiler task.
pub enum MemoryProfilerMsg {
    /// Message used to force print the memory profiling metrics.
    PrintMsg,
    /// Tells the memory profiler to shut down.
    ExitMsg,
}
/// The memory profiler task's state: the receiving end of its channel.
pub struct MemoryProfiler {
    pub port: Receiver<MemoryProfilerMsg>,
}
impl MemoryProfiler {
    /// Spawns the profiler task and returns a channel to it. With
    /// `period = Some(seconds)`, a companion timer task sends `PrintMsg` on
    /// that interval; with `None`, a stub task merely drains messages so
    /// senders never block, until `ExitMsg` or channel closure.
    pub fn create(period: Option<f64>) -> MemoryProfilerChan {
        let (chan, port) = channel();
        match period {
            Some(period) => {
                // Seconds -> milliseconds for `sleep`.
                let period = (period * 1000f64) as u64;
                let chan = chan.clone();
                spawn_named("Memory profiler timer", proc() {
                    loop {
                        sleep(period);
                        // Stop ticking once the profiler has gone away.
                        if chan.send_opt(PrintMsg).is_err() {
                            break;
                        }
                    }
                });
                // Spawn the memory profiler.
                spawn_named("Memory profiler", proc() {
                    let memory_profiler = MemoryProfiler::new(port);
                    memory_profiler.start();
                });
            }
            None => {
                // No-op to handle messages when the memory profiler is
                // inactive.
                spawn_named("Memory profiler", proc() {
                    loop {
                        match port.recv_opt() {
                            Err(_) | Ok(ExitMsg) => break,
                            _ => {}
                        }
                    }
                });
            }
        }
        MemoryProfilerChan(chan)
    }

    pub fn new(port: Receiver<MemoryProfilerMsg>) -> MemoryProfiler {
        MemoryProfiler {
            port: port
        }
    }

    /// Message loop: runs until `ExitMsg` or the channel is closed.
    pub fn start(&self) {
        loop {
            match self.port.recv_opt() {
                Ok(msg) => {
                    if !self.handle_msg(msg) {
                        break
                    }
                }
                _ => break
            }
        }
    }

    /// Returns false when the loop should terminate.
    fn handle_msg(&self, msg: MemoryProfilerMsg) -> bool {
        match msg {
            PrintMsg => {
                self.handle_print_msg();
                true
            },
            ExitMsg => false
        }
    }

    /// Prints one labelled measurement in MiB, or "???" if unavailable.
    fn print_measurement(path: &str, nbytes: Option<u64>) {
        match nbytes {
            Some(nbytes) => {
                let mebi = 1024f64 * 1024f64;
                println!("{:16s}: {:12.2f}", path, (nbytes as f64) / mebi);
            }
            None => {
                println!("{:16s}: {:>12s}", path, "???");
            }
        }
    }

    /// Prints the full metrics table: OS-level vsize/resident plus jemalloc
    /// heap statistics.
    fn handle_print_msg(&self) {
        println!("{:16s}: {:12s}", "_category_", "_size (MiB)_");

        // Virtual and physical memory usage, as reported by the OS.
        MemoryProfiler::print_measurement("vsize", get_vsize());
        MemoryProfiler::print_measurement("resident", get_resident());

        // The descriptions of the jemalloc measurements are taken directly
        // from the jemalloc documentation.

        // Total number of bytes allocated by the application.
        MemoryProfiler::print_measurement("heap-allocated", get_jemalloc_stat("stats.allocated"));

        // Total number of bytes in active pages allocated by the application.
        // This is a multiple of the page size, and greater than or equal to
        // |stats.allocated|.
        MemoryProfiler::print_measurement("heap-active", get_jemalloc_stat("stats.active"));

        // Total number of bytes in chunks mapped on behalf of the application.
        // This is a multiple of the chunk size, and is at least as large as
        // |stats.active|. This does not include inactive chunks.
        MemoryProfiler::print_measurement("heap-mapped", get_jemalloc_stat("stats.mapped"));

        println!("");
    }
}
// FFI binding to jemalloc's introspection entry point (mallctl).
extern {
    fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
                  newp: *mut c_void, newlen: size_t) -> c_int;
}
/// Reads a single size_t-valued jemalloc statistic (e.g. "stats.allocated")
/// via mallctl. Returns `None` when the call reports an error.
fn get_jemalloc_stat(name: &'static str) -> Option<u64> {
    let mut old: size_t = 0;
    let c_name = name.to_c_str();
    let oldp = &mut old as *mut _ as *mut c_void;
    let mut oldlen = size_of::<size_t>() as size_t;
    let rv: c_int;
    unsafe {
        // Read-only query: no new value is supplied (null newp, zero newlen).
        rv = je_mallctl(c_name.unwrap(), oldp, &mut oldlen, mut_null(), 0);
    }
    // mallctl returns 0 on success.
    if rv == 0 { Some(old as u64) } else { None }
}
// Like std::macros::try!, but for Option<>: evaluates to the unwrapped value,
// or early-returns None from the enclosing function.
macro_rules! option_try(
    ($e:expr) => (match $e { Some(e) => e, None => return None })
)
/// Reads the `field`-th whitespace-separated value from /proc/self/statm and
/// converts it from pages to bytes. Per proc(5), field 0 is total program
/// size and field 1 is resident set size.
#[cfg(target_os="linux")]
fn get_proc_self_statm_field(field: uint) -> Option<u64> {
    let mut f = File::open(&Path::new("/proc/self/statm"));
    match f.read_to_string() {
        Ok(contents) => {
            let s = option_try!(contents.as_slice().words().nth(field));
            let npages: u64 = option_try!(from_str(s));
            Some(npages * (page_size() as u64))
        }
        Err(_) => None
    }
}
/// Virtual memory size in bytes (statm field 0).
#[cfg(target_os="linux")]
fn get_vsize() -> Option<u64> {
    get_proc_self_statm_field(0)
}
/// Resident set size in bytes (statm field 1).
#[cfg(target_os="linux")]
fn get_resident() -> Option<u64> {
    get_proc_self_statm_field(1)
}
/// Virtual memory size via the task_info crate (Mach task_basic_info).
#[cfg(target_os="macos")]
fn get_vsize() -> Option<u64> {
    virtual_size()
}
/// Resident set size via the task_info crate (Mach task_basic_info).
#[cfg(target_os="macos")]
fn get_resident() -> Option<u64> {
    resident_size()
}
// Fallbacks for platforms without a supported measurement mechanism.
#[cfg(not(target_os="linux"), not(target_os = "macos"))]
fn get_vsize() -> Option<u64> {
    None
}

#[cfg(not(target_os="linux"), not(target_os = "macos"))]
fn get_resident() -> Option<u64> {
    None
}

View file

@ -0,0 +1,43 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// The XML/HTML namespaces Servo knows about, with `Other` for any other
/// namespace URL.
#[deriving(PartialEq, Clone, Encodable)]
pub enum Namespace {
    Null,
    HTML,
    XML,
    XMLNS,
    XLink,
    SVG,
    MathML,
    Other(String)
}
impl Namespace {
    /// Maps a namespace URL to its variant; the empty string means
    /// "no namespace" (`Null`), and unknown URLs become `Other`.
    pub fn from_str(url: &str) -> Namespace {
        match url {
            "http://www.w3.org/1999/xhtml" => HTML,
            "http://www.w3.org/XML/1998/namespace" => XML,
            "http://www.w3.org/2000/xmlns/" => XMLNS,
            "http://www.w3.org/1999/xlink" => XLink,
            "http://www.w3.org/2000/svg" => SVG,
            "http://www.w3.org/1998/Math/MathML" => MathML,
            "" => Null,
            ns => Other(ns.to_string())
        }
    }

    /// Returns the canonical URL for the namespace ("" for `Null`).
    pub fn to_str<'a>(&'a self) -> &'a str {
        match *self {
            Null => "",
            HTML => "http://www.w3.org/1999/xhtml",
            XML => "http://www.w3.org/XML/1998/namespace",
            XMLNS => "http://www.w3.org/2000/xmlns/",
            XLink => "http://www.w3.org/1999/xlink",
            SVG => "http://www.w3.org/2000/svg",
            MathML => "http://www.w3.org/1998/Math/MathML",
            Other(ref x) => x.as_slice()
        }
    }
}

235
components/util/opts.rs Normal file
View file

@@ -0,0 +1,235 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use geometry::ScreenPx;
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use geom::scale_factor::ScaleFactor;
use layers::geometry::DevicePixel;
use getopts;
use std::cmp;
use std::io;
use std::os;
use std::rt;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
    /// The initial URLs to load.
    pub urls: Vec<String>,

    /// The rendering backend to use (`-r`).
    pub render_backend: BackendType,

    /// How many threads to use for CPU rendering (`-t`).
    ///
    /// FIXME(pcwalton): This is not currently used. All rendering is sequential.
    pub n_render_threads: uint,

    /// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
    /// compositing is always done on the GPU.
    pub cpu_painting: bool,

    /// The maximum size of each tile in pixels (`-s`).
    pub tile_size: uint,

    /// The ratio of device pixels per px at the default scale. If unspecified, will use the
    /// platform default setting.
    pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>,

    /// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and
    /// cause it to produce output on that interval (`-p`).
    pub time_profiler_period: Option<f64>,

    /// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it
    /// and cause it to produce output on that interval (`-m`).
    pub memory_profiler_period: Option<f64>,

    /// Enable experimental web features (`-e`).
    pub enable_experimental: bool,

    /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
    /// sequential algorithm.
    pub layout_threads: uint,

    /// True to exit after the page load (`-x`).
    pub exit_after_load: bool,

    /// Output file to write rendering into, if specified (`-o`).
    pub output_file: Option<String>,

    /// True to run without a visible window (`-z`).
    pub headless: bool,

    /// True to exit on task failure instead of displaying about:failure (`-f`).
    pub hard_fail: bool,

    /// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then
    /// intrinsic widths are computed as a separate pass instead of during flow construction. You
    /// may wish to turn this flag on in order to benchmark style recalculation against other
    /// browser engines.
    pub bubble_inline_sizes_separately: bool,

    /// True if we should show borders on all layers and tiles for
    /// debugging purposes (`--show-debug-borders`).
    pub show_debug_borders: bool,

    /// If set with --disable-text-aa, disable antialiasing on fonts. This is primarily useful for reftests
    /// where pixel perfect results are required when using fonts such as the Ahem
    /// font for layout tests.
    pub enable_text_antialiasing: bool,

    /// True if each step of layout is traced to an external JSON file
    /// for debugging purposes. Settings this implies sequential layout
    /// and render.
    pub trace_layout: bool,
}
/// Prints the getopts-generated usage message to stdout.
fn print_usage(app: &str, opts: &[getopts::OptGroup]) {
    let message = format!("Usage: {} [ options ... ] [URL]\n\twhere options include", app);
    println!("{}", getopts::usage(message.as_slice(), opts));
}
/// Reports an argument error on stderr and marks the process exit status as
/// failing (does not itself terminate the process).
fn args_fail(msg: &str) {
    io::stderr().write_line(msg).unwrap();
    os::set_exit_status(1);
}
/// Parses command-line `args` (args[0] being the program name) into an `Opts`.
/// Returns `None` — after printing usage and/or setting a failing exit status —
/// when parsing fails, `-h`/`--help` is given, or no URL is supplied.
pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
    let app_name = args[0].to_string();
    let args = args.tail();
    let opts = vec!(
        getopts::optflag("c", "cpu", "CPU rendering"),
        getopts::optopt("o", "output", "Output file", "output.png"),
        getopts::optopt("r", "rendering", "Rendering backend", "direct2d|core-graphics|core-graphics-accelerated|cairo|skia."),
        getopts::optopt("s", "size", "Size of tiles", "512"),
        getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""),
        getopts::optflag("e", "experimental", "Enable experimental web features"),
        getopts::optopt("t", "threads", "Number of render threads", "1"),
        getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"),
        getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"),
        getopts::optflag("x", "exit", "Exit after load flag"),
        getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"),
        getopts::optflag("z", "headless", "Headless mode"),
        getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"),
        getopts::optflag("b", "bubble-widths", "Bubble intrinsic widths separately like other engines"),
        getopts::optflag("", "show-debug-borders", "Show debugging borders on layers and tiles."),
        getopts::optflag("", "disable-text-aa", "Disable antialiasing for text rendering."),
        getopts::optflag("", "trace-layout", "Write layout trace to external file for debugging."),
        getopts::optflag("h", "help", "Print this message")
    );

    let opt_match = match getopts::getopts(args, opts.as_slice()) {
        Ok(m) => m,
        Err(f) => {
            args_fail(format!("{}", f).as_slice());
            return None;
        }
    };

    if opt_match.opt_present("h") || opt_match.opt_present("help") {
        print_usage(app_name.as_slice(), opts.as_slice());
        return None;
    };

    // The free (non-option) arguments are the URLs to load.
    let urls = if opt_match.free.is_empty() {
        print_usage(app_name.as_slice(), opts.as_slice());
        args_fail("servo asks that you provide 1 or more URLs");
        return None;
    } else {
        opt_match.free.clone()
    };

    // Unknown backend names fail the task; the default is Skia.
    let render_backend = match opt_match.opt_str("r") {
        Some(backend_str) => {
            if "direct2d" == backend_str.as_slice() {
                Direct2DBackend
            } else if "core-graphics" == backend_str.as_slice() {
                CoreGraphicsBackend
            } else if "core-graphics-accelerated" == backend_str.as_slice() {
                CoreGraphicsAcceleratedBackend
            } else if "cairo" == backend_str.as_slice() {
                CairoBackend
            } else if "skia" == backend_str.as_slice() {
                SkiaBackend
            } else {
                fail!("unknown backend type")
            }
        }
        None => SkiaBackend
    };

    let tile_size: uint = match opt_match.opt_str("s") {
        Some(tile_size_str) => from_str(tile_size_str.as_slice()).unwrap(),
        None => 512,
    };

    let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str|
        ScaleFactor(from_str(dppx_str.as_slice()).unwrap())
    );

    let mut n_render_threads: uint = match opt_match.opt_str("t") {
        Some(n_render_threads_str) => from_str(n_render_threads_str.as_slice()).unwrap(),
        None => 1,      // FIXME: Number of cores.
    };

    // If only the flag is present, default to a 5 second period for both profilers.
    let time_profiler_period = opt_match.opt_default("p", "5").map(|period| {
        from_str(period.as_slice()).unwrap()
    });
    let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| {
        from_str(period.as_slice()).unwrap()
    });

    let cpu_painting = opt_match.opt_present("c");

    let mut layout_threads: uint = match opt_match.opt_str("y") {
        Some(layout_threads_str) => from_str(layout_threads_str.as_slice()).unwrap(),
        None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
    };

    let mut bubble_inline_sizes_separately = opt_match.opt_present("b");

    // Layout tracing requires sequential layout/render and separate
    // width bubbling, so it overrides those settings.
    let trace_layout = opt_match.opt_present("trace-layout");
    if trace_layout {
        n_render_threads = 1;
        layout_threads = 1;
        bubble_inline_sizes_separately = true;
    }

    Some(Opts {
        urls: urls,
        render_backend: render_backend,
        n_render_threads: n_render_threads,
        cpu_painting: cpu_painting,
        tile_size: tile_size,
        device_pixels_per_px: device_pixels_per_px,
        time_profiler_period: time_profiler_period,
        memory_profiler_period: memory_profiler_period,
        enable_experimental: opt_match.opt_present("e"),
        layout_threads: layout_threads,
        exit_after_load: opt_match.opt_present("x"),
        output_file: opt_match.opt_str("o"),
        headless: opt_match.opt_present("z"),
        hard_fail: opt_match.opt_present("f"),
        bubble_inline_sizes_separately: bubble_inline_sizes_separately,
        show_debug_borders: opt_match.opt_present("show-debug-borders"),
        enable_text_antialiasing: !opt_match.opt_present("disable-text-aa"),
        trace_layout: trace_layout,
    })
}
// Process-global flag mirroring the `-e` option. Accessed without
// synchronization; NOTE(review): presumably written once during startup
// before reads begin — confirm.
static mut EXPERIMENTAL_ENABLED: bool = false;
/// Sets the global "experimental web features" flag.
pub fn set_experimental_enabled(new_value: bool) {
    unsafe {
        EXPERIMENTAL_ENABLED = new_value;
    }
}
/// Reads the global "experimental web features" flag.
pub fn experimental_enabled() -> bool {
    unsafe {
        EXPERIMENTAL_ENABLED
    }
}

355
components/util/range.rs Normal file
View file

@@ -0,0 +1,355 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::cmp::{max, min};
use std::iter;
use std::fmt;
use std::num;
use std::num::{Bounded, Zero};
/// An index type to be used by a `Range`. Bundles every comparison and
/// arithmetic bound that `Range`'s methods need from their index type.
pub trait RangeIndex: Copy
                    + Clone
                    + fmt::Show
                    + PartialEq
                    + PartialOrd
                    + Eq
                    + Ord
                    + Add<Self, Self>
                    + Sub<Self, Self>
                    + Neg<Self>
                    + Zero {}
/// A `RangeIndex` that wraps a primitive integer type `T` and can be
/// converted to and from it.
pub trait IntRangeIndex<T>: RangeIndex + Copy {
    /// Wraps a raw integer in the index type.
    fn new(x: T) -> Self;
    /// Unwraps the index back into its raw integer value.
    fn get(self) -> T;
}
impl RangeIndex for int {}
/// Identity implementation: a plain `int` is its own index type.
impl IntRangeIndex<int> for int {
    #[inline]
    fn new(x: int) -> int { x }
    #[inline]
    fn get(self) -> int { self }
}
/// Implements a range index type with operator overloads.
///
/// Expands a newtype tuple struct around the given integer type and
/// derives/implements everything `RangeIndex` requires, so callers can
/// declare strongly-typed indices (e.g. a char index distinct from a
/// byte index) with one macro invocation.
#[macro_export]
macro_rules! int_range_index {
    ($(#[$attr:meta])* struct $Self:ident($T:ty)) => (
        #[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Show, Zero)]
        $(#[$attr])*
        pub struct $Self(pub $T);
        impl $Self {
            /// Convenience cast of the wrapped value to `uint`.
            #[inline]
            pub fn to_uint(self) -> uint {
                self.get() as uint
            }
        }
        impl RangeIndex for $Self {}
        impl IntRangeIndex<$T> for $Self {
            #[inline]
            fn new(x: $T) -> $Self {
                $Self(x)
            }
            #[inline]
            fn get(self) -> $T {
                match self { $Self(x) => x }
            }
        }
        // Arithmetic forwards to the wrapped integer and re-wraps.
        impl Add<$Self, $Self> for $Self {
            #[inline]
            fn add(&self, other: &$Self) -> $Self {
                $Self(self.get() + other.get())
            }
        }
        impl Sub<$Self, $Self> for $Self {
            #[inline]
            fn sub(&self, other: &$Self) -> $Self {
                $Self(self.get() - other.get())
            }
        }
        impl Neg<$Self> for $Self {
            #[inline]
            fn neg(&self) -> $Self {
                $Self(-self.get())
            }
        }
    )
}
/// How one range relates to another, from the point of view of the
/// receiver in `Range::relation_to_range`. Overlap variants carry the
/// size of the overlapping region.
#[deriving(Show)]
pub enum RangeRelation<I> {
    /// `self` overlaps the start of `other` by the given amount.
    OverlapsBegin(/* overlap */ I),
    /// `self` overlaps the end of `other` by the given amount.
    OverlapsEnd(/* overlap */ I),
    /// `self` lies entirely inside `other`.
    ContainedBy,
    /// `other` lies entirely inside `self`.
    Contains,
    /// Both ranges have identical begin and end.
    Coincides,
    /// `self` ends at or before `other` begins.
    EntirelyBefore,
    /// `self` begins at or after `other` ends.
    EntirelyAfter
}
/// A range of indices, stored as a beginning offset plus a length:
/// the half-open interval `[begin, begin + length)`.
#[deriving(Clone, Encodable)]
pub struct Range<I> {
    begin: I,
    length: I,
}
/// Formats a range in half-open interval notation, `[begin .. end)`.
impl<I: RangeIndex> fmt::Show for Range<I> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "[{} .. {})", self.begin(), self.end())
    }
}
/// An iterator over each index in a range. Wraps a plain integer
/// `iter::Range` over the raw values and re-wraps each yielded integer
/// in the index type `I`.
pub struct EachIndex<T, I> {
    it: iter::Range<T>,
}
/// Builds an `EachIndex` iterating over `[start, stop)`.
pub fn each_index<T: Int, I: IntRangeIndex<T>>(start: I, stop: I) -> EachIndex<T, I> {
    EachIndex { it: iter::range(start.get(), stop.get()) }
}
impl<T: Int, I: IntRangeIndex<T>> Iterator<I> for EachIndex<T, I> {
    #[inline]
    fn next(&mut self) -> Option<I> {
        self.it.next().map(|i| IntRangeIndex::new(i))
    }
    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        self.it.size_hint()
    }
}
impl<I: RangeIndex> Range<I> {
    /// Create a new range from beginning and length offsets. This could be
    /// denoted as `[begin, begin + length)`.
    ///
    /// ~~~
    ///    |-- begin ->|-- length ->|
    ///    |           |            |
    /// <- o - - - - - +============+ - - - ->
    /// ~~~
    #[inline]
    pub fn new(begin: I, length: I) -> Range<I> {
        Range { begin: begin, length: length }
    }
    /// Creates the zero-length range starting at index zero.
    #[inline]
    pub fn empty() -> Range<I> {
        Range::new(num::zero(), num::zero())
    }
    /// The index offset to the beginning of the range.
    ///
    /// ~~~
    ///    |-- begin ->|
    ///    |           |
    /// <- o - - - - - +============+ - - - ->
    /// ~~~
    #[inline]
    pub fn begin(&self) -> I { self.begin }
    /// The index offset from the beginning to the end of the range.
    ///
    /// ~~~
    ///                |-- length ->|
    ///                |            |
    /// <- o - - - - - +============+ - - - ->
    /// ~~~
    #[inline]
    pub fn length(&self) -> I { self.length }
    /// The index offset to the end of the range.
    ///
    /// ~~~
    ///    |--------- end --------->|
    ///    |                        |
    /// <- o - - - - - +============+ - - - ->
    /// ~~~
    #[inline]
    pub fn end(&self) -> I { self.begin + self.length }
    /// `true` if the index is between the beginning and the end of the range.
    /// The end index itself is excluded (half-open interval).
    ///
    /// ~~~
    ///        false        true      false
    ///          |           |          |
    /// <- o - - + - - +=====+======+ - + - ->
    /// ~~~
    #[inline]
    pub fn contains(&self, i: I) -> bool {
        i >= self.begin() && i < self.end()
    }
    /// `true` if the offset from the beginning to the end of the range is zero.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.length().is_zero()
    }
    /// Shift the entire range by the supplied index delta.
    ///
    /// ~~~
    ///                 |-- delta ->|
    ///                 |           |
    /// <- o - +============+ - - - - - | - - - ->
    ///                                 |
    /// <- o - - - - - - - +============+ - - - ->
    /// ~~~
    #[inline]
    pub fn shift_by(&mut self, delta: I) {
        self.begin = self.begin + delta;
    }
    /// Extend the end of the range by the supplied index delta.
    ///
    /// ~~~
    ///                     |-- delta ->|
    ///                     |           |
    /// <- o - - - - - +====+ - - - - - | - - - ->
    ///                                 |
    /// <- o - - - - - +================+ - - - ->
    /// ~~~
    #[inline]
    pub fn extend_by(&mut self, delta: I) {
        self.length = self.length + delta;
    }
    /// Move the end of the range to the target index.
    ///
    /// ~~~
    ///                               target
    ///                                 |
    /// <- o - - - - - +====+ - - - - - | - - - ->
    ///                                 |
    /// <- o - - - - - +================+ - - - ->
    /// ~~~
    #[inline]
    pub fn extend_to(&mut self, target: I) {
        self.length = target - self.begin;
    }
    /// Adjust the beginning offset and the length by the supplied deltas.
    #[inline]
    pub fn adjust_by(&mut self, begin_delta: I, length_delta: I) {
        self.begin = self.begin + begin_delta;
        self.length = self.length + length_delta;
    }
    /// Set the begin and length values.
    #[inline]
    pub fn reset(&mut self, begin: I, length: I) {
        self.begin = begin;
        self.length = length;
    }
    /// Returns the intersection of `self` and `other`, or the empty range
    /// (at index zero) when the two ranges do not overlap.
    #[inline]
    pub fn intersect(&self, other: &Range<I>) -> Range<I> {
        let begin = max(self.begin(), other.begin());
        let end = min(self.end(), other.end());
        if end < begin {
            Range::empty()
        } else {
            Range::new(begin, end - begin)
        }
    }
    /// Computes the relationship between two ranges (`self` and `other`),
    /// from the point of view of `self`. So, 'EntirelyBefore' means
    /// that the `self` range is entirely before `other` range.
    ///
    /// The checks are ordered so the first matching classification wins;
    /// fails only if the ranges slip through every case, which indicates
    /// a logic error in the classification above.
    #[inline]
    pub fn relation_to_range(&self, other: &Range<I>) -> RangeRelation<I> {
        if other.begin() > self.end() {
            return EntirelyBefore;
        }
        if self.begin() > other.end() {
            return EntirelyAfter;
        }
        if self.begin() == other.begin() && self.end() == other.end() {
            return Coincides;
        }
        if self.begin() <= other.begin() && self.end() >= other.end() {
            return Contains;
        }
        if self.begin() >= other.begin() && self.end() <= other.end() {
            return ContainedBy;
        }
        if self.begin() < other.begin() && self.end() < other.end() {
            let overlap = self.end() - other.begin();
            return OverlapsBegin(overlap);
        }
        if self.begin() > other.begin() && self.end() > other.end() {
            let overlap = other.end() - self.begin();
            return OverlapsEnd(overlap);
        }
        fail!("relation_to_range(): didn't classify self={}, other={}",
              self, other);
    }
}
/// Methods for `Range`s with indices based on integer values
impl<T: Int, I: IntRangeIndex<T>> Range<I> {
    /// Returns an iterater that increments over `[begin, end)`.
    #[inline]
    pub fn each_index(&self) -> EachIndex<T, I> {
        each_index(self.begin(), self.end())
    }
    /// `true` if this range addresses only valid positions in `s`.
    ///
    /// Returns `false` when the string's length does not fit in the index
    /// type `T` at all.
    ///
    /// NOTE(review): `self.begin() < len` rejects `begin == len` even for
    /// an empty range at the end of the string — confirm that exclusion
    /// is intended by callers.
    #[inline]
    pub fn is_valid_for_string(&self, s: &str) -> bool {
        let s_len = s.len();
        match num::cast::<uint, T>(s_len) {
            Some(len) => {
                let len = IntRangeIndex::new(len);
                self.begin() < len
                && self.end() <= len
                && self.length() <= len
            },
            None => {
                debug!("Range<T>::is_valid_for_string: string length (len={}) is longer than the \
                        max value for the range index (max={})", s_len,
                        {
                            let max: T = Bounded::max_value();
                            let val: I = IntRangeIndex::new(max);
                            val
                        });
                false
            },
        }
    }
    /// Repairs this range after the indices covered by `other` have been
    /// coalesced (merged) in the underlying sequence, shifting/shrinking
    /// so this range still refers to the surviving positions.
    ///
    /// NOTE(review): `Coincides`/`ContainedBy` reset this range to length
    /// one at `other.begin()` — presumably because the coalesced span is
    /// replaced by a single merged element; confirm against callers.
    #[inline]
    pub fn repair_after_coalesced_range(&mut self, other: &Range<I>) {
        let relation = self.relation_to_range(other);
        debug!("repair_after_coalesced_range: possibly repairing range {}", *self);
        debug!("repair_after_coalesced_range: relation of original range and coalesced range {}: {}",
               *other, relation);
        // A unit length in the index type, used for the merged element.
        let _1: I = IntRangeIndex::new(num::one::<T>());
        match relation {
            EntirelyBefore => {},
            EntirelyAfter => { self.shift_by(-other.length()); },
            Coincides | ContainedBy => { self.reset(other.begin(), _1); },
            Contains => { self.extend_by(-other.length()); },
            OverlapsBegin(overlap) => { self.extend_by(_1 - overlap); },
            OverlapsEnd(overlap) => {
                let len = self.length() - overlap + _1;
                self.reset(other.begin(), len);
            }
        };
        debug!("repair_after_coalesced_range: new range: ---- {}", *self);
    }
}

530
components/util/smallvec.rs Normal file
View file

@ -0,0 +1,530 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Small vectors in various sizes. These store a certain number of elements inline and fall back
//! to the heap for larger allocations.
use i = std::mem::init;
use std::cmp;
use std::intrinsics;
use std::kinds::marker::ContravariantLifetime;
use std::mem;
use std::ptr;
use std::raw::Slice;
use rustrt::local_heap;
use alloc::heap;
// Generic code for all small vectors
pub trait VecLike<T> {
fn vec_len(&self) -> uint;
fn vec_push(&mut self, value: T);
fn vec_mut_slice<'a>(&'a mut self, start: uint, end: uint) -> &'a mut [T];
#[inline]
fn vec_mut_slice_from<'a>(&'a mut self, start: uint) -> &'a mut [T] {
let len = self.vec_len();
self.vec_mut_slice(start, len)
}
}
impl<T> VecLike<T> for Vec<T> {
#[inline]
fn vec_len(&self) -> uint {
self.len()
}
#[inline]
fn vec_push(&mut self, value: T) {
self.push(value);
}
#[inline]
fn vec_mut_slice<'a>(&'a mut self, start: uint, end: uint) -> &'a mut [T] {
self.mut_slice(start, end)
}
}
/// Raw accessors over a small vector's internal fields (length, capacity,
/// heap pointer, inline storage). Kept private so only `SmallVec`'s
/// default methods can manipulate the representation directly.
trait SmallVecPrivate<T> {
    // Setters are unsafe: callers must keep len/cap/ptr consistent with
    // the actual storage or later element accesses are undefined.
    unsafe fn set_len(&mut self, new_len: uint);
    unsafe fn set_cap(&mut self, new_cap: uint);
    /// Pointer into the *inline* storage at `index`.
    fn data(&self, index: uint) -> *const T;
    fn mut_data(&mut self, index: uint) -> *mut T;
    /// Pointer to the *heap* allocation (only meaningful when spilled).
    unsafe fn ptr(&self) -> *const T;
    unsafe fn mut_ptr(&mut self) -> *mut T;
    unsafe fn set_ptr(&mut self, new_ptr: *mut T);
}
/// The shared behavior of every small vector: elements live in the inline
/// array until the capacity is exceeded, then "spill" to a heap
/// allocation. All methods are written against `SmallVecPrivate`'s raw
/// accessors so one default implementation serves every size.
pub trait SmallVec<T> : SmallVecPrivate<T> {
    /// Number of elements the inline storage can hold.
    fn inline_size(&self) -> uint;
    fn len(&self) -> uint;
    fn cap(&self) -> uint;
    /// `true` once elements have moved to a heap allocation. The inline
    /// capacity equals `inline_size`, so any larger capacity implies heap.
    fn spilled(&self) -> bool {
        self.cap() > self.inline_size()
    }
    /// Pointer to the first element, wherever the elements currently live.
    fn begin(&self) -> *const T {
        unsafe {
            if self.spilled() {
                self.ptr()
            } else {
                self.data(0)
            }
        }
    }
    /// One-past-the-end pointer.
    fn end(&self) -> *const T {
        unsafe {
            self.begin().offset(self.len() as int)
        }
    }
    fn iter<'a>(&'a self) -> SmallVecIterator<'a,T> {
        SmallVecIterator {
            ptr: self.begin(),
            end: self.end(),
            lifetime: ContravariantLifetime::<'a>,
        }
    }
    fn mut_iter<'a>(&'a mut self) -> SmallVecMutIterator<'a,T> {
        unsafe {
            SmallVecMutIterator {
                ptr: mem::transmute(self.begin()),
                end: mem::transmute(self.end()),
                lifetime: ContravariantLifetime::<'a>,
            }
        }
    }
    /// NB: For efficiency reasons (avoiding making a second copy of the inline elements), this
    /// actually clears out the original array instead of moving it.
    fn move_iter<'a>(&'a mut self) -> SmallVecMoveIterator<'a,T> {
        unsafe {
            let iter = mem::transmute(self.iter());
            // Hand ownership of any heap allocation to the iterator so it
            // can free it; the vector itself is reset to empty/inline.
            let ptr_opt = if self.spilled() {
                Some(mem::transmute(self.ptr()))
            } else {
                None
            };
            let inline_size = self.inline_size();
            self.set_cap(inline_size);
            self.set_len(0);
            SmallVecMoveIterator {
                allocation: ptr_opt,
                cap: inline_size,
                iter: iter,
                lifetime: ContravariantLifetime::<'a>,
            }
        }
    }
    /// Appends `value`, doubling the capacity first when full.
    fn push(&mut self, value: T) {
        let cap = self.cap();
        if self.len() == cap {
            self.grow(cmp::max(cap * 2, 1))
        }
        unsafe {
            let end: &mut T = mem::transmute(self.end());
            ptr::write(end, value);
            let len = self.len();
            self.set_len(len + 1)
        }
    }
    /// Drains `other` into `self`, consuming its elements.
    fn push_all_move<V:SmallVec<T>>(&mut self, mut other: V) {
        for value in other.move_iter() {
            self.push(value)
        }
    }
    /// Removes and returns the last element, or `None` when empty.
    fn pop(&mut self) -> Option<T> {
        if self.len() == 0 {
            return None
        }
        unsafe {
            let mut value: T = mem::uninitialized();
            let last_index = self.len() - 1;
            if (last_index as int) < 0 {
                fail!("overflow")
            }
            let end_ptr = self.begin().offset(last_index as int);
            // Swap the element out; the slot beyond the new length is left
            // holding the uninitialized value and is never read again.
            mem::swap(&mut value, mem::transmute::<*const T,&mut T>(end_ptr));
            self.set_len(last_index);
            Some(value)
        }
    }
    /// Reallocates to `new_cap`, copying the elements over and releasing
    /// the old storage (heap free when spilled, zeroing when inline so
    /// the moved-out elements are not dropped twice).
    fn grow(&mut self, new_cap: uint) {
        unsafe {
            let new_alloc: *mut T = mem::transmute(heap::allocate(mem::size_of::<T>() *
                                                                  new_cap,
                                                                  mem::min_align_of::<T>()));
            ptr::copy_nonoverlapping_memory(new_alloc, self.begin(), self.len());
            if self.spilled() {
                if intrinsics::owns_managed::<T>() {
                    local_heap::local_free(self.ptr() as *mut u8)
                } else {
                    heap::deallocate(self.mut_ptr() as *mut u8,
                                     mem::size_of::<T>() * self.cap(),
                                     mem::min_align_of::<T>())
                }
            } else {
                let mut_begin: *mut T = mem::transmute(self.begin());
                intrinsics::set_memory(mut_begin, 0, self.len())
            }
            self.set_ptr(new_alloc);
            self.set_cap(new_cap)
        }
    }
    /// Bounds-checked element access; fails on out-of-range `index`.
    fn get<'a>(&'a self, index: uint) -> &'a T {
        if index >= self.len() {
            self.fail_bounds_check(index)
        }
        unsafe {
            mem::transmute(self.begin().offset(index as int))
        }
    }
    fn get_mut<'a>(&'a mut self, index: uint) -> &'a mut T {
        if index >= self.len() {
            self.fail_bounds_check(index)
        }
        unsafe {
            mem::transmute(self.begin().offset(index as int))
        }
    }
    /// Borrowed view of the elements in `[start, end)`.
    fn slice<'a>(&'a self, start: uint, end: uint) -> &'a [T] {
        assert!(start <= end);
        assert!(end <= self.len());
        unsafe {
            mem::transmute(Slice {
                data: self.begin().offset(start as int),
                len: (end - start)
            })
        }
    }
    fn as_slice<'a>(&'a self) -> &'a [T] {
        self.slice(0, self.len())
    }
    fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
        let len = self.len();
        self.mut_slice(0, len)
    }
    fn mut_slice<'a>(&'a mut self, start: uint, end: uint) -> &'a mut [T] {
        assert!(start <= end);
        assert!(end <= self.len());
        unsafe {
            mem::transmute(Slice {
                data: self.begin().offset(start as int),
                len: (end - start)
            })
        }
    }
    fn mut_slice_from<'a>(&'a mut self, start: uint) -> &'a mut [T] {
        let len = self.len();
        self.mut_slice(start, len)
    }
    /// Shared failure path for `get`/`get_mut`.
    fn fail_bounds_check(&self, index: uint) {
        fail!("index {} beyond length ({})", index, self.len())
    }
}
/// Borrowing iterator over a small vector: a pair of raw cursor/end
/// pointers, tied to the vector's lifetime via `ContravariantLifetime`.
pub struct SmallVecIterator<'a,T> {
    ptr: *const T,
    end: *const T,
    lifetime: ContravariantLifetime<'a>
}
impl<'a,T> Iterator<&'a T> for SmallVecIterator<'a,T> {
    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        unsafe {
            if self.ptr == self.end {
                return None
            }
            let old = self.ptr;
            // Zero-sized types all live at the same address; step the
            // pointer's integer value instead so the cursor still
            // eventually reaches `end`.
            self.ptr = if mem::size_of::<T>() == 0 {
                mem::transmute(self.ptr as uint + 1)
            } else {
                self.ptr.offset(1)
            };
            Some(mem::transmute(old))
        }
    }
}
/// Mutable counterpart of `SmallVecIterator`; same cursor scheme.
pub struct SmallVecMutIterator<'a,T> {
    ptr: *mut T,
    end: *mut T,
    lifetime: ContravariantLifetime<'a>,
}
impl<'a,T> Iterator<&'a mut T> for SmallVecMutIterator<'a,T> {
    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        unsafe {
            if self.ptr == self.end {
                return None
            }
            let old = self.ptr;
            // See SmallVecIterator::next for the zero-sized-type case.
            self.ptr = if mem::size_of::<T>() == 0 {
                mem::transmute(self.ptr as uint + 1)
            } else {
                self.ptr.offset(1)
            };
            Some(mem::transmute(old))
        }
    }
}
/// Consuming iterator over a small vector. Owns the spilled heap
/// allocation (if any) so it can free it on drop; the source vector was
/// already reset to empty by `move_iter`.
pub struct SmallVecMoveIterator<'a,T> {
    allocation: Option<*mut u8>,
    cap: uint,
    iter: SmallVecIterator<'static,T>,
    lifetime: ContravariantLifetime<'a>,
}
impl<'a,T> Iterator<T> for SmallVecMoveIterator<'a,T> {
    #[inline]
    fn next(&mut self) -> Option<T> {
        unsafe {
            match self.iter.next() {
                None => None,
                Some(reference) => {
                    // Zero out the values as we go so they don't get double-freed.
                    let reference: &mut T = mem::transmute(reference);
                    Some(mem::replace(reference, mem::zeroed()))
                }
            }
        }
    }
}
#[unsafe_destructor]
impl<'a,T> Drop for SmallVecMoveIterator<'a,T> {
    fn drop(&mut self) {
        // Destroy the remaining elements.
        for _ in *self {}
        // Then release the heap allocation the iterator took ownership of.
        match self.allocation {
            None => {}
            Some(allocation) => {
                unsafe {
                    if intrinsics::owns_managed::<T>() {
                        local_heap::local_free(allocation as *mut u8)
                    } else {
                        heap::deallocate(allocation as *mut u8,
                                         mem::size_of::<T>() * self.cap,
                                         mem::min_align_of::<T>())
                    }
                }
            }
        }
    }
}
// Concrete implementations
/// Expands a concrete small-vector struct with `$size` inline slots plus
/// its `SmallVecPrivate`, `SmallVec`, and `VecLike` implementations.
macro_rules! def_small_vector(
    ($name:ident, $size:expr) => (
        pub struct $name<T> {
            // Invariant: when `cap == $size` the elements live in `data`;
            // when `cap > $size` they live behind `ptr` on the heap.
            len: uint,
            cap: uint,
            ptr: *const T,
            data: [T, ..$size],
        }
        impl<T> SmallVecPrivate<T> for $name<T> {
            unsafe fn set_len(&mut self, new_len: uint) {
                self.len = new_len
            }
            unsafe fn set_cap(&mut self, new_cap: uint) {
                self.cap = new_cap
            }
            fn data(&self, index: uint) -> *const T {
                let ptr: *const T = &self.data[index];
                ptr
            }
            fn mut_data(&mut self, index: uint) -> *mut T {
                let ptr: *mut T = &mut self.data[index];
                ptr
            }
            unsafe fn ptr(&self) -> *const T {
                self.ptr
            }
            unsafe fn mut_ptr(&mut self) -> *mut T {
                mem::transmute(self.ptr)
            }
            unsafe fn set_ptr(&mut self, new_ptr: *mut T) {
                self.ptr = mem::transmute(new_ptr)
            }
        }
        impl<T> SmallVec<T> for $name<T> {
            fn inline_size(&self) -> uint {
                $size
            }
            fn len(&self) -> uint {
                self.len
            }
            fn cap(&self) -> uint {
                self.cap
            }
        }
        impl<T> VecLike<T> for $name<T> {
            #[inline]
            fn vec_len(&self) -> uint {
                self.len()
            }
            #[inline]
            fn vec_push(&mut self, value: T) {
                self.push(value);
            }
            #[inline]
            fn vec_mut_slice<'a>(&'a mut self, start: uint, end: uint) -> &'a mut [T] {
                self.mut_slice(start, end)
            }
        }
        impl<T> $name<T> {
            /// Creates an empty vector using only the inline storage.
            #[inline]
            pub fn new() -> $name<T> {
                unsafe {
                    $name {
                        len: 0,
                        cap: $size,
                        ptr: ptr::null(),
                        data: mem::zeroed(),
                    }
                }
            }
        }
    )
)
// Instantiate the concrete sizes used throughout Servo.
def_small_vector!(SmallVec1, 1)
def_small_vector!(SmallVec2, 2)
def_small_vector!(SmallVec4, 4)
def_small_vector!(SmallVec8, 8)
def_small_vector!(SmallVec16, 16)
def_small_vector!(SmallVec24, 24)
def_small_vector!(SmallVec32, 32)
/// Expands a `Drop` impl for each concrete small vector: elements in the
/// inline array are dropped by the compiler, so only the spilled (heap)
/// case needs manual destruction and deallocation.
macro_rules! def_small_vector_drop_impl(
    ($name:ident, $size:expr) => (
        #[unsafe_destructor]
        impl<T> Drop for $name<T> {
            fn drop(&mut self) {
                if !self.spilled() {
                    return
                }
                unsafe {
                    let ptr = self.mut_ptr();
                    // Overwriting each slot runs the old value's destructor.
                    for i in range(0, self.len()) {
                        *ptr.offset(i as int) = mem::uninitialized();
                    }
                    if intrinsics::owns_managed::<T>() {
                        local_heap::local_free(self.ptr() as *mut u8)
                    } else {
                        heap::deallocate(self.mut_ptr() as *mut u8,
                                         mem::size_of::<T>() * self.cap(),
                                         mem::min_align_of::<T>())
                    }
                }
            }
        }
    )
)
def_small_vector_drop_impl!(SmallVec1, 1)
def_small_vector_drop_impl!(SmallVec2, 2)
def_small_vector_drop_impl!(SmallVec4, 4)
def_small_vector_drop_impl!(SmallVec8, 8)
def_small_vector_drop_impl!(SmallVec16, 16)
def_small_vector_drop_impl!(SmallVec24, 24)
def_small_vector_drop_impl!(SmallVec32, 32)
/// Expands a `Clone` impl for each concrete small vector: clone by
/// pushing a copy of every element into a fresh vector.
macro_rules! def_small_vector_clone_impl(
    ($name:ident) => (
        impl<T:Clone> Clone for $name<T> {
            fn clone(&self) -> $name<T> {
                let mut new_vector = $name::new();
                for element in self.iter() {
                    new_vector.push((*element).clone())
                }
                new_vector
            }
        }
    )
)
def_small_vector_clone_impl!(SmallVec1)
def_small_vector_clone_impl!(SmallVec2)
def_small_vector_clone_impl!(SmallVec4)
def_small_vector_clone_impl!(SmallVec8)
def_small_vector_clone_impl!(SmallVec16)
def_small_vector_clone_impl!(SmallVec24)
def_small_vector_clone_impl!(SmallVec32)
#[cfg(test)]
pub mod tests {
    use smallvec::{SmallVec, SmallVec2, SmallVec16};
    // We heap allocate all these strings so that double frees will show up under valgrind.
    /// Two pushes into a 16-slot vector: stays inline.
    #[test]
    pub fn test_inline() {
        let mut v = SmallVec16::new();
        v.push("hello".to_string());
        v.push("there".to_string());
        assert_eq!(v.as_slice(), &["hello".to_string(), "there".to_string()]);
    }
    /// Four pushes into a 2-slot vector: forces one spill to the heap.
    #[test]
    pub fn test_spill() {
        let mut v = SmallVec2::new();
        v.push("hello".to_string());
        v.push("there".to_string());
        v.push("burma".to_string());
        v.push("shave".to_string());
        assert_eq!(v.as_slice(), &["hello".to_string(), "there".to_string(), "burma".to_string(), "shave".to_string()]);
    }
    /// Eight pushes into a 2-slot vector: forces a second reallocation
    /// (heap-to-heap grow path).
    #[test]
    pub fn test_double_spill() {
        let mut v = SmallVec2::new();
        v.push("hello".to_string());
        v.push("there".to_string());
        v.push("burma".to_string());
        v.push("shave".to_string());
        v.push("hello".to_string());
        v.push("there".to_string());
        v.push("burma".to_string());
        v.push("shave".to_string());
        assert_eq!(v.as_slice(), &[
            "hello".to_string(), "there".to_string(), "burma".to_string(), "shave".to_string(), "hello".to_string(), "there".to_string(), "burma".to_string(), "shave".to_string(),
        ]);
    }
}

101
components/util/sort.rs Normal file
View file

@ -0,0 +1,101 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! In-place sorting.
/// Recursive worker for `quicksort_by`: three-way (Bentley-McIlroy)
/// partitioning of `arr[left..right]` around the pivot `arr[right]`.
/// Elements equal to the pivot are parked at both edges during the scan
/// (`p`/`q` track those regions) and swapped into the middle afterwards,
/// so runs of duplicates don't degrade the recursion.
fn quicksort_helper<T>(arr: &mut [T], left: int, right: int, compare: fn(&T, &T) -> Ordering) {
    if right <= left {
        return
    }
    let mut i: int = left - 1;
    let mut j: int = right;
    let mut p: int = i;
    let mut q: int = j;
    unsafe {
        // Raw pointer to the pivot so it can still be compared after the
        // slice is mutably borrowed by the swaps below.
        let v: *mut T = &mut arr[right as uint];
        loop {
            i += 1;
            while compare(&arr[i as uint], &*v) == Less {
                i += 1
            }
            j -= 1;
            while compare(&*v, &arr[j as uint]) == Less {
                if j == left {
                    break
                }
                j -= 1;
            }
            if i >= j {
                break
            }
            arr.swap(i as uint, j as uint);
            if compare(&arr[i as uint], &*v) == Equal {
                p += 1;
                arr.swap(p as uint, i as uint)
            }
            if compare(&*v, &arr[j as uint]) == Equal {
                q -= 1;
                arr.swap(j as uint, q as uint)
            }
        }
    }
    // Move the pivot into its final position, then swap the parked
    // pivot-equal elements from the edges into the middle.
    arr.swap(i as uint, right as uint);
    j = i - 1;
    i += 1;
    let mut k: int = left;
    while k < p {
        arr.swap(k as uint, j as uint);
        k += 1;
        j -= 1;
        assert!(k < arr.len() as int);
    }
    k = right - 1;
    while k > q {
        arr.swap(i as uint, k as uint);
        k -= 1;
        i += 1;
        assert!(k != 0);
    }
    // Recurse into the strictly-less and strictly-greater partitions.
    quicksort_helper(arr, left, j, compare);
    quicksort_helper(arr, i, right, compare);
}
/// An in-place quicksort.
///
/// The algorithm is from Sedgewick and Bentley, "Quicksort is Optimal":
/// http://www.cs.princeton.edu/~rs/talks/QuicksortIsOptimal.pdf
pub fn quicksort_by<T>(arr: &mut [T], compare: fn(&T, &T) -> Ordering) {
    // Zero- and one-element slices are already sorted; anything longer is
    // handed to the recursive three-way partitioning worker.
    let len = arr.len();
    if len > 1 {
        quicksort_helper(arr, 0, (len - 1) as int, compare);
    }
}
#[cfg(test)]
pub mod test {
    use std::rand;
    use std::rand::Rng;
    use sort;
    /// Fuzz test: sorts many random vectors (1 to 32 elements) and checks
    /// each result is non-decreasing.
    #[test]
    pub fn random() {
        let mut rng = rand::task_rng();
        for _ in range(0u32, 50000u32) {
            let len: uint = rng.gen();
            // `(len % 32) + 1` guarantees at least one element, so the
            // `v.len() - 1` below cannot underflow.
            let mut v: Vec<int> = rng.gen_iter::<int>().take((len % 32) + 1).collect();
            fn compare_ints(a: &int, b: &int) -> Ordering { a.cmp(b) }
            sort::quicksort_by(v.as_mut_slice(), compare_ints);
            for i in range(0, v.len() - 1) {
                assert!(v.get(i) <= v.get(i + 1))
            }
        }
    }
}

111
components/util/str.rs Normal file
View file

@ -0,0 +1,111 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::iter::Filter;
use std::str::CharSplits;
// DOM strings are plain UTF-8 `String`s in Servo (the DOM spec's
// "DOMString" type); the static vec aliases name 'static slice tables.
pub type DOMString = String;
pub type StaticCharVec = &'static [char];
pub type StaticStringVec = &'static [&'static str];
/// Returns a copy of an optional DOM string's contents, or a fresh empty
/// string when it is `None`.
pub fn null_str_as_empty(s: &Option<DOMString>) -> DOMString {
    // A `match` rather than `map_default` so the `Some` case never pays
    // for an unused `"".to_string()` allocation.
    match *s {
        None => "".to_string(),
        Some(ref contents) => contents.clone(),
    }
}
/// Borrows an optional DOM string's contents, or the static empty string
/// when it is `None`. Allocation-free counterpart of `null_str_as_empty`.
pub fn null_str_as_empty_ref<'a>(s: &'a Option<DOMString>) -> &'a str {
    match *s {
        None => "",
        Some(ref contents) => contents.as_slice(),
    }
}
/// `true` if every character of `s` is one of space, tab, CR, or LF.
///
/// NOTE(review): this set matches the XML `S` production (0x20/0x09/0x0D/
/// 0x0A) and deliberately-or-not omits U+000C form feed, which IS in
/// `HTML_SPACE_CHARACTERS` below — confirm which spec callers expect.
pub fn is_whitespace(s: &str) -> bool {
    s.chars().all(|c| match c {
        '\u0020' | '\u0009' | '\u000D' | '\u000A' => true,
        _ => false
    })
}
/// A "space character" according to:
///
/// http://www.whatwg.org/specs/web-apps/current-work/multipage/common-microsyntaxes.html#
/// space-character
///
/// That is: space, tab, LF, FF, and CR.
pub static HTML_SPACE_CHARACTERS: StaticCharVec = &[
    '\u0020',
    '\u0009',
    '\u000a',
    '\u000c',
    '\u000d',
];
/// Splits `s` on runs of HTML space characters, yielding only the
/// non-empty pieces (consecutive separators produce no empty tokens).
pub fn split_html_space_chars<'a>(s: &'a str) -> Filter<'a, &'a str, CharSplits<'a, StaticCharVec>> {
    s.split(HTML_SPACE_CHARACTERS).filter(|&split| !split.is_empty())
}
/// Shared implementation to parse an integer according to
/// <http://www.whatwg.org/html/#rules-for-parsing-integers> or
/// <http://www.whatwg.org/html/#rules-for-parsing-non-negative-integers>.
///
/// Skips leading HTML space characters, accepts one optional `+`/`-`
/// sign, then folds the run of ASCII digits with checked arithmetic.
/// Returns `None` on empty/signless-digitless input or on i64 overflow.
fn do_parse_integer<T: Iterator<char>>(input: T) -> Option<i64> {
    fn is_ascii_digit(c: &char) -> bool {
        // `'0'..'9'` is an inclusive char-range pattern in this dialect.
        match *c {
            '0'..'9' => true,
            _ => false,
        }
    }
    let mut input = input.skip_while(|c| {
        HTML_SPACE_CHARACTERS.iter().any(|s| s == c)
    }).peekable();
    let sign = match input.peek() {
        None => return None,
        Some(&'-') => {
            input.next();
            -1
        },
        Some(&'+') => {
            input.next();
            1
        },
        Some(&'+') => 1,
        Some(_) => 1,
    };
    // At least one digit must follow the (optional) sign.
    match input.peek() {
        Some(c) if is_ascii_digit(c) => (),
        _ => return None,
    }
    // Fold digits into an accumulator; `checked_mul`/`checked_add` turn
    // the whole fold into `None` the moment i64 overflows.
    let value = input.take_while(is_ascii_digit).map(|d| {
        d as i64 - '0' as i64
    }).fold(Some(0i64), |accumulator, d| {
        accumulator.and_then(|accumulator| {
            accumulator.checked_mul(&10)
        }).and_then(|accumulator| {
            accumulator.checked_add(&d)
        })
    });
    // Apply the sign last (also checked, for the i64::MIN edge).
    return value.and_then(|value| value.checked_mul(&sign));
}
/// Parse an integer according to
/// <http://www.whatwg.org/html/#rules-for-parsing-integers>.
///
/// Returns `None` when parsing fails or the value does not fit in `i32`.
pub fn parse_integer<T: Iterator<char>>(input: T) -> Option<i32> {
    match do_parse_integer(input) {
        Some(value) => value.to_i32(),
        None => None,
    }
}
/// Parse an integer according to
/// <http://www.whatwg.org/html/#rules-for-parsing-non-negative-integers>.
///
/// Returns `None` when parsing fails or the value does not fit in `u32`
/// (in particular, for any negative result).
pub fn parse_unsigned_integer<T: Iterator<char>>(input: T) -> Option<u32> {
    match do_parse_integer(input) {
        Some(value) => value.to_u32(),
        None => None,
    }
}

40
components/util/task.rs Normal file
View file

@ -0,0 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::str::IntoMaybeOwned;
use std::task;
use std::comm::Sender;
use std::task::TaskBuilder;
use native::task::NativeTaskBuilder;
/// Spawns `f` on a new task with the given name (the name shows up in
/// failure messages and debuggers). Fire-and-forget: the result is not
/// observed.
pub fn spawn_named<S: IntoMaybeOwned<'static>>(name: S, f: proc():Send) {
    let builder = task::TaskBuilder::new().named(name);
    builder.spawn(f);
}
/// Arrange to send a particular message to a channel if the task built by
/// this `TaskBuilder` fails.
///
/// Spawns `f` on a named task (native OS thread when `native` is true,
/// green task otherwise), plus a second "<name>Watcher" task that blocks
/// on the first task's result and sends `msg` down `dest` on failure.
pub fn spawn_named_with_send_on_failure<T: Send>(name: &'static str,
                                                 f: proc(): Send,
                                                 msg: T,
                                                 dest: Sender<T>,
                                                 native: bool) {
    let future_result = if native {
        TaskBuilder::new().named(name).native().try_future(f)
    } else {
        TaskBuilder::new().named(name).try_future(f)
    };
    let watched_name = name.to_string();
    let watcher_name = format!("{:s}Watcher", watched_name);
    TaskBuilder::new().named(watcher_name).spawn(proc() {
        // Blocks until the watched task finishes one way or the other.
        match future_result.unwrap() {
            Ok(()) => (),
            Err(..) => {
                debug!("{:s} failed, notifying constellation", name);
                dest.send(msg);
            }
        }
    });
}

241
components/util/time.rs Normal file
View file

@ -0,0 +1,241 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Timing functions.
use std_time::precise_time_ns;
use collections::treemap::TreeMap;
use std::comm::{Sender, channel, Receiver};
use std::f64;
use std::iter::AdditiveIterator;
use std::io::timer::sleep;
use task::{spawn_named};
// front-end representation of the profiler used to communicate with the profiler
/// Cloneable sending handle to the time profiler task; newtype over the
/// underlying channel sender.
#[deriving(Clone)]
pub struct TimeProfilerChan(pub Sender<TimeProfilerMsg>);
impl TimeProfilerChan {
    /// Forwards `msg` to the profiler task.
    pub fn send(&self, msg: TimeProfilerMsg) {
        let TimeProfilerChan(ref c) = *self;
        c.send(msg);
    }
}
/// Messages understood by the time profiler task.
pub enum TimeProfilerMsg {
    /// Normal message used for reporting time (category + milliseconds).
    TimeMsg(TimeProfilerCategory, f64),
    /// Message used to force print the profiling metrics
    PrintMsg,
    /// Tells the profiler to shut down.
    ExitMsg,
}
/// The categories under which timings are aggregated. `Ord` is derived so
/// the categories can key the profiler's `TreeMap` buckets.
#[repr(u32)]
#[deriving(PartialEq, Clone, PartialOrd, Eq, Ord)]
pub enum TimeProfilerCategory {
    CompositingCategory,
    LayoutQueryCategory,
    LayoutPerformCategory,
    LayoutStyleRecalcCategory,
    LayoutSelectorMatchCategory,
    LayoutTreeBuilderCategory,
    LayoutDamagePropagateCategory,
    LayoutMainCategory,
    LayoutParallelWarmupCategory,
    LayoutShapingCategory,
    LayoutDispListBuildCategory,
    GfxRegenAvailableFontsCategory,
    RenderingDrawingCategory,
    RenderingPrepBuffCategory,
    RenderingCategory,
    // FIXME(rust#8803): workaround for lack of CTFE function on enum types to return length
    NumBuckets,
}
impl TimeProfilerCategory {
    // convenience function to not have to cast every time
    pub fn num_buckets() -> uint {
        NumBuckets as uint
    }
    // enumeration of all TimeProfilerCategory types
    //
    // Every real category (everything except the NumBuckets sentinel) must
    // be inserted here: `handle_msg` unwraps the bucket lookup, so a
    // missing entry would fail at the first TimeMsg for that category.
    // The `check_order` test below verifies the count matches.
    fn empty_buckets() -> TimeProfilerBuckets {
        let mut buckets = TreeMap::new();
        buckets.insert(CompositingCategory, vec!());
        buckets.insert(LayoutQueryCategory, vec!());
        buckets.insert(LayoutPerformCategory, vec!());
        buckets.insert(LayoutStyleRecalcCategory, vec!());
        buckets.insert(LayoutSelectorMatchCategory, vec!());
        buckets.insert(LayoutTreeBuilderCategory, vec!());
        buckets.insert(LayoutMainCategory, vec!());
        buckets.insert(LayoutParallelWarmupCategory, vec!());
        buckets.insert(LayoutShapingCategory, vec!());
        buckets.insert(LayoutDamagePropagateCategory, vec!());
        buckets.insert(LayoutDispListBuildCategory, vec!());
        buckets.insert(GfxRegenAvailableFontsCategory, vec!());
        buckets.insert(RenderingDrawingCategory, vec!());
        buckets.insert(RenderingPrepBuffCategory, vec!());
        buckets.insert(RenderingCategory, vec!());
        buckets
    }
    // some categories are subcategories of LayoutPerformCategory
    // and should be printed to indicate this
    pub fn format(self) -> String {
        let padding = match self {
            LayoutStyleRecalcCategory |
            LayoutMainCategory |
            LayoutDispListBuildCategory |
            LayoutShapingCategory |
            LayoutDamagePropagateCategory => "+ ",
            LayoutParallelWarmupCategory |
            LayoutSelectorMatchCategory |
            LayoutTreeBuilderCategory => "| + ",
            _ => ""
        };
        format!("{:s}{:?}", padding, self)
    }
}
// Per-category lists of recorded timings, in milliseconds.
type TimeProfilerBuckets = TreeMap<TimeProfilerCategory, Vec<f64>>;
// back end of the profiler that handles data aggregation and performance metrics
pub struct TimeProfiler {
    pub port: Receiver<TimeProfilerMsg>,
    buckets: TimeProfilerBuckets,
    // Tracks the most recent message so PrintMsg can skip printing when
    // no new timing has arrived since the last printout.
    pub last_msg: Option<TimeProfilerMsg>,
}
impl TimeProfiler {
    /// Spawns the profiler machinery and returns the channel used to feed
    /// it. With `Some(period)` (seconds), also spawns a timer task that
    /// requests a printout every period; with `None`, spawns a stub task
    /// that drains messages until `ExitMsg` so senders never block.
    pub fn create(period: Option<f64>) -> TimeProfilerChan {
        let (chan, port) = channel();
        match period {
            Some(period) => {
                // Seconds -> milliseconds (this dialect's sleep takes ms).
                let period = (period * 1000f64) as u64;
                let chan = chan.clone();
                spawn_named("Time profiler timer", proc() {
                    loop {
                        sleep(period);
                        // Sender error means the profiler is gone; stop.
                        if chan.send_opt(PrintMsg).is_err() {
                            break;
                        }
                    }
                });
                // Spawn the time profiler.
                spawn_named("Time profiler", proc() {
                    let mut profiler = TimeProfiler::new(port);
                    profiler.start();
                });
            }
            None => {
                // No-op to handle messages when the time profiler is inactive.
                spawn_named("Time profiler", proc() {
                    loop {
                        match port.recv_opt() {
                            Err(_) | Ok(ExitMsg) => break,
                            _ => {}
                        }
                    }
                });
            }
        }
        TimeProfilerChan(chan)
    }
    /// Builds a profiler reading from `port`, with empty buckets.
    pub fn new(port: Receiver<TimeProfilerMsg>) -> TimeProfiler {
        TimeProfiler {
            port: port,
            buckets: TimeProfilerCategory::empty_buckets(),
            last_msg: None,
        }
    }
    /// Message loop: runs until the channel closes or ExitMsg arrives.
    pub fn start(&mut self) {
        loop {
            let msg = self.port.recv_opt();
            match msg {
               Ok(msg) => {
                   if !self.handle_msg(msg) {
                       break
                   }
               }
               _ => break
            }
        }
    }
    /// Dispatches one message; returns `false` to stop the loop.
    fn handle_msg(&mut self, msg: TimeProfilerMsg) -> bool {
        match msg {
            // unwrap is safe as long as empty_buckets covers every category.
            TimeMsg(category, t) => self.buckets.find_mut(&category).unwrap().push(t),
            PrintMsg => match self.last_msg {
                // only print if more data has arrived since the last printout
                Some(TimeMsg(..)) => self.print_buckets(),
                _ => ()
            },
            ExitMsg => return false,
        };
        self.last_msg = Some(msg);
        true
    }
    /// Prints mean/median/min/max/count per non-empty category.
    fn print_buckets(&mut self) {
        println!("{:39s} {:15s} {:15s} {:15s} {:15s} {:15s}",
                 "_category_", "_mean (ms)_", "_median (ms)_",
                 "_min (ms)_", "_max (ms)_", "_bucket size_");
        for (category, data) in self.buckets.mut_iter() {
            // NOTE(review): this comparator returns Greater for equal
            // values, which is not a total order; sort_by tolerates it
            // here in practice but partial_cmp would be cleaner.
            data.sort_by(|a, b| {
                if a < b {
                    Less
                } else {
                    Greater
                }
            });
            let data_len = data.len();
            if data_len > 0 {
                let (mean, median, min, max) =
                    (data.iter().map(|&x|x).sum() / (data_len as f64),
                     (*data)[data_len / 2],
                     data.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
                     data.iter().fold(-f64::INFINITY, |a, &b| a.max(b)));
                println!("{:-35s}: {:15.4f} {:15.4f} {:15.4f} {:15.4f} {:15u}",
                         category.format(), mean, median, min, max, data_len);
            }
        }
        println!("");
    }
}
/// Runs `callback`, reports its wall-clock duration (milliseconds) to the
/// time profiler under `category`, and returns the callback's result.
pub fn profile<T>(category: TimeProfilerCategory,
                  time_profiler_chan: TimeProfilerChan,
                  callback: || -> T)
                  -> T {
    let start = precise_time_ns();
    let result = callback();
    // Nanoseconds -> milliseconds.
    let elapsed_ms = (precise_time_ns() - start) as f64 / 1000000f64;
    time_profiler_chan.send(TimeMsg(category, elapsed_ms));
    result
}
/// Runs `callback` and logs `msg` with the elapsed wall-clock time, but
/// only when the callback took at least 5 ms. Returns the callback's
/// result.
pub fn time<T>(msg: &str, callback: || -> T) -> T{
    let start = precise_time_ns();
    let result = callback();
    // Nanoseconds -> milliseconds.
    let elapsed_ms = (precise_time_ns() - start) as f64 / 1000000f64;
    if elapsed_ms >= 5f64 {
        debug!("{:s} took {} ms", msg, elapsed_ms);
    }
    result
}
// ensure that the order of the buckets matches the order of the enum categories
//
// Guards the invariant handle_msg relies on: empty_buckets inserts one
// entry per real category (NumBuckets is the count sentinel).
#[test]
fn check_order() {
    let buckets = TimeProfilerCategory::empty_buckets();
    assert!(buckets.len() == NumBuckets as uint);
}

124
components/util/vec.rs Normal file
View file

@ -0,0 +1,124 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::cmp::{PartialOrd, PartialEq};
/// FIXME(pcwalton): Workaround for lack of unboxed closures. This is called in
/// performance-critical code, so a closure is insufficient.
pub trait Comparator<K,T> {
    /// Orders the search `key` relative to a stored `value`.
    fn compare(&self, key: &K, value: &T) -> Ordering;
}
/// Binary search over a sorted slice using the element's own ordering.
pub trait BinarySearchMethods<'a, T: Ord + PartialOrd + PartialEq> {
    /// Returns a reference to an element equal to `key`, if present.
    fn binary_search(&self, key: &T) -> Option<&'a T>;
    /// Returns the index of an element equal to `key`, if present.
    fn binary_search_index(&self, key: &T) -> Option<uint>;
}
/// Binary search with a caller-supplied comparator, allowing the key type
/// to differ from the element type.
pub trait FullBinarySearchMethods<T> {
    fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<uint>;
}
impl<'a, T: Ord + PartialOrd + PartialEq> BinarySearchMethods<'a, T> for &'a [T] {
    fn binary_search(&self, key: &T) -> Option<&'a T> {
        self.binary_search_index(key).map(|i| &self[i])
    }
    fn binary_search_index(&self, key: &T) -> Option<uint> {
        // Delegates to the comparator-based search with natural ordering.
        self.binary_search_index_by(key, DefaultComparator)
    }
}
impl<'a, T> FullBinarySearchMethods<T> for &'a [T] {
    /// Classic iterative binary search over a slice assumed to be sorted in
    /// the order induced by `cmp`. Returns the index of an element comparing
    /// `Equal` to `key`, or `None` if no such element exists.
    fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<uint> {
        if self.len() == 0 {
            return None;
        }
        // `high` is inclusive, so it can go to -1 when the range empties;
        // that is why these are signed.
        let mut low : int = 0;
        let mut high : int = (self.len() as int) - 1;
        while low <= high {
            // Compute the midpoint in unsigned arithmetic to avoid the classic
            // signed-overflow bug in `(low + high) / 2`:
            // http://googleresearch.blogspot.com/2006/06/extra-extra-read-all-about-it-nearly.html
            let mid = ((low as uint) + (high as uint)) >> 1;
            let midv = &self[mid];
            match cmp.compare(key, midv) {
                Greater => low = (mid as int) + 1,
                Less => high = (mid as int) - 1,
                Equal => return Some(mid),
            }
        }
        return None;
    }
}
/// Comparator used by the plain `binary_search` methods: key and element are
/// the same type and are ordered with `Ord::cmp`.
struct DefaultComparator;

impl<T:PartialEq + PartialOrd + Ord> Comparator<T,T> for DefaultComparator {
    fn compare(&self, key: &T, value: &T) -> Ordering {
        key.cmp(value)
    }
}
#[cfg(test)]
fn test_find_all_elems<T: PartialEq + PartialOrd + Eq + Ord>(arr: &[T]) {
    // Every element of a sorted array must be found by searching for itself.
    for elem in arr.iter() {
        assert!(test_match(elem, arr.binary_search(elem)));
    }
}
#[cfg(test)]
fn test_miss_all_elems<T: PartialEq + PartialOrd + Eq + Ord>(arr: &[T], misses: &[T]) {
    // None of the `misses` values may be reported as present in `arr`.
    for miss in misses.iter() {
        let res = arr.binary_search(miss);
        debug!("{:?} == {:?} ?", *miss, res);
        assert!(!test_match(miss, arr.binary_search(miss)));
    }
}
#[cfg(test)]
fn test_match<T: PartialEq>(b: &T, a: Option<&T>) -> bool {
    // A search result "matches" iff it found a value equal to the expected one.
    a.map_or(false, |t| t == b)
}
#[test]
fn should_find_all_elements() {
    // Sorted fixtures covering odd/even lengths, duplicates, and tiny arrays.
    let arr_odd = [1u32, 2, 4, 6, 7, 8, 9];
    let arr_even = [1u32, 2, 5, 6, 7, 8, 9, 42];
    let arr_double = [1u32, 1, 2, 2, 6, 8, 22];
    let arr_one = [234986325u32];
    let arr_two = [3044u32, 8393];
    let arr_three = [12u32, 23, 34];

    // Every element must be findable in its own array.
    test_find_all_elems(arr_odd);
    test_find_all_elems(arr_even);
    test_find_all_elems(arr_double);
    test_find_all_elems(arr_one);
    test_find_all_elems(arr_two);
    test_find_all_elems(arr_three);
}
#[test]
fn should_not_find_missing_elements() {
    // Same fixtures as `should_find_all_elements`, probed with values absent
    // from each array.
    let arr_odd = [1u32, 2, 4, 6, 7, 8, 9];
    let arr_even = [1u32, 2, 5, 6, 7, 8, 9, 42];
    let arr_double = [1u32, 1, 2, 2, 6, 8, 22];
    let arr_one = [234986325u32];
    let arr_two = [3044u32, 8393];
    let arr_three = [12u32, 23, 34];

    // NOTE(review): the negative literals below are negations of unsigned
    // values, so they wrap to very large u32s — which do miss every array,
    // but presumably "0 - n" was not the intended probe value; confirm.
    test_miss_all_elems(arr_odd, [-22, 0, 3, 5, 34938, 10, 11, 12]);
    test_miss_all_elems(arr_even, [-1, 0, 3, 34938, 10, 11, 12]);
    test_miss_all_elems(arr_double, [-1, 0, 3, 4, 34938, 10, 11, 12, 234, 234, 33]);
    test_miss_all_elems(arr_one, [-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
    test_miss_all_elems(arr_two, [-1, 0, 3, 34938, 10, 11, 12, 234, 234, 33]);
    test_miss_all_elems(arr_three, [-2, 0, 1, 2, 3, 34938, 10, 11, 234, 234, 33]);
}

View file

@ -0,0 +1,291 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use native::task::NativeTaskBuilder;
use rand::{Rng, XorShiftRng};
use std::mem;
use std::rand::weak_rng;
use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use std::task::TaskBuilder;
/// A unit of work.
///
/// # Type parameters
///
/// - `QueueData`: global custom data for the entire work queue.
/// - `WorkData`: custom data specific to each unit of work.
pub struct WorkUnit<QueueData, WorkData> {
    /// The function to execute. It receives the work data and a proxy through
    /// which further work units can be enqueued.
    pub fun: extern "Rust" fn(WorkData, &mut WorkerProxy<QueueData, WorkData>),
    /// Arbitrary data.
    pub data: WorkData,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QueueData, WorkData> {
    /// Tells the worker to start work, handing it its deque, a pointer to the
    /// shared outstanding-work counter, and a pointer to the queue data.
    StartMsg(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUint, *const QueueData),
    /// Tells the worker to stop. It can be restarted again with a `StartMsg`.
    StopMsg,
    /// Tells the worker thread to terminate.
    ExitMsg,
}
/// Messages to the supervisor.
enum SupervisorMsg<QueueData, WorkData> {
    /// Sent by the worker that completes the last outstanding work unit
    /// (i.e. the one that decrements the shared counter to zero).
    FinishedMsg,
    /// Returns a worker's deque (identified by its index) to the supervisor
    /// after the worker has been stopped.
    ReturnDequeMsg(uint, Worker<WorkUnit<QueueData, WorkData>>),
}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QueueData, WorkData> {
    /// The communication channel to the workers.
    chan: Sender<WorkerMsg<QueueData, WorkData>>,
    /// The worker end of the deque, if we have it. This is `None` while the
    /// worker thread holds it (between `StartMsg` and `ReturnDequeMsg`).
    deque: Option<Worker<WorkUnit<QueueData, WorkData>>>,
    /// The thief end of the work-stealing deque.
    thief: Stealer<WorkUnit<QueueData, WorkData>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QueueData, WorkData> {
    /// The index of this worker.
    index: uint,
    /// The communication port from the supervisor.
    port: Receiver<WorkerMsg<QueueData, WorkData>>,
    /// The communication channel on which messages are sent to the supervisor.
    chan: Sender<SupervisorMsg<QueueData, WorkData>>,
    /// The thief end of the work-stealing deque for all other workers.
    other_deques: Vec<Stealer<WorkUnit<QueueData, WorkData>>>,
    /// The random number generator for this worker, used to pick steal victims.
    rng: XorShiftRng,
}
/// Number of steal attempts a worker makes before polling its supervisor port
/// for a stop/exit message.
static SPIN_COUNT: uint = 1000;
impl<QueueData: Send, WorkData: Send> WorkerThread<QueueData, WorkData> {
    /// The main logic. This function starts up the worker and listens for
    /// messages. Each iteration of the outer loop is one start/stop cycle;
    /// the thread only exits when an `ExitMsg` arrives.
    fn start(&mut self) {
        loop {
            // Wait for a start message.
            let (mut deque, ref_count, queue_data) = match self.port.recv() {
                StartMsg(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
                StopMsg => fail!("unexpected stop message"),
                ExitMsg => return,
            };

            // We're off!
            //
            // FIXME(pcwalton): Can't use labeled break or continue cross-crate due to a Rust bug.
            loop {
                // FIXME(pcwalton): Nasty workaround for the lack of labeled break/continue
                // cross-crate.
                // NOTE(review): every path reaching the call below first assigns
                // `work_unit`, but the `should_continue` exit drops it while
                // still uninitialized — presumably relies on the instantiated
                // types having trivial drop glue; worth confirming.
                let mut work_unit = unsafe {
                    mem::uninitialized()
                };
                match deque.pop() {
                    Some(work) => work_unit = work,
                    None => {
                        // Become a thief: repeatedly pick a random victim and
                        // try to steal from its deque.
                        let mut i = 0;
                        let mut should_continue = true;
                        loop {
                            let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
                            match self.other_deques.get_mut(victim).steal() {
                                Empty | Abort => {
                                    // Continue.
                                }
                                Data(work) => {
                                    work_unit = work;
                                    break
                                }
                            }

                            // Every SPIN_COUNT failed steals, poll the
                            // supervisor port so a stop/exit request is not
                            // starved by endless spinning.
                            if i == SPIN_COUNT {
                                match self.port.try_recv() {
                                    Ok(StopMsg) => {
                                        should_continue = false;
                                        break
                                    }
                                    Ok(ExitMsg) => return,
                                    Ok(_) => fail!("unexpected message"),
                                    _ => {}
                                }

                                i = 0
                            } else {
                                i += 1
                            }
                        }

                        if !should_continue {
                            break
                        }
                    }
                }

                // At this point, we have some work. Perform it.
                let mut proxy = WorkerProxy {
                    worker: &mut deque,
                    ref_count: ref_count,
                    queue_data: queue_data,
                };
                (work_unit.fun)(work_unit.data, &mut proxy);

                // The work is done. Now decrement the count of outstanding work items. If this was
                // the last work unit in the queue, then send a message on the channel.
                unsafe {
                    if (*ref_count).fetch_sub(1, SeqCst) == 1 {
                        self.chan.send(FinishedMsg)
                    }
                }
            }

            // Give the deque back to the supervisor.
            self.chan.send(ReturnDequeMsg(self.index, deque))
        }
    }
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a, QueueData, WorkData> {
    /// The worker end of this worker's deque; newly spawned units go here.
    worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
    /// Shared count of outstanding work units, incremented on `push`.
    ref_count: *mut AtomicUint,
    /// Pointer to the queue-global user data.
    queue_data: *const QueueData,
}
impl<'a, QueueData, WorkData: Send> WorkerProxy<'a, QueueData, WorkData> {
    /// Enqueues a block into the work queue.
    #[inline]
    pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
        // Bump the outstanding-work counter before the unit becomes stealable,
        // so the queue cannot be observed as drained while it is in flight.
        unsafe {
            drop((*self.ref_count).fetch_add(1, SeqCst));
        }
        self.worker.push(work_unit);
    }

    /// Retrieves the queue user data.
    #[inline]
    pub fn user_data<'a>(&'a self) -> &'a QueueData {
        // Reborrowing the raw pointer directly expresses exactly what the
        // previous `mem::transmute` did, without the type-punning hammer.
        // NOTE(review): soundness presumably rests on `queue_data` pointing at
        // the `WorkQueue`'s `data` field, which `run` keeps alive until the
        // workers are stopped — confirm against the supervisor.
        unsafe {
            &*self.queue_data
        }
    }
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QueueData, WorkData> {
    /// Information about each of the workers.
    workers: Vec<WorkerInfo<QueueData, WorkData>>,
    /// A port on which deques can be received from the workers.
    port: Receiver<SupervisorMsg<QueueData, WorkData>>,
    /// The amount of work that has been enqueued since the last `run`.
    work_count: uint,
    /// Arbitrary user data.
    pub data: QueueData,
}
impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
    /// Creates a new work queue and spawns all the threads associated with
    /// it.
    pub fn new(task_name: &'static str, thread_count: uint, user_data: QueueData) -> WorkQueue<QueueData, WorkData> {
        // Set up data structures: one channel and one work-stealing deque per
        // worker. The supervisor keeps the `Worker` end; stealer handles are
        // distributed below.
        let (supervisor_chan, supervisor_port) = channel();
        let (mut infos, mut threads) = (vec!(), vec!());
        for i in range(0, thread_count) {
            let (worker_chan, worker_port) = channel();
            let pool = BufferPool::new();
            let (worker, thief) = pool.deque();
            infos.push(WorkerInfo {
                chan: worker_chan,
                deque: Some(worker),
                thief: thief,
            });
            threads.push(WorkerThread {
                index: i,
                port: worker_port,
                chan: supervisor_chan.clone(),
                other_deques: vec!(),
                rng: weak_rng(),
            });
        }

        // Connect workers to one another: each worker gets a stealer for
        // every deque except its own.
        for i in range(0, thread_count) {
            for j in range(0, thread_count) {
                if i != j {
                    threads.get_mut(i).other_deques.push(infos[j].thief.clone())
                }
            }
            assert!(threads.get(i).other_deques.len() == thread_count - 1)
        }

        // Spawn threads.
        for thread in threads.move_iter() {
            TaskBuilder::new().named(task_name).native().spawn(proc() {
                let mut thread = thread;
                thread.start()
            })
        }

        WorkQueue {
            workers: infos,
            port: supervisor_port,
            work_count: 0,
            data: user_data,
        }
    }

    /// Enqueues a block into the work queue.
    ///
    /// Initial work always lands on worker 0's deque; the other workers
    /// acquire it by stealing once `run` starts them.
    #[inline]
    pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
        match self.workers.get_mut(0).deque {
            None => {
                fail!("tried to push a block but we don't have the deque?!")
            }
            Some(ref mut deque) => deque.push(work_unit),
        }
        self.work_count += 1
    }

    /// Synchronously runs all the enqueued tasks and waits for them to complete.
    pub fn run(&mut self) {
        // Tell the workers to start, handing each its deque plus raw pointers
        // to the shared outstanding-work counter and the queue data. Those
        // pointers stay valid because this function blocks until the workers
        // are stopped and all deques have been returned.
        let mut work_count = AtomicUint::new(self.work_count);
        for worker in self.workers.mut_iter() {
            worker.chan.send(StartMsg(worker.deque.take_unwrap(), &mut work_count, &self.data))
        }

        // Wait for the work to finish: `FinishedMsg` is sent by whichever
        // worker decrements the counter to zero.
        drop(self.port.recv());
        self.work_count = 0;

        // Tell everyone to stop.
        for worker in self.workers.iter() {
            worker.chan.send(StopMsg)
        }

        // Get our deques back, restoring each worker's `deque` slot by index.
        for _ in range(0, self.workers.len()) {
            match self.port.recv() {
                ReturnDequeMsg(index, deque) => self.workers.get_mut(index).deque = Some(deque),
                FinishedMsg => fail!("unexpected finished message!"),
            }
        }
    }

    /// Tells all worker threads to terminate; they cannot be restarted
    /// afterwards.
    pub fn shutdown(&mut self) {
        for worker in self.workers.iter() {
            worker.chan.send(ExitMsg)
        }
    }
}