auto merge of #5416 : Ms2ger/servo/int, r=jdm

bors-servo 2015-03-28 13:58:02 -06:00
commit 674e52afa1
17 changed files with 64 additions and 70 deletions
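
The changes below all make the same substitution: the deprecated pointer-sized uint and int become usize and isize where a value really is a memory index or length, and a fixed-width u32 where it is only a counter or an identifier. A minimal sketch of the two cases, using hypothetical names rather than Servo's own types:

fn nth_byte(buffer: &[u8], i: usize) -> u8 {
    // Slice and Vec indexing take usize, the natural successor to uint here.
    buffer[i]
}

// Identifiers and counters get a fixed width instead of a pointer-sized integer.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ReflowId(u32);

fn main() {
    let data = [10u8, 20, 30];
    assert_eq!(nth_byte(&data, 2), 30);
    let _id = ReflowId(7);
}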

@@ -93,7 +93,7 @@ pub struct IOCompositor<Window: WindowMethods> {
shutdown_state: ShutdownState,
/// Tracks outstanding paint_msg's sent to the paint tasks.
-outstanding_paint_msgs: uint,
+outstanding_paint_msgs: u32,
/// Tracks the last composite time.
last_composite_time: u64,
@@ -463,7 +463,7 @@ impl<Window: WindowMethods> IOCompositor<Window> {
self.has_paint_msg_tracking() && self.outstanding_paint_msgs > 0
}
-fn add_outstanding_paint_msg(&mut self, count: uint) {
+fn add_outstanding_paint_msg(&mut self, count: u32) {
// return early if not tracking paint_msg's
if !self.has_paint_msg_tracking() {
return;
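
A minimal sketch of the counter pattern behind outstanding_paint_msgs, assuming a tracking flag and a hypothetical mark_paint_msgs_done counterpart; the real IOCompositor carries far more state than this:

struct PaintMsgTracker {
    outstanding_paint_msgs: u32,
    tracking_enabled: bool,
}

impl PaintMsgTracker {
    fn has_paint_msg_tracking(&self) -> bool {
        self.tracking_enabled
    }

    fn add_outstanding_paint_msg(&mut self, count: u32) {
        // Return early if not tracking paint messages, as in the hunk above.
        if !self.has_paint_msg_tracking() {
            return;
        }
        self.outstanding_paint_msgs += count;
    }

    // Hypothetical counterpart: saturate rather than underflow the u32 count.
    fn mark_paint_msgs_done(&mut self, count: u32) {
        if self.has_paint_msg_tracking() {
            self.outstanding_paint_msgs = self.outstanding_paint_msgs.saturating_sub(count);
        }
    }
}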

@@ -1199,7 +1199,7 @@ impl ScaledFontExtensionMethods for ScaledFont {
let mut origin = baseline_origin.clone();
let mut azglyphs = vec!();
-azglyphs.reserve(range.length().to_uint());
+azglyphs.reserve(range.length().to_usize());
for slice in run.natural_word_slices_in_range(range) {
for (_i, glyph) in slice.glyphs.iter_glyphs_for_char_range(&slice.range) {

@@ -455,7 +455,7 @@ pub enum GlyphInfo<'a> {
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
-GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_uint()].id(),
+GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
@@ -466,7 +466,7 @@ impl<'a> GlyphInfo<'a> {
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
-GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_uint()].advance(),
+GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
@@ -575,13 +575,13 @@ impl<'a> GlyphStore {
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
-entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_uint()]);
+entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
if character == Some(' ') {
entry = entry.set_char_is_space()
}
-self.entry_buffer[i.to_uint()] = entry;
+self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
@@ -605,11 +605,11 @@ impl<'a> GlyphStore {
first_glyph_data.ligature_start,
glyph_count)
}
-}.adapt_character_flags_of_entry(self.entry_buffer[i.to_uint()]);
+}.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
-self.entry_buffer[i.to_uint()] = entry;
+self.entry_buffer[i.to_usize()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
@@ -619,7 +619,7 @@ impl<'a> GlyphStore {
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
-self.entry_buffer[i.to_uint()] = entry;
+self.entry_buffer[i.to_usize()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
@@ -652,57 +652,57 @@ impl<'a> GlyphStore {
// getter methods
pub fn char_is_space(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].char_is_space()
+self.entry_buffer[i.to_usize()].char_is_space()
}
pub fn char_is_tab(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].char_is_tab()
+self.entry_buffer[i.to_usize()].char_is_tab()
}
pub fn char_is_newline(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].char_is_newline()
+self.entry_buffer[i.to_usize()].char_is_newline()
}
pub fn is_ligature_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].is_ligature_start()
+self.entry_buffer[i.to_usize()].is_ligature_start()
}
pub fn is_cluster_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].is_cluster_start()
+self.entry_buffer[i.to_usize()].is_cluster_start()
}
pub fn can_break_before(&self, i: CharIndex) -> BreakType {
assert!(i < self.char_len());
-self.entry_buffer[i.to_uint()].can_break_before()
+self.entry_buffer[i.to_usize()].can_break_before()
}
// setter methods
pub fn set_char_is_space(&mut self, i: CharIndex) {
assert!(i < self.char_len());
-let entry = self.entry_buffer[i.to_uint()];
-self.entry_buffer[i.to_uint()] = entry.set_char_is_space();
+let entry = self.entry_buffer[i.to_usize()];
+self.entry_buffer[i.to_usize()] = entry.set_char_is_space();
}
pub fn set_char_is_tab(&mut self, i: CharIndex) {
assert!(i < self.char_len());
-let entry = self.entry_buffer[i.to_uint()];
-self.entry_buffer[i.to_uint()] = entry.set_char_is_tab();
+let entry = self.entry_buffer[i.to_usize()];
+self.entry_buffer[i.to_usize()] = entry.set_char_is_tab();
}
pub fn set_char_is_newline(&mut self, i: CharIndex) {
assert!(i < self.char_len());
-let entry = self.entry_buffer[i.to_uint()];
-self.entry_buffer[i.to_uint()] = entry.set_char_is_newline();
+let entry = self.entry_buffer[i.to_usize()];
+self.entry_buffer[i.to_usize()] = entry.set_char_is_newline();
}
pub fn set_can_break_before(&mut self, i: CharIndex, t: BreakType) {
assert!(i < self.char_len());
-let entry = self.entry_buffer[i.to_uint()];
-self.entry_buffer[i.to_uint()] = entry.set_can_break_before(t);
+let entry = self.entry_buffer[i.to_usize()];
+self.entry_buffer[i.to_usize()] = entry.set_can_break_before(t);
}
pub fn space_count_in_range(&self, range: &Range<CharIndex>) -> u32 {
@@ -723,7 +723,7 @@ impl<'a> GlyphStore {
for index in range.each_index() {
// TODO(pcwalton): Handle spaces that are detailed glyphs -- these are uncommon but
// possible.
-let entry = &mut self.entry_buffer[index.to_uint()];
+let entry = &mut self.entry_buffer[index.to_usize()];
if entry.is_simple() && entry.char_is_space() {
// FIXME(pcwalton): This can overflow for very large font-sizes.
let advance =
@@ -789,7 +789,7 @@ impl<'a> Iterator for GlyphIterator<'a> {
self.char_range.next().and_then(|i| {
self.char_index = i;
assert!(i < self.store.char_len());
-let entry = self.store.entry_buffer[i.to_uint()];
+let entry = self.store.entry_buffer[i.to_usize()];
if entry.is_simple() {
Some((self.char_index, GlyphInfo::Simple(self.store, i)))
} else {
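
Every glyph.rs hunk above follows one pattern: a signed character-index newtype whose conversion at the point of indexing entry_buffer is renamed from to_uint to to_usize. A self-contained sketch of that pattern, with CharIndex and Store as stand-ins rather than Servo's actual types:

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct CharIndex(isize);

impl CharIndex {
    fn to_usize(self) -> usize {
        self.0 as usize
    }
}

struct Store<T> {
    entry_buffer: Vec<T>,
}

impl<T: Copy> Store<T> {
    fn get(&self, i: CharIndex) -> T {
        // Vec indexing requires usize, hence the explicit conversion.
        self.entry_buffer[i.to_usize()]
    }

    fn set(&mut self, i: CharIndex, value: T) {
        self.entry_buffer[i.to_usize()] = value;
    }
}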

@@ -1869,11 +1869,11 @@ impl Flow for BlockFlow {
self.fragment.border_box - self.fragment.style().logical_border_width()
}
-fn layer_id(&self, fragment_index: uint) -> LayerId {
+fn layer_id(&self, fragment_index: u32) -> LayerId {
// FIXME(#2010, pcwalton): This is a hack and is totally bogus in the presence of pseudo-
// elements. But until we have incremental reflow we can't do better--we recreate the flow
// for every DOM node so otherwise we nuke layers on every reflow.
-LayerId(self.fragment.node.id() as uint, fragment_index)
+LayerId(self.fragment.node.id() as usize, fragment_index)
}
fn is_absolute_containing_block(&self) -> bool {

@@ -311,12 +311,9 @@ pub trait Flow: fmt::Debug + Sync {
/// Returns a layer ID for the given fragment.
#[allow(unsafe_code)]
-fn layer_id(&self, fragment_id: uint) -> LayerId {
-unsafe {
-let obj = mem::transmute::<&&Self, &raw::TraitObject>(&self);
-let pointer: uint = mem::transmute(obj.data);
-LayerId(pointer, fragment_id)
-}
+fn layer_id(&self, fragment_id: u32) -> LayerId {
+let obj = unsafe { mem::transmute::<&&Self, &raw::TraitObject>(&self) };
+LayerId(obj.data as usize, fragment_id)
}
/// Attempts to perform incremental fixup of this flow by replacing its fragment's style with
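
The default layer_id keys the id off the trait object's data pointer; the new code casts obj.data to usize instead of transmuting it to the now-removed uint. A hedged sketch of the same idea without std::raw (which no longer exists in modern Rust), with BlockFlow's override from the previous file shown alongside; the names here are illustrative, not Servo's:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct LayerId(usize, u32);

// Generic fallback: the flow's address in memory serves as the identity half.
fn default_layer_id<T>(flow: &T, fragment_id: u32) -> LayerId {
    LayerId(flow as *const T as usize, fragment_id)
}

struct BlockFlow {
    node_id: usize,
}

impl BlockFlow {
    // BlockFlow keys the layer off its DOM node id instead of its own address,
    // mirroring LayerId(self.fragment.node.id() as usize, fragment_index) above.
    fn layer_id(&self, fragment_index: u32) -> LayerId {
        LayerId(self.node_id, fragment_index)
    }
}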

@@ -1600,8 +1600,8 @@ impl Fragment {
// FIXME(pcwalton): Is there a more clever (i.e. faster) way to do this?
if let Some(ref mut inline_end_range) = inline_end_range {
let inline_end_fragment_text =
-text_fragment_info.run.text.slice_chars(inline_end_range.begin().to_uint(),
-inline_end_range.end().to_uint());
+text_fragment_info.run.text.slice_chars(inline_end_range.begin().to_usize(),
+inline_end_range.end().to_usize());
let mut leading_whitespace_character_count = 0i;
for ch in inline_end_fragment_text.chars() {
if ch.is_whitespace() {
@@ -2128,7 +2128,7 @@ pub enum CoordinateSystem {
/// if any modifications were made.
fn strip_trailing_whitespace(text_run: &TextRun, range: &mut Range<CharIndex>) -> bool {
// FIXME(pcwalton): Is there a more clever (i.e. faster) way to do this?
-let text = text_run.text.slice_chars(range.begin().to_uint(), range.end().to_uint());
+let text = text_run.text.slice_chars(range.begin().to_usize(), range.end().to_usize());
let mut trailing_whitespace_character_count = 0i;
for ch in text.chars().rev() {
if ch.is_whitespace() {

@@ -915,7 +915,7 @@ impl InlineFlow {
}
for fragment_index in range(line.range.begin(), line.range.end()) {
-let fragment = fragments.get_mut(fragment_index.to_uint());
+let fragment = fragments.get_mut(fragment_index.to_usize());
let size = fragment.border_box.size;
fragment.border_box = LogicalRect::new(fragment.style.writing_mode,
inline_start_position_for_fragment,
@@ -940,7 +940,7 @@ impl InlineFlow {
// First, calculate the number of expansion opportunities (spaces, normally).
let mut expansion_opportunities = 0i32;
for fragment_index in line.range.each_index() {
-let fragment = fragments.get(fragment_index.to_uint());
+let fragment = fragments.get(fragment_index.to_usize());
let scanned_text_fragment_info =
if let SpecificFragmentInfo::ScannedText(ref info) = fragment.specific {
info
@@ -957,7 +957,7 @@ impl InlineFlow {
let space_per_expansion_opportunity = slack_inline_size.to_subpx() /
(expansion_opportunities as f64);
for fragment_index in line.range.each_index() {
-let fragment = fragments.get_mut(fragment_index.to_uint());
+let fragment = fragments.get_mut(fragment_index.to_usize());
let mut scanned_text_fragment_info =
if let SpecificFragmentInfo::ScannedText(ref mut info) = fragment.specific {
info
@@ -1004,7 +1004,7 @@ impl InlineFlow {
baseline_distance_from_block_start: Au,
largest_depth_below_baseline: Au) {
for fragment_index in range(line.range.begin(), line.range.end()) {
-let fragment = fragments.get_mut(fragment_index.to_uint());
+let fragment = fragments.get_mut(fragment_index.to_usize());
match fragment.vertical_align() {
vertical_align::T::top => {
fragment.border_box.start.b = fragment.border_box.start.b +
@@ -1221,7 +1221,7 @@ impl Flow for InlineFlow {
mut largest_block_size_for_bottom_fragments) = (Au(0), Au(0));
for fragment_index in range(line.range.begin(), line.range.end()) {
-let fragment = &mut self.fragments.fragments[fragment_index.to_uint()];
+let fragment = &mut self.fragments.fragments[fragment_index.to_usize()];
let InlineMetrics {
mut block_size_above_baseline,

@@ -34,7 +34,7 @@ pub enum ReadyState {
/// A newtype struct for denoting the age of messages; prevents race conditions.
#[derive(PartialEq, Eq, Debug, Copy)]
-pub struct Epoch(pub uint);
+pub struct Epoch(pub u32);
impl Epoch {
pub fn next(&mut self) {
@@ -44,7 +44,7 @@ impl Epoch {
}
#[derive(Clone, PartialEq, Eq, Copy)]
-pub struct LayerId(pub uint, pub uint);
+pub struct LayerId(pub usize, pub u32);
impl Debug for LayerId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {

@@ -309,16 +309,16 @@ pub enum NavigationDirection {
}
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug)]
-pub struct FrameId(pub uint);
+pub struct FrameId(pub u32);
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug)]
-pub struct WorkerId(pub uint);
+pub struct WorkerId(pub u32);
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug)]
-pub struct PipelineId(pub uint);
+pub struct PipelineId(pub u32);
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug)]
-pub struct SubpageId(pub uint);
+pub struct SubpageId(pub u32);
// The type of pipeline exit. During complete shutdowns, pipelines do not have to
// release resources automatically released on process termination.
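
Epoch, FrameId, WorkerId, PipelineId and SubpageId all share the same newtype-id shape; only the payload changes, from the pointer-sized uint to a fixed-width u32 (LayerId keeps a usize for its pointer half). A small sketch of that shape, where the body of next() is an assumption about intent rather than a copy of Servo's implementation:

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct PipelineId(pub u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Epoch(pub u32);

impl Epoch {
    fn next(&mut self) {
        // Assumed behaviour: advance the epoch, wrapping rather than panicking
        // if the counter ever reaches u32::MAX.
        self.0 = self.0.wrapping_add(1);
    }
}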

@@ -2,8 +2,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#![feature(int_uint)]
extern crate azure;
#[macro_use] extern crate bitflags;
extern crate geom;

@@ -119,7 +119,7 @@ pub struct Window {
parent_info: Option<(PipelineId, SubpageId)>,
/// Unique id for last reflow request; used for confirming completion reply.
-last_reflow_id: Cell<uint>,
+last_reflow_id: Cell<u32>,
/// Global static data related to the DOM.
dom_static: GlobalStaticData,
@@ -460,7 +460,7 @@ pub trait WindowHelpers {
fn layout(&self) -> &LayoutRPC;
fn content_box_query(self, content_box_request: TrustedNodeAddress) -> Rect<Au>;
fn content_boxes_query(self, content_boxes_request: TrustedNodeAddress) -> Vec<Rect<Au>>;
-fn handle_reflow_complete_msg(self, reflow_id: uint);
+fn handle_reflow_complete_msg(self, reflow_id: u32);
fn handle_resize_inactive_msg(self, new_size: WindowSizeData);
fn set_fragment_name(self, fragment: Option<String>);
fn steal_fragment_name(self) -> Option<String>;
@@ -631,7 +631,7 @@ impl<'a> WindowHelpers for JSRef<'a, Window> {
rects
}
-fn handle_reflow_complete_msg(self, reflow_id: uint) {
+fn handle_reflow_complete_msg(self, reflow_id: u32) {
let last_reflow_id = self.last_reflow_id.get();
if last_reflow_id == reflow_id {
*self.layout_join_port.borrow_mut() = None;

@@ -115,7 +115,7 @@ pub struct Reflow {
/// The channel that we send a notification to.
pub script_join_chan: Sender<()>,
/// Unique identifier
-pub id: uint,
+pub id: u32,
/// The type of query if any to perform during this reflow.
pub query_type: ReflowQueryType,
/// A clipping rectangle for the page, an enlarged rectangle containing the viewport.

@@ -846,7 +846,7 @@ impl ScriptTask {
}
/// Handles a notification that reflow completed.
-fn handle_reflow_complete_msg(&self, pipeline_id: PipelineId, reflow_id: uint) {
+fn handle_reflow_complete_msg(&self, pipeline_id: PipelineId, reflow_id: u32) {
debug!("Script: Reflow {:?} complete for {:?}", reflow_id, pipeline_id);
let page = self.root_page();
let page = page.find(pipeline_id).expect(

@@ -61,7 +61,7 @@ pub enum ConstellationControlMsg {
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script that reflow is finished.
-ReflowComplete(PipelineId, uint),
+ReflowComplete(PipelineId, u32),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Requests that the script task immediately send the constellation the title of a pipeline.
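
Several hunks above retype the same reflow handshake: Window stores the id of the last reflow it requested, the id comes back to the script task in ReflowComplete, and the completion is ignored unless the ids match. A hedged sketch of that check, with waiting_on_layout standing in for the real layout_join_port:

use std::cell::Cell;

struct Window {
    last_reflow_id: Cell<u32>,
    waiting_on_layout: Cell<bool>,
}

impl Window {
    fn handle_reflow_complete_msg(&self, reflow_id: u32) {
        // A completion carrying an older id is stale and simply ignored.
        if self.last_reflow_id.get() == reflow_id {
            self.waiting_on_layout.set(false);
        }
    }
}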

@@ -192,7 +192,7 @@ fn test_lru_cache() {
let four = Cell::new("four");
// Test normal insertion.
-let mut cache: LRUCache<uint,Cell<&str>> = LRUCache::new(2); // (_, _) (cache is empty)
+let mut cache: LRUCache<usize,Cell<&str>> = LRUCache::new(2); // (_, _) (cache is empty)
cache.insert(1, one); // (1, _)
cache.insert(2, two); // (1, 2)
cache.insert(3, three); // (2, 3)

@@ -8,7 +8,6 @@
#![feature(core)]
#![feature(exit_status)]
#![feature(hash)]
-#![feature(int_uint)]
#![feature(io)]
#![feature(optin_builtin_traits)]
#![feature(path)]

@@ -16,13 +16,13 @@ pub trait RangeIndex: Int + fmt::Debug {
fn get(self) -> Self::Index;
}
-impl RangeIndex for int {
-type Index = int;
+impl RangeIndex for isize {
+type Index = isize;
#[inline]
-fn new(x: int) -> int { x }
+fn new(x: isize) -> isize { x }
#[inline]
-fn get(self) -> int { self }
+fn get(self) -> isize { self }
}
/// Implements a range index type with operator overloads
@@ -35,8 +35,8 @@ macro_rules! int_range_index {
impl $Self_ {
#[inline]
-pub fn to_uint(self) -> uint {
-self.get() as uint
+pub fn to_usize(self) -> usize {
+self.get() as usize
}
}
@@ -172,16 +172,16 @@ macro_rules! int_range_index {
}
}
-impl Shl<uint> for $Self_ {
+impl Shl<usize> for $Self_ {
type Output = $Self_;
-fn shl(self, n: uint) -> $Self_ {
+fn shl(self, n: usize) -> $Self_ {
$Self_(self.get() << n)
}
}
-impl Shr<uint> for $Self_ {
+impl Shr<usize> for $Self_ {
type Output = $Self_;
-fn shr(self, n: uint) -> $Self_ {
+fn shr(self, n: usize) -> $Self_ {
$Self_(self.get() >> n)
}
}
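
int_range_index! generates index newtypes, so the $Self_ hunks above read most clearly as what they expand to. A sketch of roughly what the macro now produces for a hypothetical FragmentIndex: to_usize instead of to_uint, and shift operators taking a usize count:

use std::ops::{Shl, Shr};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct FragmentIndex(isize);

impl FragmentIndex {
    #[inline]
    fn get(self) -> isize {
        self.0
    }

    #[inline]
    fn to_usize(self) -> usize {
        self.get() as usize
    }
}

impl Shl<usize> for FragmentIndex {
    type Output = FragmentIndex;
    fn shl(self, n: usize) -> FragmentIndex {
        FragmentIndex(self.get() << n)
    }
}

impl Shr<usize> for FragmentIndex {
    type Output = FragmentIndex;
    fn shr(self, n: usize) -> FragmentIndex {
        FragmentIndex(self.get() >> n)
    }
}
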
@@ -247,7 +247,7 @@ impl<T: Int, I: RangeIndex<Index=T>> Iterator for EachIndex<T, I> {
}
#[inline]
-fn size_hint(&self) -> (uint, Option<uint>) {
+fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
@@ -399,7 +399,7 @@ impl<T: Int, I: RangeIndex<Index=T>> Range<I> {
#[inline]
pub fn is_valid_for_string(&self, s: &str) -> bool {
let s_len = s.len();
-match num::cast::<uint, T>(s_len) {
+match num::cast::<usize, T>(s_len) {
Some(len) => {
let len = RangeIndex::new(len);
self.begin() < len
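
The final hunk is cut off mid-expression, but the shape of the check is visible: the string length, a usize, is cast into the range's index type, and the range must lie inside it. A hedged reconstruction of that shape for a concrete isize index, using TryFrom in place of the old num::cast:

use std::convert::TryFrom;

fn is_valid_for_string_len(begin: isize, end: isize, s: &str) -> bool {
    match isize::try_from(s.len()) {
        // The range is valid only if it sits within 0..=len.
        Ok(len) => 0 <= begin && begin <= end && end <= len,
        // A length that does not fit in the index type cannot contain the range.
        Err(_) => false,
    }
}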