Use std::sync::atomic::Ordering explicitly.

commit faefb27f3e
parent 524966e3af
Author: Ms2ger
Date:   2015-01-22 13:25:20 +01:00

8 changed files with 22 additions and 22 deletions
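Every hunk below makes the same mechanical change: rather than importing the Ordering variants (Relaxed, SeqCst) directly, each file imports the Ordering enum itself and path-qualifies the variant at the call site, so the chosen memory ordering is visible wherever an atomic operation happens. A minimal sketch of the pattern, written in present-day Rust for illustration (the 2015 code used AtomicUint, since renamed AtomicUsize; the CHILDREN_COUNT static here is hypothetical, echoing the children_count field this commit touches):

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative counter, echoing parallel.children_count in the diff below.
static CHILDREN_COUNT: AtomicUsize = AtomicUsize::new(0);

fn main() {
    // Before this commit: `use std::sync::atomic::SeqCst;` left the
    // ordering as a bare identifier at the call site.
    // After: the variant is spelled through the enum, so the ordering
    // choice is explicit where the atomic operation happens.
    let previous = CHILDREN_COUNT.fetch_add(1, Ordering::SeqCst);
    assert_eq!(previous, 0);
    assert_eq!(CHILDREN_COUNT.load(Ordering::SeqCst), 1);
}

A side benefit visible in the import hunks: a single `use std::sync::atomic::Ordering;` suffices no matter how many orderings a module uses, which is why import lists such as {AtomicInt, Relaxed, SeqCst} shrink to {AtomicInt, Ordering}.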


@@ -53,7 +53,7 @@ use servo_util::opts;
use std::borrow::ToOwned;
use std::collections::DList;
use std::mem;
-use std::sync::atomic::Relaxed;
+use std::sync::atomic::Ordering;
use style::computed_values::{caption_side, display, empty_cells, float, list_style_position};
use style::computed_values::{position};
use style::{mod, ComputedValues};
@@ -1385,7 +1385,7 @@ impl FlowConstructionUtils for FlowRef {
}
base.children.push_back(new_child);
-let _ = base.parallel.children_count.fetch_add(1, Relaxed);
+let _ = base.parallel.children_count.fetch_add(1, Ordering::Relaxed);
}
/// Finishes a flow. Once a flow is finished, no more child flows or fragments may be added to


@@ -56,7 +56,7 @@ use std::mem;
use std::fmt;
use std::iter::Zip;
use std::raw;
-use std::sync::atomic::{AtomicUint, SeqCst};
+use std::sync::atomic::{AtomicUint, Ordering};
use std::slice::MutItems;
use style::computed_values::{clear, empty_cells, float, position, text_align};
use style::ComputedValues;
@@ -781,7 +781,7 @@ impl fmt::Show for BaseFlow {
write!(f,
"@ {}, CC {}, ADC {}",
self.position,
-self.parallel.children_count.load(SeqCst),
+self.parallel.children_count.load(Ordering::SeqCst),
self.abs_descendants.len())
}
}
@@ -830,7 +830,7 @@ impl<E, S: Encoder<E>> Encodable<S, E> for BaseFlow {
#[unsafe_destructor]
impl Drop for BaseFlow {
fn drop(&mut self) {
-if self.ref_count.load(SeqCst) != 0 {
+if self.ref_count.load(Ordering::SeqCst) != 0 {
panic!("Flow destroyed before its ref count hit zero—this is unsafe!")
}
}


@@ -12,7 +12,7 @@ use flow;
use std::mem;
use std::ptr;
use std::raw;
-use std::sync::atomic::SeqCst;
+use std::sync::atomic::Ordering;
#[unsafe_no_drop_flag]
pub struct FlowRef {
@@ -55,7 +55,7 @@ impl Drop for FlowRef {
if self.object.vtable.is_null() {
return
}
-if flow::base(&**self).ref_count().fetch_sub(1, SeqCst) > 1 {
+if flow::base(&**self).ref_count().fetch_sub(1, Ordering::SeqCst) > 1 {
return
}
let flow_ref: FlowRef = mem::replace(self, FlowRef {
@@ -75,7 +75,7 @@ impl Drop for FlowRef {
impl Clone for FlowRef {
fn clone(&self) -> FlowRef {
unsafe {
-drop(flow::base(self.deref()).ref_count().fetch_add(1, SeqCst));
+drop(flow::base(self.deref()).ref_count().fetch_add(1, Ordering::SeqCst));
FlowRef {
object: raw::TraitObject {
vtable: self.object.vtable,


@@ -14,7 +14,7 @@ use serialize::json;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::io::File;
-use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+use std::sync::atomic::{AtomicUint, Ordering, INIT_ATOMIC_UINT};
thread_local!(static STATE_KEY: RefCell<Option<State>> = RefCell::new(None))
@@ -96,7 +96,7 @@ impl Drop for Scope {
/// which are often reallocated but represent essentially the
/// same data.
pub fn generate_unique_debug_id() -> u16 {
-unsafe { DEBUG_ID_COUNTER.fetch_add(1, SeqCst) as u16 }
+unsafe { DEBUG_ID_COUNTER.fetch_add(1, Ordering::SeqCst) as u16 }
}
/// Begin a layout debug trace. If this has not been called,


@@ -23,7 +23,7 @@ use servo_util::time::{TimeProfilerCategory, ProfilerMetadata, TimeProfilerChan,
use servo_util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
use std::mem;
use std::ptr;
-use std::sync::atomic::{AtomicInt, Relaxed, SeqCst};
+use std::sync::atomic::{AtomicInt, Ordering};
#[allow(dead_code)]
fn static_assertion(node: UnsafeLayoutNode) {
@@ -108,7 +108,7 @@ pub trait ParallelPreorderDomTraversal : PreorderDomTraversal {
{
let mut layout_data_ref = node.mutate_layout_data();
let layout_data = layout_data_ref.as_mut().expect("no layout data");
-layout_data.data.parallel.children_count.store(child_count as int, Relaxed);
+layout_data.data.parallel.children_count.store(child_count as int, Ordering::Relaxed);
}
// Possibly enqueue the children.
@@ -173,7 +173,7 @@ trait ParallelPostorderDomTraversal : PostorderDomTraversal {
.data
.parallel
.children_count
-.fetch_sub(1, SeqCst) == 1 {
+.fetch_sub(1, Ordering::SeqCst) == 1 {
// We were the last child of our parent. Construct flows for our parent.
} else {
// Get out of here and find another node to work on.
@@ -231,7 +231,7 @@ trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
let base = flow::mut_base(flow.deref_mut());
// Reset the count of children for the next layout traversal.
-base.parallel.children_count.store(base.children.len() as int, Relaxed);
+base.parallel.children_count.store(base.children.len() as int, Ordering::Relaxed);
// Possibly enqueue the parent.
let unsafe_parent = base.parallel.parent;
@@ -245,7 +245,7 @@ trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
// on with our parent; otherwise, we've gotta wait.
let parent: &mut FlowRef = mem::transmute(&unsafe_parent);
let parent_base = flow::mut_base(parent.deref_mut());
-if parent_base.parallel.children_count.fetch_sub(1, SeqCst) == 1 {
+if parent_base.parallel.children_count.fetch_sub(1, Ordering::SeqCst) == 1 {
// We were the last child of our parent. Reflow our parent.
unsafe_flow = unsafe_parent
} else {


@@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, SeqCst};
+use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Ordering};
use std::rc::Rc;
use std::cell::RefCell;
@@ -15,7 +15,7 @@ pub fn tid() -> uint {
TASK_LOCAL_TID.with(|ref k| {
let ret =
match *k.borrow() {
-None => unsafe { next_tid.fetch_add(1, SeqCst) },
+None => unsafe { next_tid.fetch_add(1, Ordering::SeqCst) },
Some(x) => x,
};


@@ -14,7 +14,7 @@ use libc::funcs::posix88::unistd::usleep;
use rand::{Rng, XorShiftRng};
use std::mem;
use std::rand::weak_rng;
-use std::sync::atomic::{AtomicUint, SeqCst};
+use std::sync::atomic::{AtomicUint, Ordering};
use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
/// A unit of work.
@@ -157,7 +157,7 @@ impl<QueueData: Send, WorkData: Send> WorkerThread<QueueData, WorkData> {
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
-if (*ref_count).fetch_sub(1, SeqCst) == 1 {
+if (*ref_count).fetch_sub(1, Ordering::SeqCst) == 1 {
self.chan.send(SupervisorMsg::Finished)
}
}
@@ -181,7 +181,7 @@ impl<'a, QueueData: 'static, WorkData: Send> WorkerProxy<'a, QueueData, WorkData
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
unsafe {
-drop((*self.ref_count).fetch_add(1, SeqCst));
+drop((*self.ref_count).fetch_add(1, Ordering::SeqCst));
}
self.worker.push(work_unit);
}


@@ -18,7 +18,7 @@ use libc::c_int;
use servo_util::opts;
use std::borrow::ToOwned;
use std::cell::{Cell, RefCell};
-use std::sync::atomic::{AtomicInt, SeqCst};
+use std::sync::atomic::{AtomicInt, Ordering};
thread_local!(pub static ID_COUNTER: AtomicInt = AtomicInt::new(0))
thread_local!(pub static BROWSERS: RefCell<Vec<CefBrowser>> = RefCell::new(vec!()))
@@ -105,7 +105,7 @@ impl ServoCefBrowser {
};
let id = ID_COUNTER.with(|counter| {
-counter.fetch_add(1, SeqCst)
+counter.fetch_add(1, Ordering::SeqCst)
});
ServoCefBrowser {