Timers are scheduled by a dedicated per-constellation thread.

benshu 2015-08-30 01:45:07 +02:00
parent 674589c370
commit 553a0dbefd
21 changed files with 786 additions and 334 deletions
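For orientation before the diff: the sketch below is not part of the commit. It is a minimal, self-contained restatement of the pattern the commit message describes, with one dedicated scheduler thread per constellation owning a min-heap of pending timers while everyone else only holds a `Sender` for requests. The `Scheduled` struct, `start_scheduler` function, and the use of `recv_timeout` are illustrative stand-ins written against today's std; the 2015 code in `timer_scheduler.rs` below instead combines the unstable `mpsc_select` feature with a hand-rolled park-based one-shot timer, because `recv_timeout` did not exist yet.

```rust
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::sync::mpsc::{channel, RecvTimeoutError, Sender};
use std::thread;
use std::time::{Duration, Instant};

// Simplified stand-in for TimerEventRequest: fire `id` on `reply` at `due`.
struct Scheduled {
    due: Instant,
    id: u32,
    reply: Sender<u32>,
}

// Compare by due time, reversed, so BinaryHeap (a max-heap) pops the earliest
// deadline first -- the same trick ScheduledEvent uses in timer_scheduler.rs.
impl Ord for Scheduled {
    fn cmp(&self, other: &Self) -> Ordering {
        other.due.cmp(&self.due)
    }
}
impl PartialOrd for Scheduled {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl PartialEq for Scheduled {
    fn eq(&self, other: &Self) -> bool {
        self.due == other.due
    }
}
impl Eq for Scheduled {}

fn start_scheduler() -> Sender<Scheduled> {
    let (chan, port) = channel::<Scheduled>();
    thread::spawn(move || {
        let mut queue: BinaryHeap<Scheduled> = BinaryHeap::new();
        loop {
            // Block for the next request, but never sleep past the earliest deadline.
            let received = match queue.peek() {
                Some(next) => port.recv_timeout(next.due.saturating_duration_since(Instant::now())),
                None => port.recv().map_err(|_| RecvTimeoutError::Disconnected),
            };
            match received {
                Ok(request) => queue.push(request),
                Err(RecvTimeoutError::Timeout) => {}
                Err(RecvTimeoutError::Disconnected) => return, // all senders are gone
            }
            // Dispatch every event whose deadline has passed.
            while queue.peek().map_or(false, |next| next.due <= Instant::now()) {
                let event = queue.pop().unwrap();
                let _ = event.reply.send(event.id);
            }
        }
    });
    chan
}

fn main() {
    let scheduler = start_scheduler();
    let (tx, rx) = channel();
    let request = Scheduled { due: Instant::now() + Duration::from_millis(50), id: 7, reply: tx };
    scheduler.send(request).unwrap();
    assert_eq!(rx.recv().unwrap(), 7); // delivered roughly 50 ms later
}
```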

View file

@@ -41,6 +41,7 @@ use profile_traits::mem;
use profile_traits::time;
use script_traits::{CompositorEvent, ConstellationControlMsg, LayoutControlMsg};
use script_traits::{ScriptState, ScriptTaskFactory};
use script_traits::{TimerEventRequest};
use std::borrow::ToOwned;
use std::collections::HashMap;
use std::io::{self, Write};
@@ -49,6 +50,7 @@ use std::mem::replace;
use std::process;
use std::sync::mpsc::{Receiver, Sender, channel};
use style_traits::viewport::ViewportConstraints;
use timer_scheduler::TimerScheduler;
use url::Url;
use util::cursor::Cursor;
use util::geometry::PagePx;
@@ -135,6 +137,8 @@ pub struct Constellation<LTF, STF> {
/// A list of in-process senders to `WebGLPaintTask`s.
webgl_paint_tasks: Vec<Sender<CanvasMsg>>,
scheduler_chan: Sender<TimerEventRequest>,
}
/// State needed to construct a constellation.
@@ -280,6 +284,7 @@ impl<LTF: LayoutTaskFactory, STF: ScriptTaskFactory> Constellation<LTF, STF> {
webdriver: WebDriverData::new(),
canvas_paint_tasks: Vec::new(),
webgl_paint_tasks: Vec::new(),
scheduler_chan: TimerScheduler::start(),
};
let namespace_id = constellation.next_pipeline_namespace_id();
PipelineNamespace::install(namespace_id);
@@ -317,6 +322,7 @@ impl<LTF: LayoutTaskFactory, STF: ScriptTaskFactory> Constellation<LTF, STF> {
id: pipeline_id,
parent_info: parent_info,
constellation_chan: self.chan.clone(),
scheduler_chan: self.scheduler_chan.clone(),
compositor_proxy: self.compositor_proxy.clone_compositor_proxy(),
devtools_chan: self.devtools_chan.clone(),
image_cache_task: self.image_cache_task.clone(),

View file

@@ -6,6 +6,7 @@
#![feature(iter_cmp)]
#![feature(slice_bytes)]
#![feature(vec_push_all)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![plugin(plugins)]
@@ -54,6 +55,7 @@ mod compositor_layer;
mod headless;
mod scrolling;
mod surface_map;
mod timer_scheduler;
pub mod compositor_task;
pub mod constellation;
pub mod pipeline;

View file

@@ -24,6 +24,7 @@ use profile_traits::mem as profile_mem;
use profile_traits::time;
use script_traits::{ConstellationControlMsg, InitialScriptState};
use script_traits::{LayoutControlMsg, NewLayoutInfo, ScriptTaskFactory};
use script_traits::{TimerEventRequest};
use std::any::Any;
use std::mem;
use std::sync::mpsc::{Receiver, Sender, channel};
@@ -75,6 +76,8 @@ pub struct InitialPipelineState {
pub parent_info: Option<(PipelineId, SubpageId)>,
/// A channel to the associated constellation.
pub constellation_chan: ConstellationChan,
/// A channel to schedule timer events.
pub scheduler_chan: Sender<TimerEventRequest>,
/// A channel to the compositor.
pub compositor_proxy: Box<CompositorProxy + 'static + Send>,
/// A channel to the developer tools, if applicable.
@@ -181,6 +184,7 @@ impl Pipeline {
id: state.id,
parent_info: state.parent_info,
constellation_chan: state.constellation_chan,
scheduler_chan: state.scheduler_chan,
compositor_proxy: state.compositor_proxy,
devtools_chan: script_to_devtools_chan,
image_cache_task: state.image_cache_task,
@@ -316,6 +320,7 @@ pub struct PipelineContent {
id: PipelineId,
parent_info: Option<(PipelineId, SubpageId)>,
constellation_chan: ConstellationChan,
scheduler_chan: Sender<TimerEventRequest>,
compositor_proxy: Box<CompositorProxy + Send + 'static>,
devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
image_cache_task: ImageCacheTask,
@@ -361,6 +366,7 @@ impl PipelineContent {
control_chan: self.script_chan.clone(),
control_port: mem::replace(&mut self.script_port, None).unwrap(),
constellation_chan: self.constellation_chan.clone(),
scheduler_chan: self.scheduler_chan.clone(),
failure_info: self.failure.clone(),
resource_task: self.resource_task,
storage_task: self.storage_task.clone(),

View file

@@ -0,0 +1,221 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::length::Length;
use num::traits::Saturating;
use script_traits::{MsDuration, NsDuration, precise_time_ms, precise_time_ns};
use script_traits::{TimerEvent, TimerEventRequest};
use std::cell::RefCell;
use std::cmp::{self, Ord};
use std::collections::BinaryHeap;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool};
use std::sync::mpsc::{channel, Receiver, Select, Sender};
use std::thread::{self, spawn, Thread};
use util::task::spawn_named;
/// A quick hack to work around the removal of [`std::old_io::timer::Timer`](
/// http://doc.rust-lang.org/1.0.0-beta/std/old_io/timer/struct.Timer.html )
struct CancelableOneshotTimer {
thread: Thread,
canceled: Arc<AtomicBool>,
port: Receiver<()>,
}
impl CancelableOneshotTimer {
fn new(duration: MsDuration) -> CancelableOneshotTimer {
let (tx, rx) = channel();
let canceled = Arc::new(AtomicBool::new(false));
let canceled_clone = canceled.clone();
let thread = spawn(move || {
let due_time = precise_time_ms() + duration;
let mut park_time = duration;
loop {
thread::park_timeout_ms(park_time.get() as u32);
if canceled_clone.load(atomic::Ordering::Relaxed) {
return;
}
// park_timeout_ms does not guarantee parking for the
// given amount. We might have woken up early.
let current_time = precise_time_ms();
if current_time >= due_time {
let _ = tx.send(());
return;
}
park_time = due_time - current_time;
}
}).thread().clone();
CancelableOneshotTimer {
thread: thread,
canceled: canceled,
port: rx,
}
}
fn port(&self) -> &Receiver<()> {
&self.port
}
fn cancel(&self) {
self.canceled.store(true, atomic::Ordering::Relaxed);
self.thread.unpark();
}
}
pub struct TimerScheduler {
port: Receiver<TimerEventRequest>,
scheduled_events: RefCell<BinaryHeap<ScheduledEvent>>,
timer: RefCell<Option<CancelableOneshotTimer>>,
}
struct ScheduledEvent {
request: TimerEventRequest,
for_time: NsDuration,
}
impl Ord for ScheduledEvent {
fn cmp(&self, other: &ScheduledEvent) -> cmp::Ordering {
self.for_time.cmp(&other.for_time).reverse()
}
}
impl PartialOrd for ScheduledEvent {
fn partial_cmp(&self, other: &ScheduledEvent) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Eq for ScheduledEvent {}
impl PartialEq for ScheduledEvent {
fn eq(&self, other: &ScheduledEvent) -> bool {
self as *const ScheduledEvent == other as *const ScheduledEvent
}
}
enum Task {
HandleRequest(TimerEventRequest),
DispatchDueEvents,
}
impl TimerScheduler {
pub fn start() -> Sender<TimerEventRequest> {
let (chan, port) = channel();
let timer_scheduler = TimerScheduler {
port: port,
scheduled_events: RefCell::new(BinaryHeap::new()),
timer: RefCell::new(None),
};
spawn_named("TimerScheduler".to_owned(), move || {
timer_scheduler.run_event_loop();
});
chan
}
fn run_event_loop(&self) {
loop {
match self.receive_next_task() {
Some(Task::HandleRequest(request)) => self.handle_request(request),
Some(Task::DispatchDueEvents) => self.dispatch_due_events(),
None => break,
}
}
}
#[allow(unsafe_code)]
fn receive_next_task(&self) -> Option<Task> {
let port = &self.port;
let timer = self.timer.borrow();
let timer_port = timer.as_ref().map(|timer| timer.port());
if let Some(ref timer_port) = timer_port {
let sel = Select::new();
let mut scheduler_handle = sel.handle(port);
let mut timer_handle = sel.handle(timer_port);
unsafe {
scheduler_handle.add();
timer_handle.add();
}
let ret = sel.wait();
if ret == scheduler_handle.id() {
port.recv().ok().map(|req| Task::HandleRequest(req))
} else if ret == timer_handle.id() {
timer_port.recv().ok().map(|_| Task::DispatchDueEvents)
} else {
panic!("unexpected select result!")
}
} else {
port.recv().ok().map(|req| Task::HandleRequest(req))
}
}
fn handle_request(&self, request: TimerEventRequest) {
let TimerEventRequest(_, _, _, duration_ms) = request;
let duration_ns = Length::new(duration_ms.get() * 1000 * 1000);
let schedule_for = precise_time_ns() + duration_ns;
let previously_earliest = self.scheduled_events.borrow().peek()
.map(|scheduled| scheduled.for_time)
.unwrap_or(Length::new(u64::max_value()));
self.scheduled_events.borrow_mut().push(ScheduledEvent {
request: request,
for_time: schedule_for,
});
if schedule_for < previously_earliest {
self.start_timer_for_next_event();
}
}
fn dispatch_due_events(&self) {
let now = precise_time_ns();
{
let mut events = self.scheduled_events.borrow_mut();
while !events.is_empty() && events.peek().as_ref().unwrap().for_time <= now {
let event = events.pop().unwrap();
let TimerEventRequest(chan, source, id, _) = event.request;
let _ = chan.send(TimerEvent(source, id));
}
}
self.start_timer_for_next_event();
}
fn start_timer_for_next_event(&self) {
let events = self.scheduled_events.borrow();
let next_event = events.peek();
let mut timer = self.timer.borrow_mut();
if let Some(ref mut timer) = *timer {
timer.cancel();
}
*timer = next_event.map(|next_event| {
let delay_ns = next_event.for_time.get().saturating_sub(precise_time_ns().get());
// Round up, we'd rather be late than early…
let delay_ms = Length::new(delay_ns.saturating_add(999999) / (1000 * 1000));
CancelableOneshotTimer::new(delay_ms)
});
}
}
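
The rounding in `start_timer_for_next_event` is a plain ceiling division from nanoseconds to milliseconds, so the timer parks slightly too long rather than firing early. A standalone restatement follows; the helper name is illustrative, not from the commit.

```rust
// Ceiling division from nanoseconds to milliseconds, mirroring the
// `saturating_add(999999) / (1000 * 1000)` expression above: better to park a
// little too long than to wake up before the deadline.
fn ns_to_ms_round_up(ns: u64) -> u64 {
    ns.saturating_add(999_999) / 1_000_000
}

fn main() {
    assert_eq!(ns_to_ms_round_up(0), 0);
    assert_eq!(ns_to_ms_round_up(1), 1);         // even 1 ns waits a full millisecond
    assert_eq!(ns_to_ms_round_up(1_000_000), 1); // exact multiples are unchanged
    assert_eq!(ns_to_ms_round_up(1_000_001), 2);
}
```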