Mirror of https://github.com/servo/servo.git
Add a command line option (-n) to use native threading, for testing.
I added this to test the impact of green threading.
parent b6c9b65355
commit e783666a19
3 changed files with 45 additions and 27 deletions
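To compare the two modes, a build can be launched with and without the new flag; the binary path and test URL below are illustrative only and are not part of this commit:

    ./servo http://example.com        # default: green threading (green::SchedPool with a rustuv event loop)
    ./servo -n http://example.com     # -n / --native-threading: spawn on a native OS thread
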
@@ -59,6 +59,7 @@ pub extern "C" fn cef_run_message_loop()
         headless: false,
         hard_fail: false,
         bubble_widths_separately: false,
+        native_threading: false
     };
     native::start(0, 0 as **u8, proc() {
         servo::run(opts);

@@ -51,23 +51,15 @@ extern crate core_graphics;
 #[cfg(target_os="macos")]
 extern crate core_text;

 #[cfg(not(test))]
 use compositing::{CompositorChan, CompositorTask};
 #[cfg(not(test))]
 use constellation::Constellation;
 #[cfg(not(test))]
 use servo_msg::constellation_msg::{ConstellationChan, InitLoadUrlMsg};

 #[cfg(not(test))]
 use servo_net::image_cache_task::{ImageCacheTask, SyncImageCacheTask};
 #[cfg(not(test))]
 use servo_net::resource_task::ResourceTask;
 #[cfg(not(test))]
 use servo_util::time::Profiler;

 #[cfg(not(test))]
 use servo_util::opts;
 #[cfg(not(test))]
 use servo_util::url::parse_url;

@@ -75,9 +67,7 @@ use servo_util::url::parse_url;
 use std::os;
 #[cfg(not(test), target_os="android")]
 use std::str;
 #[cfg(not(test))]
 use std::task::TaskOpts;
 #[cfg(not(test))]
 use url::Url;

@@ -161,11 +151,41 @@ pub extern "C" fn android_start(argc: int, argv: **u8) -> int {
     })
 }

+#[cfg(not(test))]
+fn spawn_main(opts: opts::Opts,
+              compositor_port: Receiver<compositing::Msg>,
+              profiler_chan: servo_util::time::ProfilerChan,
+              result_port: Receiver<ConstellationChan>,
+              p: proc(): Send) {
+    if !opts.native_threading {
+        let mut pool_config = green::PoolConfig::new();
+        pool_config.event_loop_factory = rustuv::event_loop;
+        let mut pool = green::SchedPool::new(pool_config);
+
+        pool.spawn(TaskOpts::new(), p);
+
+        let constellation_chan = result_port.recv();
+
+        debug!("preparing to enter main loop");
+        CompositorTask::create(opts,
+                               compositor_port,
+                               constellation_chan,
+                               profiler_chan);
+
+        pool.shutdown();
+
+    } else {
+        native::task::spawn(p);
+        let constellation_chan = result_port.recv();
+
+        debug!("preparing to enter main loop");
+        CompositorTask::create(opts,
+                               compositor_port,
+                               constellation_chan,
+                               profiler_chan);
+    }
+}
+
 pub fn run(opts: opts::Opts) {
-    let mut pool_config = green::PoolConfig::new();
-    pool_config.event_loop_factory = rustuv::event_loop;
-    let mut pool = green::SchedPool::new(pool_config);
-
     let (compositor_port, compositor_chan) = CompositorChan::new();
     let profiler_chan = Profiler::create(opts.profiler_period);

@@ -174,7 +194,7 @@ pub fn run(opts: opts::Opts) {
     let profiler_chan_clone = profiler_chan.clone();

     let (result_chan, result_port) = channel();
-    pool.spawn(TaskOpts::new(), proc() {
+    spawn_main(opts.clone(), compositor_port, profiler_chan, result_port, proc() {
         let opts = &opts_clone;
         // Create a Servo instance.
         let resource_task = ResourceTask();

@@ -210,15 +230,5 @@ pub fn run(opts: opts::Opts) {
         // Send the constallation Chan as the result
         result_chan.send(constellation_chan);
     });
-
-    let constellation_chan = result_port.recv();
-
-    debug!("preparing to enter main loop");
-    CompositorTask::create(opts,
-                           compositor_port,
-                           constellation_chan,
-                           profiler_chan);
-
-    pool.shutdown();
 }

@@ -61,6 +61,9 @@ pub struct Opts {
     /// may wish to turn this flag on in order to benchmark style recalculation against other
     /// browser engines.
     pub bubble_widths_separately: bool,
+
+    /// Use native threads instead of green threads
+    pub native_threading: bool
 }

 fn print_usage(app: &str, opts: &[getopts::OptGroup]) {

@@ -77,7 +80,7 @@ pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
     let app_name = args[0].to_str();
     let args = args.tail();

-    let opts = vec!(
+    let opts = vec![
         getopts::optflag("c", "cpu", "CPU rendering"),
         getopts::optopt("o", "output", "Output file", "output.png"),
         getopts::optopt("r", "rendering", "Rendering backend", "direct2d|core-graphics|core-graphics-accelerated|cairo|skia."),

@@ -90,8 +93,9 @@ pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
         getopts::optflag("z", "headless", "Headless mode"),
         getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"),
         getopts::optflag("b", "bubble-widths", "Bubble intrinsic widths separately like other engines"),
+        getopts::optflag("n", "native-threading", "Use native threading instead of green threading"),
         getopts::optflag("h", "help", "Print this message")
-    );
+    ];

     let opt_match = match getopts::getopts(args, opts.as_slice()) {
         Ok(m) => m,

@@ -169,6 +173,8 @@ pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
         None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
     };

+    let native_threading = opt_match.opt_present("n") || opt_match.opt_present("native-threading");
+
     Some(Opts {
         urls: urls,
         render_backend: render_backend,

@@ -183,5 +189,6 @@ pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
         headless: opt_match.opt_present("z"),
         hard_fail: opt_match.opt_present("f"),
         bubble_widths_separately: opt_match.opt_present("b"),
+        native_threading: native_threading
     })
 }