Create a run_with_memory_reporting method to reduce the boilerplate associated with registering memory reporters.

Ms2ger, 2015-08-04 16:32:01 +02:00
parent 4726f58d15
commit bf3ecaa901
6 changed files with 68 additions and 80 deletions
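
At each call site, this replaces a hand-written IPC channel, ROUTER route, and explicit register/unregister messages with a single call. Condensed from the paint task hunk below, the new shape is:

    let reporter_name = format!("paint-reporter-{}", id.0);
    mem_profiler_chan.run_with_memory_reporting(|| {
        paint_task.start();
    }, reporter_name, chrome_to_paint_chan, ChromeToPaintMsg::CollectReports);

The helper registers the reporter, runs the closure (typically the task's event loop), and unregisters on the way out, so a call site can no longer forget the unregister step.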

components/gfx/paint_task.rs

@@ -16,8 +16,7 @@ use euclid::Matrix4;
 use euclid::point::Point2D;
 use euclid::rect::Rect;
 use euclid::size::Size2D;
-use ipc_channel::ipc::{self, IpcSender};
-use ipc_channel::router::ROUTER;
+use ipc_channel::ipc::IpcSender;
 use layers::platform::surface::{NativeDisplay, NativeSurface};
 use layers::layers::{BufferRequest, LayerBuffer, LayerBufferSet};
 use msg::compositor_msg::{Epoch, FrameTreeId, LayerId, LayerKind};
@@ -25,7 +24,7 @@ use msg::compositor_msg::{LayerProperties, PaintListener, ScrollPolicy};
 use msg::constellation_msg::Msg as ConstellationMsg;
 use msg::constellation_msg::{ConstellationChan, Failure, PipelineId};
 use msg::constellation_msg::PipelineExitType;
-use profile_traits::mem::{self, Reporter, ReporterRequest, ReportsChan};
+use profile_traits::mem::{self, ReportsChan};
 use profile_traits::time::{self, profile};
 use rand::{self, Rng};
 use skia::gl_context::GLContext;
@@ -167,25 +166,10 @@ impl<C> PaintTask<C> where C: PaintListener + Send + 'static {
                 canvas_map: HashMap::new()
             };
 
-            // Register the memory reporter.
             let reporter_name = format!("paint-reporter-{}", id.0);
-            let (reporter_sender, reporter_receiver) =
-                ipc::channel::<ReporterRequest>().unwrap();
-            let paint_chan_for_reporter = chrome_to_paint_chan.clone();
-            ROUTER.add_route(reporter_receiver.to_opaque(), box move |message| {
-                // Just injects an appropriate event into the paint task's queue.
-                let request: ReporterRequest = message.to().unwrap();
-                paint_chan_for_reporter.send(ChromeToPaintMsg::CollectReports(
-                    request.reports_channel)).unwrap();
-            });
-            mem_profiler_chan.send(mem::ProfilerMsg::RegisterReporter(
-                reporter_name.clone(),
-                Reporter(reporter_sender)));
-
-            paint_task.start();
-
-            let msg = mem::ProfilerMsg::UnregisterReporter(reporter_name);
-            mem_profiler_chan.send(msg);
+            mem_profiler_chan.run_with_memory_reporting(|| {
+                paint_task.start();
+            }, reporter_name, chrome_to_paint_chan, ChromeToPaintMsg::CollectReports);
 
             // Tell all the worker threads to shut down.
             for worker_thread in paint_task.worker_threads.iter_mut() {

components/layout/layout_task.rs

@@ -47,7 +47,7 @@ use log;
 use msg::compositor_msg::{Epoch, ScrollPolicy, LayerId};
 use msg::constellation_msg::Msg as ConstellationMsg;
 use msg::constellation_msg::{ConstellationChan, Failure, PipelineExitType, PipelineId};
-use profile_traits::mem::{self, Report, Reporter, ReporterRequest, ReportKind, ReportsChan};
+use profile_traits::mem::{self, Report, ReportKind, ReportsChan};
 use profile_traits::time::{self, ProfilerMetadata, profile};
 use profile_traits::time::{TimerMetadataFrameType, TimerMetadataReflowType};
 use net_traits::{load_bytes_iter, PendingAsyncLoad};
@@ -257,25 +257,10 @@ impl LayoutTaskFactory for LayoutTask {
                                               time_profiler_chan,
                                               mem_profiler_chan.clone());
 
-            // Create a memory reporter thread.
             let reporter_name = format!("layout-reporter-{}", id.0);
-            let (reporter_sender, reporter_receiver) =
-                ipc::channel::<ReporterRequest>().unwrap();
-            let layout_chan_for_reporter = layout_chan.clone();
-            ROUTER.add_route(reporter_receiver.to_opaque(), box move |message| {
-                // Just injects an appropriate event into the layout task's queue.
-                let request: ReporterRequest = message.to().unwrap();
-                layout_chan_for_reporter.0.send(Msg::CollectReports(request.reports_channel))
-                    .unwrap();
-            });
-            mem_profiler_chan.send(mem::ProfilerMsg::RegisterReporter(
-                reporter_name.clone(),
-                Reporter(reporter_sender)));
-
-            layout.start();
-
-            let msg = mem::ProfilerMsg::UnregisterReporter(reporter_name);
-            mem_profiler_chan.send(msg);
+            mem_profiler_chan.run_with_memory_reporting(|| {
+                layout.start();
+            }, reporter_name, layout_chan.0, Msg::CollectReports);
         }
         shutdown_chan.send(()).unwrap();
     }, ConstellationMsg::Failure(failure_msg), con_chan);

components/profile_traits/lib.rs

@@ -6,6 +6,7 @@
 //! rest of Servo. These APIs are here instead of in `profile` so that these
 //! modules won't have to depend on `profile`.
 
+#![feature(box_syntax)]
 #![feature(custom_derive, plugin)]
 #![plugin(serde_macros)]
 
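
Note: the new `box_syntax` gate is required because the run_with_memory_reporting helper added to the profiler traits below constructs its ROUTER callback with `box move |message| ...`, a `box` expression this crate did not previously use.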

components/profile_traits/mem.rs

@@ -6,7 +6,23 @@
 
 #![deny(missing_docs)]
 
-use ipc_channel::ipc::IpcSender;
+use ipc_channel::ipc::{self, IpcSender};
+use ipc_channel::router::ROUTER;
+
+use std::marker::Send;
+use std::sync::mpsc::Sender;
+
+/// A trait to abstract away the various kinds of message senders we use.
+pub trait OpaqueSender<T> {
+    /// Send a message.
+    fn send(&self, message: T);
+}
+
+impl<T> OpaqueSender<T> for Sender<T> {
+    fn send(&self, message: T) {
+        Sender::send(self, message).unwrap();
+    }
+}
 
 /// Front-end representation of the profiler used to communicate with the
 /// profiler.
@@ -21,6 +37,31 @@ impl ProfilerChan {
         let ProfilerChan(ref c) = *self;
         c.send(msg).unwrap();
     }
+
+    /// Runs `f()` with memory profiling.
+    pub fn run_with_memory_reporting<F, M, T, C>(&self, f: F,
+                                                 reporter_name: String,
+                                                 channel_for_reporter: C,
+                                                 msg: M)
+        where F: FnOnce(),
+              M: Fn(ReportsChan) -> T + Send + 'static,
+              T: Send + 'static,
+              C: OpaqueSender<T> + Send + 'static
+    {
+        // Register the memory reporter.
+        let (reporter_sender, reporter_receiver) = ipc::channel().unwrap();
+        ROUTER.add_route(reporter_receiver.to_opaque(), box move |message| {
+            // Just injects an appropriate event into the paint task's queue.
+            let request: ReporterRequest = message.to().unwrap();
+            channel_for_reporter.send(msg(request.reports_channel));
+        });
+        self.send(ProfilerMsg::RegisterReporter(reporter_name.clone(),
+                                                Reporter(reporter_sender)));
+
+        f();
+
+        self.send(ProfilerMsg::UnregisterReporter(reporter_name));
+    }
 }
 
 /// The various kinds of memory measurement.
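
To make the call shape concrete outside of Servo, here is a minimal, self-contained sketch of the pattern using plain std::sync::mpsc channels. Everything in it (ReportsChan, ProfilerMsg, ProfilerChan, TaskMsg) is a simplified stand-in with no IPC and no ROUTER, not the real profile_traits API:

    use std::sync::mpsc::{channel, Sender};

    // Simplified stand-ins; the real types live in profile_traits::mem.
    struct ReportsChan;

    enum ProfilerMsg {
        RegisterReporter(String),
        UnregisterReporter(String),
    }

    trait OpaqueSender<T> {
        fn send(&self, message: T);
    }

    impl<T> OpaqueSender<T> for Sender<T> {
        fn send(&self, message: T) {
            Sender::send(self, message).unwrap();
        }
    }

    // Hypothetical task message type, standing in for e.g. ChromeToPaintMsg.
    enum TaskMsg {
        CollectReports(ReportsChan),
    }

    struct ProfilerChan(Sender<ProfilerMsg>);

    impl ProfilerChan {
        // Same shape as the new helper: register, run `f`, then unregister.
        fn run_with_memory_reporting<F, M, T, C>(&self,
                                                 f: F,
                                                 reporter_name: String,
                                                 channel_for_reporter: C,
                                                 msg: M)
            where F: FnOnce(),
                  M: Fn(ReportsChan) -> T + Send + 'static,
                  T: Send + 'static,
                  C: OpaqueSender<T> + Send + 'static,
        {
            // The real helper adds an IPC route that forwards each incoming
            // ReporterRequest as `msg(request.reports_channel)`; this sketch
            // fakes a single such event instead.
            channel_for_reporter.send(msg(ReportsChan));
            self.0.send(ProfilerMsg::RegisterReporter(reporter_name.clone())).unwrap();
            f();
            self.0.send(ProfilerMsg::UnregisterReporter(reporter_name)).unwrap();
        }
    }

    fn main() {
        let (profiler_sender, profiler_port) = channel();
        let profiler_chan = ProfilerChan(profiler_sender);
        let (task_chan, task_port) = channel::<TaskMsg>();

        // Mirrors the call sites in this commit: the closure is the task's run loop.
        profiler_chan.run_with_memory_reporting(|| {
            while let Ok(TaskMsg::CollectReports(_)) = task_port.try_recv() {
                println!("task would gather memory reports here");
            }
        }, "example-reporter-0".to_owned(), task_chan, TaskMsg::CollectReports);

        // The profiler saw Register, then Unregister, bracketing the task's run.
        while let Ok(event) = profiler_port.try_recv() {
            match event {
                ProfilerMsg::RegisterReporter(name) => println!("registered {}", name),
                ProfilerMsg::UnregisterReporter(name) => println!("unregistered {}", name),
            }
        }
    }

Even in the sketch, the design's payoff is visible: the register/run/unregister sequence lives in one place, and any channel type can participate by implementing OpaqueSender.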

components/script/dom/dedicatedworkerglobalscope.rs

@@ -28,12 +28,11 @@ use script_task::StackRootTLS;
 use devtools_traits::DevtoolScriptControlMsg;
 use msg::constellation_msg::PipelineId;
 use net_traits::load_whole_resource;
-use profile_traits::mem::{self, Reporter, ReporterRequest};
 use util::task::spawn_named;
 use util::task_state;
 use util::task_state::{SCRIPT, IN_WORKER};
-use ipc_channel::ipc::{self, IpcReceiver};
+use ipc_channel::ipc::IpcReceiver;
 use ipc_channel::router::ROUTER;
 use js::jsapi::{JSContext, RootedValue, HandleValue};
 use js::jsapi::{JSAutoRequest, JSAutoCompartment};
@@ -192,26 +191,12 @@ impl DedicatedWorkerGlobalScope {
                 scope.execute_script(source);
             }
 
-            // Register this task as a memory reporter.
             let reporter_name = format!("worker-reporter-{}", random::<u64>());
-            let (reporter_sender, reporter_receiver) = ipc::channel().unwrap();
-            ROUTER.add_route(reporter_receiver.to_opaque(), box move |reporter_request| {
-                // Just injects an appropriate event into the worker task's queue.
-                let reporter_request: ReporterRequest = reporter_request.to().unwrap();
-                parent_sender.send(ScriptMsg::CollectReports(
-                    reporter_request.reports_channel)).unwrap()
-            });
-            scope.mem_profiler_chan().send(mem::ProfilerMsg::RegisterReporter(
-                reporter_name.clone(),
-                Reporter(reporter_sender)));
-
-            while let Ok(event) = global.receive_event() {
-                global.handle_event(event);
-            }
-
-            // Unregister this task as a memory reporter.
-            let msg = mem::ProfilerMsg::UnregisterReporter(reporter_name);
-            scope.mem_profiler_chan().send(msg);
+            scope.mem_profiler_chan().run_with_memory_reporting(|| {
+                while let Ok(event) = global.receive_event() {
+                    global.handle_event(event);
+                }
+            }, reporter_name, parent_sender, ScriptMsg::CollectReports);
         });
     }
 }

components/script/script_task.rs

@@ -73,7 +73,7 @@ use net_traits::LoadData as NetLoadData;
 use net_traits::{AsyncResponseTarget, ResourceTask, LoadConsumer, ControlMsg, Metadata};
 use net_traits::image_cache_task::{ImageCacheChan, ImageCacheTask, ImageCacheResult};
 use net_traits::storage_task::StorageTask;
-use profile_traits::mem::{self, Report, Reporter, ReporterRequest, ReportKind, ReportsChan};
+use profile_traits::mem::{self, Report, ReportKind, ReportsChan, OpaqueSender};
 use string_cache::Atom;
 use util::str::DOMString;
 use util::task::spawn_named_with_send_on_failure;
@@ -216,6 +216,12 @@ pub trait ScriptChan {
     fn clone(&self) -> Box<ScriptChan+Send>;
 }
 
+impl OpaqueSender<ScriptMsg> for Box<ScriptChan+Send> {
+    fn send(&self, msg: ScriptMsg) {
+        ScriptChan::send(&**self, msg).unwrap();
+    }
+}
+
 /// An interface for receiving ScriptMsg values in an event loop. Used for synchronous DOM
 /// APIs that need to abstract over multiple kinds of event loops (worker/main thread) with
 /// different Receiver interfaces.
@@ -437,24 +443,10 @@ impl ScriptTaskFactory for ScriptTask {
                                                load_data.url.clone());
         script_task.start_page_load(new_load, load_data);
 
-        // Register this task as a memory reporter.
         let reporter_name = format!("script-reporter-{}", id.0);
-        let (reporter_sender, reporter_receiver) = ipc::channel().unwrap();
-        ROUTER.add_route(reporter_receiver.to_opaque(), box move |reporter_request| {
-            // Just injects an appropriate event into the worker task's queue.
-            let reporter_request: ReporterRequest = reporter_request.to().unwrap();
-            channel_for_reporter.send(ScriptMsg::CollectReports(
-                reporter_request.reports_channel)).unwrap()
-        });
-        let reporter = Reporter(reporter_sender);
-        let msg = mem::ProfilerMsg::RegisterReporter(reporter_name.clone(), reporter);
-        mem_profiler_chan.send(msg);
-
-        script_task.start();
-
-        // Unregister this task as a memory reporter.
-        let msg = mem::ProfilerMsg::UnregisterReporter(reporter_name);
-        mem_profiler_chan.send(msg);
+        mem_profiler_chan.run_with_memory_reporting(|| {
+            script_task.start();
+        }, reporter_name, channel_for_reporter, ScriptMsg::CollectReports);
 
         // This must always be the very last operation performed before the task completes
         failsafe.neuter();
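
A closing note on the OpaqueSender design: the helper cannot simply take a std::sync::mpsc::Sender, because the call sites in this commit send through different sender types: plain mpsc senders for paint and layout (layout passes the `.0` of its tuple-struct channel), and Box<ScriptChan+Send> for the script and worker tasks, which gets its own impl above. Any future task with its own channel wrapper can opt in with a one-off impl; a hypothetical example, with MyTaskChan and MyTaskMsg as illustrative names only:

    use std::sync::mpsc::Sender;
    use profile_traits::mem::{OpaqueSender, ReportsChan};

    // Hypothetical message and channel types for some future task.
    enum MyTaskMsg {
        CollectReports(ReportsChan),
    }

    struct MyTaskChan(Sender<MyTaskMsg>);

    impl OpaqueSender<MyTaskMsg> for MyTaskChan {
        fn send(&self, message: MyTaskMsg) {
            // Forward to the wrapped mpsc sender, mirroring the Sender<T> impl.
            self.0.send(message).unwrap();
        }
    }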