ensure clean shutdown of all threads running JS

Gregory Terzian 2020-06-24 15:07:48 +08:00
parent 0b61cfc3ae
commit 44ebca72da
25 changed files with 565 additions and 232 deletions

View file

@ -3,14 +3,17 @@
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::sampler::{NativeStack, Sampler};
use crossbeam_channel::{after, unbounded, Receiver, Sender};
use crossbeam_channel::{after, never, unbounded, Receiver, Sender};
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use ipc_channel::router::ROUTER;
use msg::constellation_msg::MonitoredComponentId;
use msg::constellation_msg::{
BackgroundHangMonitor, BackgroundHangMonitorClone, BackgroundHangMonitorRegister,
BackgroundHangMonitor, BackgroundHangMonitorClone, BackgroundHangMonitorExitSignal,
BackgroundHangMonitorRegister,
};
use msg::constellation_msg::{
BackgroundHangMonitorControlMsg, HangAlert, HangAnnotation, HangMonitorAlert,
};
use msg::constellation_msg::{HangAlert, HangAnnotation, HangMonitorAlert, SamplerControlMsg};
use std::cell::Cell;
use std::collections::{HashMap, VecDeque};
use std::thread;
@ -19,23 +22,32 @@ use std::time::{Duration, Instant};
#[derive(Clone)]
pub struct HangMonitorRegister {
sender: Sender<(MonitoredComponentId, MonitoredComponentMsg)>,
monitoring_enabled: bool,
}
impl HangMonitorRegister {
/// Start a new hang monitor worker, and return a handle to register components for monitoring.
pub fn init(
constellation_chan: IpcSender<HangMonitorAlert>,
control_port: IpcReceiver<SamplerControlMsg>,
control_port: IpcReceiver<BackgroundHangMonitorControlMsg>,
monitoring_enabled: bool,
) -> Box<dyn BackgroundHangMonitorRegister> {
let (sender, port) = unbounded();
let _ = thread::Builder::new().spawn(move || {
let mut monitor =
BackgroundHangMonitorWorker::new(constellation_chan, control_port, port);
let mut monitor = BackgroundHangMonitorWorker::new(
constellation_chan,
control_port,
port,
monitoring_enabled,
);
while monitor.run() {
// Monitoring until all senders have been dropped...
}
});
Box::new(HangMonitorRegister { sender })
Box::new(HangMonitorRegister {
sender,
monitoring_enabled,
})
}
}
@ -48,8 +60,13 @@ impl BackgroundHangMonitorRegister for HangMonitorRegister {
component_id: MonitoredComponentId,
transient_hang_timeout: Duration,
permanent_hang_timeout: Duration,
exit_signal: Option<Box<dyn BackgroundHangMonitorExitSignal>>,
) -> Box<dyn BackgroundHangMonitor> {
let bhm_chan = BackgroundHangMonitorChan::new(self.sender.clone(), component_id);
let bhm_chan = BackgroundHangMonitorChan::new(
self.sender.clone(),
component_id,
self.monitoring_enabled,
);
#[cfg(all(
target_os = "windows",
@ -71,6 +88,7 @@ impl BackgroundHangMonitorRegister for HangMonitorRegister {
thread::current().name().map(str::to_owned),
transient_hang_timeout,
permanent_hang_timeout,
exit_signal,
));
Box::new(bhm_chan)
}
@ -85,7 +103,13 @@ impl BackgroundHangMonitorClone for HangMonitorRegister {
/// Messages sent from monitored components to the monitor.
pub enum MonitoredComponentMsg {
/// Register component for monitoring,
Register(Box<dyn Sampler>, Option<String>, Duration, Duration),
Register(
Box<dyn Sampler>,
Option<String>,
Duration,
Duration,
Option<Box<dyn BackgroundHangMonitorExitSignal>>,
),
/// Unregister component for monitoring.
Unregister,
/// Notify start of new activity for a given component,
@ -101,17 +125,20 @@ pub struct BackgroundHangMonitorChan {
sender: Sender<(MonitoredComponentId, MonitoredComponentMsg)>,
component_id: MonitoredComponentId,
disconnected: Cell<bool>,
monitoring_enabled: bool,
}
impl BackgroundHangMonitorChan {
pub fn new(
sender: Sender<(MonitoredComponentId, MonitoredComponentMsg)>,
component_id: MonitoredComponentId,
monitoring_enabled: bool,
) -> Self {
BackgroundHangMonitorChan {
sender,
component_id: component_id,
disconnected: Default::default(),
monitoring_enabled,
}
}
@ -128,13 +155,17 @@ impl BackgroundHangMonitorChan {
impl BackgroundHangMonitor for BackgroundHangMonitorChan {
fn notify_activity(&self, annotation: HangAnnotation) {
if self.monitoring_enabled {
let msg = MonitoredComponentMsg::NotifyActivity(annotation);
self.send(msg);
}
}
fn notify_wait(&self) {
if self.monitoring_enabled {
let msg = MonitoredComponentMsg::NotifyWait;
self.send(msg);
}
}
fn unregister(&self) {
let msg = MonitoredComponentMsg::Unregister;
self.send(msg);
@ -150,6 +181,7 @@ struct MonitoredComponent {
sent_transient_alert: bool,
sent_permanent_alert: bool,
is_waiting: bool,
exit_signal: Option<Box<dyn BackgroundHangMonitorExitSignal>>,
}
struct Sample(MonitoredComponentId, Instant, NativeStack);
@ -159,20 +191,22 @@ pub struct BackgroundHangMonitorWorker {
monitored_components: HashMap<MonitoredComponentId, MonitoredComponent>,
constellation_chan: IpcSender<HangMonitorAlert>,
port: Receiver<(MonitoredComponentId, MonitoredComponentMsg)>,
control_port: Receiver<SamplerControlMsg>,
control_port: Receiver<BackgroundHangMonitorControlMsg>,
sampling_duration: Option<Duration>,
sampling_max_duration: Option<Duration>,
last_sample: Instant,
creation: Instant,
sampling_baseline: Instant,
samples: VecDeque<Sample>,
monitoring_enabled: bool,
}
impl BackgroundHangMonitorWorker {
pub fn new(
constellation_chan: IpcSender<HangMonitorAlert>,
control_port: IpcReceiver<SamplerControlMsg>,
control_port: IpcReceiver<BackgroundHangMonitorControlMsg>,
port: Receiver<(MonitoredComponentId, MonitoredComponentMsg)>,
monitoring_enabled: bool,
) -> Self {
let control_port = ROUTER.route_ipc_receiver_to_new_crossbeam_receiver(control_port);
Self {
@ -187,6 +221,7 @@ impl BackgroundHangMonitorWorker {
sampling_baseline: Instant::now(),
creation: Instant::now(),
samples: Default::default(),
monitoring_enabled,
}
}
@ -232,13 +267,19 @@ impl BackgroundHangMonitorWorker {
}
pub fn run(&mut self) -> bool {
let timeout = if let Some(duration) = self.sampling_duration {
duration
let tick = if let Some(duration) = self.sampling_duration {
let duration = duration
.checked_sub(Instant::now() - self.last_sample)
.unwrap_or_else(|| Duration::from_millis(0))
.unwrap_or_else(|| Duration::from_millis(0));
after(duration)
} else {
Duration::from_millis(100)
if self.monitoring_enabled {
after(Duration::from_millis(100))
} else {
never()
}
};
let received = select! {
recv(self.port) -> event => {
match event {
@ -249,24 +290,38 @@ impl BackgroundHangMonitorWorker {
},
recv(self.control_port) -> event => {
match event {
Ok(SamplerControlMsg::Enable(rate, max_duration)) => {
Ok(BackgroundHangMonitorControlMsg::EnableSampler(rate, max_duration)) => {
println!("Enabling profiler.");
self.sampling_duration = Some(rate);
self.sampling_max_duration = Some(max_duration);
self.sampling_baseline = Instant::now();
None
}
Ok(SamplerControlMsg::Disable) => {
},
Ok(BackgroundHangMonitorControlMsg::DisableSampler) => {
println!("Disabling profiler.");
self.finish_sampled_profile();
self.sampling_duration = None;
None
return true;
},
Ok(BackgroundHangMonitorControlMsg::Exit(sender)) => {
for component in self.monitored_components.values() {
if let Some(signal) = component.exit_signal.as_ref() {
signal.signal_to_exit();
}
}
// Confirm exit to the constellation.
let _ = sender.send(());
// Also exit the BHM.
return false;
},
Err(_) => return false,
}
}
recv(after(timeout)) -> _ => None,
recv(tick) -> _ => None,
};
if let Some(msg) = received {
self.handle_msg(msg);
while let Ok(another_msg) = self.port.try_recv() {
@ -297,6 +352,7 @@ impl BackgroundHangMonitorWorker {
name,
transient_hang_timeout,
permanent_hang_timeout,
exit_signal,
),
) => {
let component = MonitoredComponent {
@ -308,6 +364,7 @@ impl BackgroundHangMonitorWorker {
sent_transient_alert: false,
sent_permanent_alert: false,
is_waiting: true,
exit_signal,
};
if let Some(name) = name {
self.component_names.insert(component_id.clone(), name);
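
For context, the `tick` change above leans on crossbeam's `never()` channel: a receiver that never delivers makes its `select!` arm inert, so a worker with monitoring disabled blocks until a real message arrives instead of waking up every 100ms. A minimal standalone sketch of the pattern (illustrative only, not Servo code):

use crossbeam_channel::{after, never, select, unbounded, Receiver};
use std::time::{Duration, Instant};

fn main() {
    let (sender, port) = unbounded::<&'static str>();
    let monitoring_enabled = false;
    // With monitoring disabled, the tick arm never fires, so the select
    // below blocks until a message arrives on `port`.
    let tick: Receiver<Instant> = if monitoring_enabled {
        after(Duration::from_millis(100))
    } else {
        never()
    };
    sender.send("hello").unwrap();
    select! {
        recv(port) -> msg => println!("got {:?}", msg),
        recv(tick) -> _ => println!("tick"),
    }
}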

View file

@ -9,8 +9,13 @@ use background_hang_monitor::HangMonitorRegister;
use ipc_channel::ipc;
use msg::constellation_msg::ScriptHangAnnotation;
use msg::constellation_msg::TEST_PIPELINE_ID;
use msg::constellation_msg::{HangAlert, HangAnnotation, HangMonitorAlert};
use msg::constellation_msg::{
BackgroundHangMonitorControlMsg, BackgroundHangMonitorExitSignal, HangAlert, HangAnnotation,
HangMonitorAlert,
};
use msg::constellation_msg::{MonitoredComponentId, MonitoredComponentType};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
@ -27,12 +32,16 @@ fn test_hang_monitoring() {
ipc::channel().expect("ipc channel failure");
let (_sampler_sender, sampler_receiver) = ipc::channel().expect("ipc channel failure");
let background_hang_monitor_register =
HangMonitorRegister::init(background_hang_monitor_ipc_sender.clone(), sampler_receiver);
let background_hang_monitor_register = HangMonitorRegister::init(
background_hang_monitor_ipc_sender.clone(),
sampler_receiver,
true,
);
let background_hang_monitor = background_hang_monitor_register.register_component(
MonitoredComponentId(TEST_PIPELINE_ID, MonitoredComponentType::Script),
Duration::from_millis(10),
Duration::from_millis(1000),
None,
);
// Start an activity.
@ -125,12 +134,16 @@ fn test_hang_monitoring_unregister() {
ipc::channel().expect("ipc channel failure");
let (_sampler_sender, sampler_receiver) = ipc::channel().expect("ipc channel failure");
let background_hang_monitor_register =
HangMonitorRegister::init(background_hang_monitor_ipc_sender.clone(), sampler_receiver);
let background_hang_monitor_register = HangMonitorRegister::init(
background_hang_monitor_ipc_sender.clone(),
sampler_receiver,
true,
);
let background_hang_monitor = background_hang_monitor_register.register_component(
MonitoredComponentId(TEST_PIPELINE_ID, MonitoredComponentType::Script),
Duration::from_millis(10),
Duration::from_millis(1000),
None,
);
// Start an activity.
@ -146,3 +159,50 @@ fn test_hang_monitoring_unregister() {
// No new alert yet
assert!(background_hang_monitor_receiver.try_recv().is_err());
}
#[test]
fn test_hang_monitoring_exit_signal() {
let _lock = SERIAL.lock().unwrap();
let (background_hang_monitor_ipc_sender, _background_hang_monitor_receiver) =
ipc::channel().expect("ipc channel failure");
let (control_sender, control_receiver) = ipc::channel().expect("ipc channel failure");
struct BHMExitSignal {
closing: Arc<AtomicBool>,
}
impl BackgroundHangMonitorExitSignal for BHMExitSignal {
fn signal_to_exit(&self) {
self.closing.store(true, Ordering::SeqCst);
}
}
let closing = Arc::new(AtomicBool::new(false));
let signal = BHMExitSignal {
closing: closing.clone(),
};
// Init a worker without active monitoring.
let background_hang_monitor_register = HangMonitorRegister::init(
background_hang_monitor_ipc_sender.clone(),
control_receiver,
false,
);
let _background_hang_monitor = background_hang_monitor_register.register_component(
MonitoredComponentId(TEST_PIPELINE_ID, MonitoredComponentType::Script),
Duration::from_millis(10),
Duration::from_millis(1000),
Some(Box::new(signal)),
);
let (exit_sender, exit_receiver) = ipc::channel().expect("Failed to create IPC channel!");
// Send the exit message.
let _ = control_sender.send(BackgroundHangMonitorControlMsg::Exit(exit_sender));
// Assert we receive a confirmation back.
assert!(exit_receiver.recv().is_ok());
// Assert we get the exit signal.
while !closing.load(Ordering::SeqCst) {}
}

View file

@ -126,7 +126,9 @@ use keyboard_types::KeyboardEvent;
use layout_traits::LayoutThreadFactory;
use log::{Level, LevelFilter, Log, Metadata, Record};
use media::{GLPlayerThreads, WindowGLContext};
use msg::constellation_msg::{BackgroundHangMonitorRegister, HangMonitorAlert, SamplerControlMsg};
use msg::constellation_msg::{
BackgroundHangMonitorControlMsg, BackgroundHangMonitorRegister, HangMonitorAlert,
};
use msg::constellation_msg::{
BroadcastChannelRouterId, MessagePortId, MessagePortRouterId, PipelineNamespace,
PipelineNamespaceId, PipelineNamespaceRequest, TraversalDirection,
@ -294,16 +296,18 @@ pub struct Constellation<Message, LTF, STF, SWF> {
/// None when in multiprocess mode.
background_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
/// Channels to control all sampling profilers.
sampling_profiler_control: Vec<IpcSender<SamplerControlMsg>>,
/// Channels to control all background-hang monitors.
/// TODO: store them on the relevant BrowsingContextGroup,
/// so that they can be controlled on a "per-tab/event-loop" basis.
background_monitor_control_senders: Vec<IpcSender<BackgroundHangMonitorControlMsg>>,
/// A channel for the background hang monitor to send messages
/// to the constellation.
background_hang_monitor_sender: Option<IpcSender<HangMonitorAlert>>,
background_hang_monitor_sender: IpcSender<HangMonitorAlert>,
/// A channel for the constellation to receive messages
/// from the background hang monitor.
background_hang_monitor_receiver: Option<Receiver<Result<HangMonitorAlert, IpcError>>>,
background_hang_monitor_receiver: Receiver<Result<HangMonitorAlert, IpcError>>,
/// An IPC channel for layout threads to send messages to the constellation.
/// This is the layout threads' view of `layout_receiver`.
@ -783,41 +787,27 @@ where
ipc_scheduler_receiver,
);
let (background_hang_monitor_sender, background_hang_monitor_receiver) =
if opts::get().background_hang_monitor {
let (bhm_sender, ipc_bhm_receiver) =
let (background_hang_monitor_sender, ipc_bhm_receiver) =
ipc::channel().expect("ipc channel failure");
(
Some(bhm_sender),
Some(route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(
ipc_bhm_receiver,
)),
)
} else {
(None, None)
};
let background_hang_monitor_receiver =
route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(ipc_bhm_receiver);
// If we are in multiprocess mode,
// a dedicated per-process hang monitor will be initialized later inside the content process.
// See run_content_process in servo/lib.rs
let (background_monitor_register, sampler_chan) =
if opts::multiprocess() || !opts::get().background_hang_monitor {
let (background_monitor_register, bhm_control_chans) = if opts::multiprocess() {
(None, vec![])
} else {
let (sampling_profiler_control, sampling_profiler_port) =
let (bhm_control_chan, bhm_control_port) =
ipc::channel().expect("ipc channel failure");
if let Some(bhm_sender) = background_hang_monitor_sender.clone() {
(
Some(HangMonitorRegister::init(
bhm_sender,
sampling_profiler_port,
background_hang_monitor_sender.clone(),
bhm_control_port,
opts::get().background_hang_monitor,
)),
vec![sampling_profiler_control],
vec![bhm_control_chan],
)
} else {
warn!("No BHM sender found in BHM mode.");
(None, vec![])
}
};
let (ipc_layout_sender, ipc_layout_receiver) =
@ -871,7 +861,7 @@ where
background_hang_monitor_sender,
background_hang_monitor_receiver,
background_monitor_register,
sampling_profiler_control: sampler_chan,
background_monitor_control_senders: bhm_control_chans,
layout_sender: ipc_layout_sender,
script_receiver: script_receiver,
compositor_receiver: compositor_receiver,
@ -1197,8 +1187,8 @@ where
Err(e) => return self.handle_send_error(pipeline_id, e),
};
if let Some(sampler_chan) = pipeline.sampler_control_chan {
self.sampling_profiler_control.push(sampler_chan);
if let Some(chan) = pipeline.bhm_control_chan {
self.background_monitor_control_senders.push(chan);
}
if let Some(host) = host {
@ -1363,7 +1353,7 @@ where
recv(self.script_receiver) -> msg => {
msg.expect("Unexpected script channel panic in constellation").map(Request::Script)
}
recv(self.background_hang_monitor_receiver.as_ref().unwrap_or(&never())) -> msg => {
recv(self.background_hang_monitor_receiver) -> msg => {
msg.expect("Unexpected BHM channel panic in constellation").map(Request::BackgroundHangMonitor)
}
recv(self.compositor_receiver) -> msg => {
@ -1629,15 +1619,18 @@ where
},
FromCompositorMsg::SetCursor(cursor) => self.handle_set_cursor_msg(cursor),
FromCompositorMsg::EnableProfiler(rate, max_duration) => {
for chan in &self.sampling_profiler_control {
if let Err(e) = chan.send(SamplerControlMsg::Enable(rate, max_duration)) {
for chan in &self.background_monitor_control_senders {
if let Err(e) = chan.send(BackgroundHangMonitorControlMsg::EnableSampler(
rate,
max_duration,
)) {
warn!("error communicating with sampling profiler: {}", e);
}
}
},
FromCompositorMsg::DisableProfiler => {
for chan in &self.sampling_profiler_control {
if let Err(e) = chan.send(SamplerControlMsg::Disable) {
for chan in &self.background_monitor_control_senders {
if let Err(e) = chan.send(BackgroundHangMonitorControlMsg::DisableSampler) {
warn!("error communicating with sampling profiler: {}", e);
}
}
@ -2658,6 +2651,22 @@ where
self.mem_profiler_chan.send(mem::ProfilerMsg::Exit);
// Tell all BHMs to exit,
// and to ensure their monitored components exit
// even when currently hanging (on JS or sync XHR).
// This must be done before starting the process of closing all pipelines.
for chan in &self.background_monitor_control_senders {
let (exit_sender, exit_receiver) =
ipc::channel().expect("Failed to create IPC channel!");
if let Err(e) = chan.send(BackgroundHangMonitorControlMsg::Exit(exit_sender)) {
warn!("error communicating with bhm: {}", e);
continue;
}
if exit_receiver.recv().is_err() {
warn!("Failed to receive exit confirmation from BHM.");
}
}
// Close the top-level browsing contexts
let browsing_context_ids: Vec<BrowsingContextId> = self
.browsing_contexts

View file

@ -21,7 +21,9 @@ use layout_traits::LayoutThreadFactory;
use media::WindowGLContext;
use metrics::PaintTimeMetrics;
use msg::constellation_msg::TopLevelBrowsingContextId;
use msg::constellation_msg::{BackgroundHangMonitorRegister, HangMonitorAlert, SamplerControlMsg};
use msg::constellation_msg::{
BackgroundHangMonitorControlMsg, BackgroundHangMonitorRegister, HangMonitorAlert,
};
use msg::constellation_msg::{BrowsingContextId, HistoryStateId};
use msg::constellation_msg::{
PipelineId, PipelineNamespace, PipelineNamespaceId, PipelineNamespaceRequest,
@ -128,7 +130,7 @@ pub struct InitialPipelineState {
pub background_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
/// A channel for the background hang monitor to send messages to the constellation.
pub background_hang_monitor_to_constellation_chan: Option<IpcSender<HangMonitorAlert>>,
pub background_hang_monitor_to_constellation_chan: IpcSender<HangMonitorAlert>,
/// A channel for the layout thread to send messages to the constellation.
pub layout_to_constellation_chan: IpcSender<LayoutMsg>,
@ -205,7 +207,7 @@ pub struct InitialPipelineState {
pub struct NewPipeline {
pub pipeline: Pipeline,
pub sampler_control_chan: Option<IpcSender<SamplerControlMsg>>,
pub bhm_control_chan: Option<IpcSender<BackgroundHangMonitorControlMsg>>,
}
impl Pipeline {
@ -220,7 +222,7 @@ impl Pipeline {
// probably requires a general low-memory strategy.
let (pipeline_chan, pipeline_port) = ipc::channel().expect("Pipeline main chan");
let (script_chan, sampler_chan) = match state.event_loop {
let (script_chan, bhm_control_chan) = match state.event_loop {
Some(script_chan) => {
let new_layout_info = NewLayoutInfo {
parent_info: state.parent_pipeline_id,
@ -279,7 +281,7 @@ impl Pipeline {
background_hang_monitor_to_constellation_chan: state
.background_hang_monitor_to_constellation_chan
.clone(),
sampling_profiler_port: None,
bhm_control_port: None,
scheduler_chan: state.scheduler_chan,
devtools_chan: script_to_devtools_chan,
bluetooth_thread: state.bluetooth_thread,
@ -309,34 +311,26 @@ impl Pipeline {
// Spawn the child process.
//
// Yes, that's all there is to it!
let sampler_chan = if opts::multiprocess() {
let (sampler_chan, sampler_port) = ipc::channel().expect("Sampler chan");
unprivileged_pipeline_content.sampling_profiler_port = Some(sampler_port);
let bhm_control_chan = if opts::multiprocess() {
let (bhm_control_chan, bhm_control_port) =
ipc::channel().expect("Sampler chan");
unprivileged_pipeline_content.bhm_control_port = Some(bhm_control_port);
let _ = unprivileged_pipeline_content.spawn_multiprocess()?;
Some(sampler_chan)
Some(bhm_control_chan)
} else {
// Should not be None in single-process mode.
if opts::get().background_hang_monitor {
let register = state.background_monitor_register.expect(
"Couldn't start content, no background monitor has been initiated",
);
let register = state
.background_monitor_register
.expect("Couldn't start content, no background monitor has been initiated");
unprivileged_pipeline_content.start_all::<Message, LTF, STF>(
false,
Some(register),
register,
state.event_loop_waker,
);
None
} else {
unprivileged_pipeline_content.start_all::<Message, LTF, STF>(
false,
None,
state.event_loop_waker,
);
None
}
};
(EventLoop::new(script_chan), sampler_chan)
(EventLoop::new(script_chan), bhm_control_chan)
},
};
@ -353,7 +347,7 @@ impl Pipeline {
);
Ok(NewPipeline {
pipeline,
sampler_control_chan: sampler_chan,
bhm_control_chan,
})
}
@ -494,8 +488,8 @@ pub struct UnprivilegedPipelineContent {
opener: Option<BrowsingContextId>,
namespace_request_sender: IpcSender<PipelineNamespaceRequest>,
script_to_constellation_chan: ScriptToConstellationChan,
background_hang_monitor_to_constellation_chan: Option<IpcSender<HangMonitorAlert>>,
sampling_profiler_port: Option<IpcReceiver<SamplerControlMsg>>,
background_hang_monitor_to_constellation_chan: IpcSender<HangMonitorAlert>,
bhm_control_port: Option<IpcReceiver<BackgroundHangMonitorControlMsg>>,
layout_to_constellation_chan: IpcSender<LayoutMsg>,
scheduler_chan: IpcSender<TimerSchedulerMsg>,
devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
@ -526,7 +520,7 @@ impl UnprivilegedPipelineContent {
pub fn start_all<Message, LTF, STF>(
self,
wait_for_completion: bool,
background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
event_loop_waker: Option<Box<dyn EventLoopWaker>>,
) where
LTF: LayoutThreadFactory<Message = Message>,
@ -634,17 +628,12 @@ impl UnprivilegedPipelineContent {
pub fn register_with_background_hang_monitor(
&mut self,
) -> Option<Box<dyn BackgroundHangMonitorRegister>> {
self.background_hang_monitor_to_constellation_chan
.clone()
.map(|bhm| {
) -> Box<dyn BackgroundHangMonitorRegister> {
HangMonitorRegister::init(
bhm.clone(),
self.sampling_profiler_port
.take()
.expect("no sampling profiler?"),
self.background_hang_monitor_to_constellation_chan.clone(),
self.bhm_control_port.take().expect("no sampling profiler?"),
opts::get().background_hang_monitor,
)
})
}
pub fn script_to_constellation_chan(&self) -> &ScriptToConstellationChan {

View file

@ -153,7 +153,7 @@ pub struct LayoutThread {
font_cache_sender: IpcSender<()>,
/// A means of communication with the background hang monitor.
background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
background_hang_monitor: Box<dyn BackgroundHangMonitor>,
/// The channel on which messages can be sent to the constellation.
constellation_chan: IpcSender<ConstellationMsg>,
@ -266,7 +266,7 @@ impl LayoutThreadFactory for LayoutThread {
is_iframe: bool,
chan: (Sender<Msg>, Receiver<Msg>),
pipeline_port: IpcReceiver<LayoutControlMsg>,
background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache: Arc<dyn ImageCache>,
@ -299,13 +299,13 @@ impl LayoutThreadFactory for LayoutThread {
// Ensures layout thread is destroyed before we send shutdown message
let sender = chan.0;
let background_hang_monitor = background_hang_monitor_register.map(|bhm| {
bhm.register_component(
let background_hang_monitor = background_hang_monitor_register
.register_component(
MonitoredComponentId(id, MonitoredComponentType::Layout),
Duration::from_millis(1000),
Duration::from_millis(5000),
)
});
None,
);
let layout = LayoutThread::new(
id,
@ -483,7 +483,7 @@ impl LayoutThread {
is_iframe: bool,
port: Receiver<Msg>,
pipeline_port: IpcReceiver<LayoutControlMsg>,
background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
background_hang_monitor: Box<dyn BackgroundHangMonitor>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache: Arc<dyn ImageCache>,
@ -656,8 +656,7 @@ impl LayoutThread {
Msg::SetNavigationStart(..) => LayoutHangAnnotation::SetNavigationStart,
};
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_activity(HangAnnotation::Layout(hang_annotation)));
.notify_activity(HangAnnotation::Layout(hang_annotation));
}
/// Receives and dispatches messages from the script and constellation threads
@ -669,9 +668,7 @@ impl LayoutThread {
}
// Notify the background-hang-monitor we are waiting for an event.
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_wait());
self.background_hang_monitor.notify_wait();
let request = select! {
recv(self.pipeline_port) -> msg => Request::FromPipeline(msg.unwrap()),
@ -926,9 +923,7 @@ impl LayoutThread {
);
self.root_flow.borrow_mut().take();
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.unregister());
self.background_hang_monitor.unregister();
}
fn handle_add_stylesheet(&self, stylesheet: &Stylesheet, guard: &SharedRwLockReadGuard) {

View file

@ -134,7 +134,7 @@ pub struct LayoutThread {
font_cache_sender: IpcSender<()>,
/// A means of communication with the background hang monitor.
background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
background_hang_monitor: Box<dyn BackgroundHangMonitor>,
/// The channel on which messages can be sent to the script thread.
script_chan: IpcSender<ConstellationControlMsg>,
@ -234,7 +234,7 @@ impl LayoutThreadFactory for LayoutThread {
is_iframe: bool,
chan: (Sender<Msg>, Receiver<Msg>),
pipeline_port: IpcReceiver<LayoutControlMsg>,
background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache: Arc<dyn ImageCache>,
@ -267,13 +267,13 @@ impl LayoutThreadFactory for LayoutThread {
// Ensures layout thread is destroyed before we send shutdown message
let sender = chan.0;
let background_hang_monitor = background_hang_monitor_register.map(|bhm| {
bhm.register_component(
let background_hang_monitor = background_hang_monitor_register
.register_component(
MonitoredComponentId(id, MonitoredComponentType::Layout),
Duration::from_millis(1000),
Duration::from_millis(5000),
)
});
None,
);
let layout = LayoutThread::new(
id,
@ -450,7 +450,7 @@ impl LayoutThread {
is_iframe: bool,
port: Receiver<Msg>,
pipeline_port: IpcReceiver<LayoutControlMsg>,
background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
background_hang_monitor: Box<dyn BackgroundHangMonitor>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache: Arc<dyn ImageCache>,
@ -619,8 +619,7 @@ impl LayoutThread {
Msg::SetNavigationStart(..) => LayoutHangAnnotation::SetNavigationStart,
};
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_activity(HangAnnotation::Layout(hang_annotation)));
.notify_activity(HangAnnotation::Layout(hang_annotation));
}
/// Receives and dispatches messages from the script and constellation threads
@ -632,9 +631,7 @@ impl LayoutThread {
}
// Notify the background-hang-monitor we are waiting for an event.
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_wait());
self.background_hang_monitor.notify_wait();
let request = select! {
recv(self.pipeline_port) -> msg => Request::FromPipeline(msg.unwrap()),
@ -851,9 +848,7 @@ impl LayoutThread {
}
fn exit_now(&mut self) {
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.unregister());
self.background_hang_monitor.unregister();
}
fn handle_add_stylesheet(&self, stylesheet: &Stylesheet, guard: &SharedRwLockReadGuard) {

View file

@ -36,7 +36,7 @@ pub trait LayoutThreadFactory {
is_iframe: bool,
chan: (Sender<Self::Message>, Receiver<Self::Message>),
pipeline_port: IpcReceiver<LayoutControlMsg>,
background_hang_monitor: Option<Box<dyn BackgroundHangMonitorRegister>>,
background_hang_monitor: Box<dyn BackgroundHangMonitorRegister>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache: Arc<dyn ImageCache>,

View file

@ -679,6 +679,7 @@ pub trait BackgroundHangMonitorRegister: BackgroundHangMonitorClone + Send {
component: MonitoredComponentId,
transient_hang_timeout: Duration,
permanent_hang_timeout: Duration,
exit_signal: Option<Box<dyn BackgroundHangMonitorExitSignal>>,
) -> Box<dyn BackgroundHangMonitor>;
}
@ -702,10 +703,21 @@ pub trait BackgroundHangMonitor {
fn unregister(&self);
}
/// A means for the BHM to signal a monitored component to exit.
/// Useful when the component is hanging and cannot be notified in the usual way.
/// The component should implement this in a way that allows the signal to be received
/// even while it is hanging, if at all possible.
pub trait BackgroundHangMonitorExitSignal: Send {
/// Called by the BHM, to notify the monitored component to exit.
fn signal_to_exit(&self);
}
/// Messages to control the background hang monitor, including its sampling profiler.
#[derive(Deserialize, Serialize)]
pub enum SamplerControlMsg {
pub enum BackgroundHangMonitorControlMsg {
/// Enable the sampler, with a given sampling rate and max total sampling duration.
Enable(Duration, Duration),
Disable,
EnableSampler(Duration, Duration),
DisableSampler,
/// Exit, and propagate the signal to monitored components.
Exit(IpcSender<()>),
}
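
As the new test earlier in this commit and the `ScriptThread` changes further below illustrate, an implementation of the `BackgroundHangMonitorExitSignal` trait above is typically just a shared atomic flag, optionally paired with a JS interrupt request. A minimal sketch (the type name is illustrative):

use msg::constellation_msg::BackgroundHangMonitorExitSignal;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

struct AtomicExitSignal {
    closing: Arc<AtomicBool>,
}

impl BackgroundHangMonitorExitSignal for AtomicExitSignal {
    fn signal_to_exit(&self) {
        // Called from the BHM thread; the monitored component checks the
        // shared flag from its own thread (for example, inside a JS
        // interrupt callback) and shuts itself down.
        self.closing.store(true, Ordering::SeqCst);
    }
}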

View file

@ -42,7 +42,7 @@ use crate::dom::gpucommandencoder::GPUCommandEncoderState;
use crate::dom::htmlimageelement::SourceSet;
use crate::dom::htmlmediaelement::{HTMLMediaElementFetchContext, MediaFrameRenderer};
use crate::dom::identityhub::Identities;
use crate::script_runtime::StreamConsumer;
use crate::script_runtime::{ContextForRequestInterrupt, StreamConsumer};
use crate::script_thread::IncompleteParserContexts;
use crate::task::TaskBox;
use app_units::Au;
@ -502,6 +502,7 @@ unsafe_no_jsmanaged_fields!(TimelineMarkerType);
unsafe_no_jsmanaged_fields!(WorkerId);
unsafe_no_jsmanaged_fields!(BufferQueue, QuirksMode, StrTendril);
unsafe_no_jsmanaged_fields!(Runtime);
unsafe_no_jsmanaged_fields!(ContextForRequestInterrupt);
unsafe_no_jsmanaged_fields!(HeaderMap, Method);
unsafe_no_jsmanaged_fields!(WindowProxyHandler);
unsafe_no_jsmanaged_fields!(UntrustedNodeAddress, OpaqueNode);

View file

@ -30,7 +30,8 @@ use crate::fetch::load_whole_resource;
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_runtime::ScriptThreadEventCategory::WorkerEvent;
use crate::script_runtime::{
new_child_runtime, CommonScriptMsg, JSContext as SafeJSContext, Runtime, ScriptChan, ScriptPort,
new_child_runtime, CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext,
Runtime, ScriptChan, ScriptPort,
};
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
use crate::task_source::networking::NetworkingTaskSource;
@ -256,7 +257,7 @@ impl DedicatedWorkerGlobalScope {
worker_url,
runtime,
from_devtools_receiver,
Some(closing),
closing,
gpu_id_hub,
),
task_queue: TaskQueue::new(receiver, own_sender.clone()),
@ -324,6 +325,7 @@ impl DedicatedWorkerGlobalScope {
browsing_context: Option<BrowsingContextId>,
gpu_id_hub: Arc<Mutex<Identities>>,
control_receiver: Receiver<DedicatedWorkerControlMsg>,
context_sender: Sender<ContextForRequestInterrupt>,
) -> JoinHandle<()> {
let serialized_worker_url = worker_url.to_string();
let name = format!("WebWorker for {}", serialized_worker_url);
@ -377,6 +379,8 @@ impl DedicatedWorkerGlobalScope {
new_child_runtime(parent, Some(task_source))
};
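// Hand a Send-able handle to this worker's JSContext back to the parent,
// so the worker can later be interrupted from another thread during shutdown.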
let _ = context_sender.send(ContextForRequestInterrupt::new(runtime.cx()));
let (devtools_mpsc_chan, devtools_mpsc_port) = unbounded();
ROUTER.route_ipc_receiver_to_crossbeam_sender(
from_devtools_receiver,

View file

@ -52,7 +52,9 @@ use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::microtask::{Microtask, MicrotaskQueue, UserMicrotask};
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_module::ModuleTree;
use crate::script_runtime::{CommonScriptMsg, JSContext as SafeJSContext, ScriptChan, ScriptPort};
use crate::script_runtime::{
CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext, ScriptChan, ScriptPort,
};
use crate::script_thread::{MainThreadScriptChan, ScriptThread};
use crate::task::TaskCanceller;
use crate::task_source::dom_manipulation::DOMManipulationTaskSource;
@ -130,6 +132,8 @@ pub struct AutoCloseWorker {
/// A sender of control messages,
/// currently only used to signal shutdown.
control_sender: Sender<DedicatedWorkerControlMsg>,
/// The context to request an interrupt on the worker thread.
context: ContextForRequestInterrupt,
}
impl Drop for AutoCloseWorker {
@ -146,6 +150,8 @@ impl Drop for AutoCloseWorker {
warn!("Couldn't send an exit message to a dedicated worker.");
}
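// The worker may currently be stuck running JS or blocked in a sync operation;
// request a JS interrupt so it notices the exit request promptly.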
self.context.request_interrupt();
// TODO: step 2 and 3.
// Step 4 is unnecessary since we don't use actual ports for dedicated workers.
if self
@ -2049,6 +2055,7 @@ impl GlobalScope {
closing: Arc<AtomicBool>,
join_handle: JoinHandle<()>,
control_sender: Sender<DedicatedWorkerControlMsg>,
context: ContextForRequestInterrupt,
) {
self.list_auto_close_worker
.borrow_mut()
@ -2056,6 +2063,7 @@ impl GlobalScope {
closing,
join_handle: Some(join_handle),
control_sender: control_sender,
context,
});
}
@ -2713,6 +2721,20 @@ impl GlobalScope {
unreachable!();
}
/// Returns a boolean indicating whether the event-loop
/// this global is running on can continue running JS.
pub fn can_continue_running(&self) -> bool {
if self.downcast::<Window>().is_some() {
return ScriptThread::can_continue_running();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return !worker.is_closing();
}
// TODO: plug worklets into this.
true
}
/// Returns the task canceller of this global to ensure that everything is
/// properly cancelled when the global scope is destroyed.
pub fn task_canceller(&self, name: TaskSourceName) -> TaskCanceller {
@ -2730,12 +2752,15 @@ impl GlobalScope {
/// Perform a microtask checkpoint.
pub fn perform_a_microtask_checkpoint(&self) {
// Only perform the checkpoint if we're not shutting down.
if self.can_continue_running() {
self.microtask_queue.checkpoint(
self.get_cx(),
|_| Some(DomRoot::from_ref(self)),
vec![DomRoot::from_ref(self)],
);
}
}
/// Enqueue a microtask for subsequent execution.
pub fn enqueue_microtask(&self, job: Microtask) {
@ -2761,8 +2786,9 @@ impl GlobalScope {
}
/// Process a single event as if it were the next event
/// in the thread queue for this global scope.
pub fn process_event(&self, msg: CommonScriptMsg) {
/// in the queue of the event-loop this global scope is running on.
/// Returns a boolean indicating whether further events should be processed.
pub fn process_event(&self, msg: CommonScriptMsg) -> bool {
if self.is::<Window>() {
return ScriptThread::process_event(msg);
}

View file

@ -25,7 +25,8 @@ use crate::dom::workerglobalscope::WorkerGlobalScope;
use crate::fetch::load_whole_resource;
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_runtime::{
new_rt_and_cx, CommonScriptMsg, JSContext as SafeJSContext, Runtime, ScriptChan,
new_rt_and_cx, CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext,
Runtime, ScriptChan,
};
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
use crate::task_source::TaskSourceName;
@ -44,6 +45,7 @@ use script_traits::{ScopeThings, ServiceWorkerMsg, WorkerGlobalScopeInit, Worker
use servo_config::pref;
use servo_rand::random;
use servo_url::ServoUrl;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
@ -225,6 +227,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
closing: Arc<AtomicBool>,
) -> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(
@ -234,7 +237,7 @@ impl ServiceWorkerGlobalScope {
worker_url,
runtime,
from_devtools_receiver,
None,
closing,
Arc::new(Mutex::new(Identities::new())),
),
task_queue: TaskQueue::new(receiver, own_sender.clone()),
@ -258,6 +261,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
closing: Arc<AtomicBool>,
) -> DomRoot<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = Box::new(ServiceWorkerGlobalScope::new_inherited(
@ -271,6 +275,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender,
scope_url,
control_receiver,
closing,
));
unsafe { ServiceWorkerGlobalScopeBinding::Wrap(SafeJSContext::from_ptr(cx), scope) }
}
@ -285,6 +290,8 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
context_sender: Sender<ContextForRequestInterrupt>,
closing: Arc<AtomicBool>,
) -> JoinHandle<()> {
let ScopeThings {
script_url,
@ -300,6 +307,8 @@ impl ServiceWorkerGlobalScope {
.spawn(move || {
thread_state::initialize(ThreadState::SCRIPT | ThreadState::IN_WORKER);
let runtime = new_rt_and_cx(None);
let _ = context_sender.send(ContextForRequestInterrupt::new(runtime.cx()));
let roots = RootCollection::new();
let _stack_roots = ThreadLocalStackRoots::new(&roots);
@ -330,6 +339,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender,
scope_url,
control_receiver,
closing,
);
let referrer = referrer_url

View file

@ -125,6 +125,7 @@ impl Worker {
let init = prepare_workerscope_init(global, Some(devtools_sender), Some(worker_id));
let (control_sender, control_receiver) = unbounded();
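// A channel over which the spawned worker sends back a handle to its JSContext,
// so this (parent) global can request a JS interrupt when shutting the worker down.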
let (context_sender, context_receiver) = unbounded();
let join_handle = DedicatedWorkerGlobalScope::run_worker_scope(
init,
@ -142,9 +143,14 @@ impl Worker {
browsing_context,
global.wgpu_id_hub(),
control_receiver,
context_sender,
);
global.track_worker(closing, join_handle, control_sender);
let context = context_receiver
.recv()
.expect("Couldn't receive a context for worker.");
global.track_worker(closing, join_handle, control_sender, context);
Ok(worker)
}

View file

@ -98,7 +98,7 @@ pub struct WorkerGlobalScope {
worker_id: WorkerId,
worker_url: DomRefCell<ServoUrl>,
#[ignore_malloc_size_of = "Arc"]
closing: Option<Arc<AtomicBool>>,
closing: Arc<AtomicBool>,
#[ignore_malloc_size_of = "Defined in js"]
runtime: DomRefCell<Option<Runtime>>,
location: MutNullableDom<WorkerLocation>,
@ -126,7 +126,7 @@ impl WorkerGlobalScope {
worker_url: ServoUrl,
runtime: Runtime,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
closing: Option<Arc<AtomicBool>>,
closing: Arc<AtomicBool>,
gpu_id_hub: Arc<Mutex<Identities>>,
) -> Self {
// Install a pipeline-namespace in the current thread.
@ -193,11 +193,7 @@ impl WorkerGlobalScope {
}
pub fn is_closing(&self) -> bool {
if let Some(ref closing) = self.closing {
closing.load(Ordering::SeqCst)
} else {
false
}
self.closing.load(Ordering::SeqCst)
}
pub fn get_url(&self) -> Ref<ServoUrl> {
@ -494,7 +490,13 @@ impl WorkerGlobalScope {
}
}
pub fn process_event(&self, msg: CommonScriptMsg) {
/// Process a single event as if it were the next event
/// in the queue of this worker's event-loop.
/// Returns a boolean indicating whether further events should be processed.
pub fn process_event(&self, msg: CommonScriptMsg) -> bool {
if self.is_closing() {
return false;
}
match msg {
CommonScriptMsg::Task(_, task, _, _) => task.run_box(),
CommonScriptMsg::CollectReports(reports_chan) => {
@ -504,11 +506,10 @@ impl WorkerGlobalScope {
reports_chan.send(reports);
},
}
true
}
pub fn close(&self) {
if let Some(ref closing) = self.closing {
closing.store(true, Ordering::SeqCst);
}
self.closing.store(true, Ordering::SeqCst);
}
}

View file

@ -1544,7 +1544,10 @@ impl XMLHttpRequest {
if let Some(script_port) = script_port {
loop {
global.process_event(script_port.recv().unwrap());
if !global.process_event(script_port.recv().unwrap()) {
// We're exiting.
return Err(Error::Abort);
}
let context = context.lock().unwrap();
let sync_status = context.sync_status.borrow();
if let Some(ref status) = *sync_status {

View file

@ -147,4 +147,8 @@ impl MicrotaskQueue {
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
pub fn clear(&self) {
self.microtask_queue.borrow_mut().clear();
}
}

View file

@ -52,7 +52,10 @@ use js::jsapi::{BuildIdCharVector, DisableIncrementalGC, GCDescription, GCProgre
use js::jsapi::{Dispatchable as JSRunnable, Dispatchable_MaybeShuttingDown};
use js::jsapi::{HandleObject, Heap, JobQueue};
use js::jsapi::{JSContext as RawJSContext, JSTracer, SetDOMCallbacks, SetGCSliceCallback};
use js::jsapi::{JSGCInvocationKind, JSGCStatus, JS_AddExtraGCRootsTracer, JS_SetGCCallback};
use js::jsapi::{
JSGCInvocationKind, JSGCStatus, JS_AddExtraGCRootsTracer, JS_RequestInterruptCallback,
JS_SetGCCallback,
};
use js::jsapi::{JSGCMode, JSGCParamKey, JS_SetGCParameter, JS_SetGlobalJitCompilerOption};
use js::jsapi::{
JSJitCompilerOption, JS_SetOffthreadIonCompilationEnabled, JS_SetParallelParsingEnabled,
@ -845,6 +848,34 @@ unsafe fn set_gc_zeal_options(cx: *mut RawJSContext) {
#[cfg(not(feature = "debugmozjs"))]
unsafe fn set_gc_zeal_options(_: *mut RawJSContext) {}
#[repr(transparent)]
/// A wrapper around a JSContext that is Send,
/// enabling an interrupt to be requested
/// from a thread other than the one running JS using that context.
pub struct ContextForRequestInterrupt(*mut RawJSContext);
impl ContextForRequestInterrupt {
pub fn new(context: *mut RawJSContext) -> ContextForRequestInterrupt {
ContextForRequestInterrupt(context)
}
#[allow(unsafe_code)]
/// Can be called from any thread, to request the callback set by
/// JS_AddInterruptCallback to be called
/// on the thread where that context is running.
pub fn request_interrupt(&self) {
unsafe {
JS_RequestInterruptCallback(self.0);
}
}
}
#[allow(unsafe_code)]
/// It is safe to call `JS_RequestInterruptCallback(cx)` from any thread.
/// See the docs for the corresponding `requestInterrupt` method,
/// at `mozjs/js/src/vm/JSContext.h`.
unsafe impl Send for ContextForRequestInterrupt {}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct JSContext(*mut RawJSContext);

View file

@ -63,7 +63,9 @@ use crate::dom::workletglobalscope::WorkletGlobalScopeInit;
use crate::fetch::FetchCanceller;
use crate::microtask::{Microtask, MicrotaskQueue};
use crate::realms::enter_realm;
use crate::script_runtime::{get_reports, new_rt_and_cx, JSContext, Runtime, ScriptPort};
use crate::script_runtime::{
get_reports, new_rt_and_cx, ContextForRequestInterrupt, JSContext, Runtime, ScriptPort,
};
use crate::script_runtime::{CommonScriptMsg, ScriptChan, ScriptThreadEventCategory};
use crate::task_manager::TaskManager;
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
@ -97,14 +99,17 @@ use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use js::glue::GetWindowProxyClass;
use js::jsapi::JS_SetWrapObjectCallbacks;
use js::jsapi::{JSTracer, SetWindowProxyClass};
use js::jsapi::{
JSContext as UnsafeJSContext, JSTracer, JS_AddInterruptCallback, SetWindowProxyClass,
};
use js::jsval::UndefinedValue;
use js::rust::ParentRuntime;
use media::WindowGLContext;
use metrics::{PaintTimeMetrics, MAX_TASK_NS};
use mime::{self, Mime};
use msg::constellation_msg::{
BackgroundHangMonitor, BackgroundHangMonitorRegister, ScriptHangAnnotation,
BackgroundHangMonitor, BackgroundHangMonitorExitSignal, BackgroundHangMonitorRegister,
ScriptHangAnnotation,
};
use msg::constellation_msg::{BrowsingContextId, HistoryStateId, PipelineId};
use msg::constellation_msg::{HangAnnotation, MonitoredComponentId, MonitoredComponentType};
@ -149,7 +154,7 @@ use std::option::Option;
use std::ptr;
use std::rc::Rc;
use std::result::Result;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
@ -531,9 +536,11 @@ pub struct ScriptThread {
task_queue: TaskQueue<MainThreadScriptMsg>,
/// A handle to register associated layout threads for hang-monitoring.
background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
/// The dedicated means of communication with the background-hang-monitor for this script-thread.
background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
background_hang_monitor: Box<dyn BackgroundHangMonitor>,
/// A flag set to `true` by the BHM on exit, and checked from within the interrupt handler.
closing: Arc<AtomicBool>,
/// A channel to hand out to script thread-based entities that need to be able to enqueue
/// events in the event queue.
@ -686,6 +693,27 @@ pub struct ScriptThread {
webgpu_port: RefCell<Option<Receiver<WebGPUMsg>>>,
}
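/// The signal the script-thread hands to the BHM at registration time:
/// on exit it sets the shared `closing` flag and requests a JS interrupt,
/// so that even a thread busy running JS can observe the shutdown and stop.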
struct BHMExitSignal {
closing: Arc<AtomicBool>,
js_context: ContextForRequestInterrupt,
}
impl BackgroundHangMonitorExitSignal for BHMExitSignal {
fn signal_to_exit(&self) {
self.closing.store(true, Ordering::SeqCst);
self.js_context.request_interrupt();
}
}
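/// The interrupt callback registered with SpiderMonkey via `JS_AddInterruptCallback`:
/// returning `false` tells the engine to abort the currently running script,
/// which is what allows a thread stuck in long-running JS to shut down.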
#[allow(unsafe_code)]
unsafe extern "C" fn interrupt_callback(_cx: *mut UnsafeJSContext) -> bool {
let res = ScriptThread::can_continue_running();
if !res {
ScriptThread::prepare_for_shutdown();
}
res
}
/// In the event of thread panic, all data on the stack runs its destructor. However, there
/// are no reachable, owning pointers to the DOM memory, so it never gets freed by default
/// when the script thread fails. The ScriptMemoryFailsafe uses the destructor bomb pattern
@ -818,6 +846,20 @@ impl ScriptThread {
})
}
pub fn can_continue_running() -> bool {
SCRIPT_THREAD_ROOT.with(|root| {
let script_thread = unsafe { &*root.get().unwrap() };
script_thread.can_continue_running_inner()
})
}
pub fn prepare_for_shutdown() {
SCRIPT_THREAD_ROOT.with(|root| {
let script_thread = unsafe { &*root.get().unwrap() };
script_thread.prepare_for_shutdown_inner();
})
}
pub fn set_mutation_observer_microtask_queued(value: bool) {
SCRIPT_THREAD_ROOT.with(|root| {
let script_thread = unsafe { &*root.get().unwrap() };
@ -876,13 +918,22 @@ impl ScriptThread {
})
}
pub fn process_event(msg: CommonScriptMsg) {
/// Process a single event as if it were the next event
/// in the queue of this window's event-loop.
/// Returns a boolean indicating whether further events should be processed.
pub fn process_event(msg: CommonScriptMsg) -> bool {
SCRIPT_THREAD_ROOT.with(|root| {
if let Some(script_thread) = root.get() {
let script_thread = unsafe { &*script_thread };
if !script_thread.can_continue_running_inner() {
return false;
} else {
script_thread.handle_msg_from_script(MainThreadScriptMsg::Common(msg));
return true;
}
});
}
false
})
}
// https://html.spec.whatwg.org/multipage/#await-a-stable-state
@ -1231,6 +1282,7 @@ impl ScriptThread {
unsafe {
JS_SetWrapObjectCallbacks(cx, &WRAP_CALLBACKS);
SetWindowProxyClass(cx, GetWindowProxyClass());
JS_AddInterruptCallback(cx, Some(interrupt_callback));
}
// Ask the router to proxy IPC messages from the devtools to us.
@ -1242,13 +1294,18 @@ impl ScriptThread {
let task_queue = TaskQueue::new(port, chan.clone());
let background_hang_monitor = state.background_hang_monitor_register.clone().map(|bhm| {
bhm.register_component(
let closing = Arc::new(AtomicBool::new(false));
let background_hang_monitor_exit_signal = BHMExitSignal {
closing: closing.clone(),
js_context: ContextForRequestInterrupt::new(cx),
};
let background_hang_monitor = state.background_hang_monitor_register.register_component(
MonitoredComponentId(state.id.clone(), MonitoredComponentType::Script),
Duration::from_millis(1000),
Duration::from_millis(5000),
)
});
Some(Box::new(background_hang_monitor_exit_signal)),
);
// Ask the router to proxy IPC messages from the control port to us.
let control_port = ROUTER.route_ipc_receiver_to_new_crossbeam_receiver(state.control_port);
@ -1270,6 +1327,7 @@ impl ScriptThread {
background_hang_monitor_register: state.background_hang_monitor_register,
background_hang_monitor,
closing,
chan: MainThreadScriptChan(chan.clone()),
dom_manipulation_task_sender: boxed_script_sender.clone(),
@ -1349,6 +1407,23 @@ impl ScriptThread {
unsafe { JSContext::from_ptr(self.js_runtime.cx()) }
}
/// Check if we are closing.
fn can_continue_running_inner(&self) -> bool {
if self.closing.load(Ordering::SeqCst) {
return false;
}
true
}
/// We are closing; ensure that no more script can run, since it could potentially hang.
fn prepare_for_shutdown_inner(&self) {
let docs = self.documents.borrow();
for (_, document) in docs.iter() {
let window = document.window();
window.ignore_all_tasks();
}
}
/// Starts the script thread. After calling this method, the script thread will loop receiving
/// messages on its port.
pub fn start(&self) {
@ -1386,9 +1461,7 @@ impl ScriptThread {
let mut sequential = vec![];
// Notify the background-hang-monitor we are waiting for an event.
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_wait());
self.background_hang_monitor.notify_wait();
// Receive at least one message so we don't spinloop.
debug!("Waiting for event.");
@ -1530,6 +1603,24 @@ impl ScriptThread {
let category = self.categorize_msg(&msg);
let pipeline_id = self.message_to_pipeline(&msg);
if self.closing.load(Ordering::SeqCst) {
// If we've received the exit signal from the BHM, only handle exit messages.
match msg {
FromConstellation(ConstellationControlMsg::ExitScriptThread) => {
self.handle_exit_script_thread_msg();
return false;
},
FromConstellation(ConstellationControlMsg::ExitPipeline(
pipeline_id,
discard_browsing_context,
)) => {
self.handle_exit_pipeline_msg(pipeline_id, discard_browsing_context);
},
_ => {},
}
continue;
}
let result = self.profile_event(category, pipeline_id, move || {
match msg {
FromConstellation(ConstellationControlMsg::ExitScriptThread) => {
@ -1546,12 +1637,12 @@ impl ScriptThread {
None
});
// https://html.spec.whatwg.org/multipage/#event-loop-processing-model step 6
self.perform_a_microtask_checkpoint();
if let Some(retval) = result {
return retval;
}
// https://html.spec.whatwg.org/multipage/#event-loop-processing-model step 6
self.perform_a_microtask_checkpoint();
}
{
@ -1665,8 +1756,7 @@ impl ScriptThread {
ScriptThreadEventCategory::WebGPUMsg => ScriptHangAnnotation::WebGPUMsg,
};
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.notify_activity(HangAnnotation::Script(hang_annotation)));
.notify_activity(HangAnnotation::Script(hang_annotation));
}
fn message_to_pipeline(&self, msg: &MixedMessage) -> Option<PipelineId> {
@ -2855,9 +2945,7 @@ impl ScriptThread {
self.handle_exit_pipeline_msg(pipeline_id, DiscardBrowsingContext::Yes);
}
self.background_hang_monitor
.as_ref()
.map(|bhm| bhm.unregister());
self.background_hang_monitor.unregister();
// If we're in multiprocess mode, shut-down the IPC router for this process.
if opts::multiprocess() {
@ -3867,6 +3955,8 @@ impl ScriptThread {
}
fn perform_a_microtask_checkpoint(&self) {
// Only perform the checkpoint if we're not shutting down.
if self.can_continue_running_inner() {
let globals = self
.documents
.borrow()
@ -3880,6 +3970,7 @@ impl ScriptThread {
globals,
)
}
}
}
impl Drop for ScriptThread {

View file

@ -12,6 +12,7 @@ use crate::dom::serviceworkerglobalscope::{
ServiceWorkerControlMsg, ServiceWorkerGlobalScope, ServiceWorkerScriptMsg,
};
use crate::dom::serviceworkerregistration::longest_prefix_match;
use crate::script_runtime::ContextForRequestInterrupt;
use crossbeam_channel::{unbounded, Receiver, RecvError, Sender};
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
@ -26,6 +27,8 @@ use servo_config::pref;
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
enum Message {
@ -93,6 +96,15 @@ impl Drop for ServiceWorkerRegistration {
warn!("Failed to send exit message to service worker scope.");
}
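// Set the closing flag first, then interrupt any JS the service worker is
// running, so that it observes the flag and stops.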
self.closing
.take()
.expect("No close flag for worker")
.store(true, Ordering::SeqCst);
self.context
.take()
.expect("No context to request interrupt.")
.request_interrupt();
// TODO: Step 1, 2 and 3.
if self
.join_handle
@ -121,6 +133,10 @@ struct ServiceWorkerRegistration {
control_sender: Option<Sender<ServiceWorkerControlMsg>>,
/// A handle to join on the worker thread.
join_handle: Option<JoinHandle<()>>,
/// A context to request an interrupt.
context: Option<ContextForRequestInterrupt>,
/// The closing flag for the worker.
closing: Option<Arc<AtomicBool>>,
}
impl ServiceWorkerRegistration {
@ -132,6 +148,8 @@ impl ServiceWorkerRegistration {
installing_worker: None,
join_handle: None,
control_sender: None,
context: None,
closing: None,
}
}
@ -139,12 +157,20 @@ impl ServiceWorkerRegistration {
&mut self,
join_handle: JoinHandle<()>,
control_sender: Sender<ServiceWorkerControlMsg>,
context: ContextForRequestInterrupt,
closing: Arc<AtomicBool>,
) {
assert!(self.join_handle.is_none());
self.join_handle = Some(join_handle);
assert!(self.control_sender.is_none());
self.control_sender = Some(control_sender);
assert!(self.context.is_none());
self.context = Some(context);
assert!(self.closing.is_none());
self.closing = Some(closing);
}
/// <https://w3c.github.io/ServiceWorker/#get-newest-worker>
@ -378,11 +404,11 @@ impl ServiceWorkerManager {
// Very roughly steps 5 to 18.
// TODO: implement all steps precisely.
let (new_worker, join_handle, control_sender) =
let (new_worker, join_handle, control_sender, context, closing) =
update_serviceworker(self.own_sender.clone(), job.scope_url.clone(), scope_things);
// Since we've just started the worker thread, ensure we can shut it down later.
registration.note_worker_thread(join_handle, control_sender);
registration.note_worker_thread(join_handle, control_sender, context, closing);
// Step 19, run Install.
@ -422,12 +448,16 @@ fn update_serviceworker(
ServiceWorker,
JoinHandle<()>,
Sender<ServiceWorkerControlMsg>,
ContextForRequestInterrupt,
Arc<AtomicBool>,
) {
let (sender, receiver) = unbounded();
let (_devtools_sender, devtools_receiver) = ipc::channel().unwrap();
let worker_id = ServiceWorkerId::new();
let (control_sender, control_receiver) = unbounded();
let (context_sender, context_receiver) = unbounded();
let closing = Arc::new(AtomicBool::new(false));
let join_handle = ServiceWorkerGlobalScope::run_serviceworker_scope(
scope_things.clone(),
@ -437,12 +467,20 @@ fn update_serviceworker(
own_sender,
scope_url.clone(),
control_receiver,
context_sender,
closing.clone(),
);
let context = context_receiver
.recv()
.expect("Couldn't receive a context for worker.");
(
ServiceWorker::new(scope_things.script_url, sender, worker_id),
join_handle,
control_sender,
context,
closing,
)
}

View file

@ -69,7 +69,7 @@ impl fmt::Debug for dyn TaskBox {
/// Encapsulated state required to create cancellable tasks from non-script threads.
#[derive(Clone)]
pub struct TaskCanceller {
pub cancelled: Option<Arc<AtomicBool>>,
pub cancelled: Arc<AtomicBool>,
}
impl TaskCanceller {
@ -88,7 +88,7 @@ impl TaskCanceller {
/// A task that can be cancelled by toggling a shared flag.
pub struct CancellableTask<T: TaskOnce> {
cancelled: Option<Arc<AtomicBool>>,
cancelled: Arc<AtomicBool>,
inner: T,
}
@ -97,9 +97,7 @@ where
T: TaskOnce,
{
fn is_cancelled(&self) -> bool {
self.cancelled
.as_ref()
.map_or(false, |cancelled| cancelled.load(Ordering::SeqCst))
self.cancelled.load(Ordering::SeqCst)
}
}

View file

@ -182,7 +182,7 @@ impl TaskManager {
let mut flags = self.task_cancellers.borrow_mut();
let cancel_flag = flags.entry(name).or_insert(Default::default());
TaskCanceller {
cancelled: Some(cancel_flag.clone()),
cancelled: cancel_flag.clone(),
}
}
}

View file

@ -218,6 +218,13 @@ impl OneshotTimers {
}
for timer in timers_to_run {
// Since timers can be coalesced together inside a task,
// this loop can keep running, even after the JS has been interrupted,
// and prevent a clean shutdown of a JS-running thread.
// This check prevents such a situation.
if !global.can_continue_running() {
return;
}
let callback = timer.callback;
callback.invoke(global, &self.js_timers);
}

View file

@ -222,7 +222,7 @@ pub struct LayoutThreadInit {
pub is_parent: bool,
pub layout_pair: (Sender<Msg>, Receiver<Msg>),
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
pub background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
pub background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
pub constellation_chan: IpcSender<ConstellationMsg>,
pub script_chan: IpcSender<ConstellationControlMsg>,
pub image_cache: Arc<dyn ImageCache>,

View file

@ -644,7 +644,7 @@ pub struct InitialScriptState {
/// A channel on which messages can be sent to the constellation from script.
pub script_to_constellation_chan: ScriptToConstellationChan,
/// A handle to register script-(and associated layout-)threads for hang monitoring.
pub background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
pub background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
/// A sender for the layout thread to communicate to the constellation.
pub layout_to_constellation_chan: IpcSender<LayoutMsg>,
/// A channel to schedule timer events.

View file

@ -1051,11 +1051,7 @@ pub fn run_content_process(token: String) {
set_logger(content.script_to_constellation_chan().clone());
let background_hang_monitor_register = if opts::get().background_hang_monitor {
content.register_with_background_hang_monitor()
} else {
None
};
let background_hang_monitor_register = content.register_with_background_hang_monitor();
content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,