introduce a pipeline namespace installer

Gregory Terzian 2019-08-13 14:31:10 +02:00
parent 47aa1ccaa2
commit 34008a317b
7 changed files with 140 additions and 7 deletions

Cargo.lock (generated)
View file

@@ -2967,8 +2967,11 @@ dependencies = [
name = "msg"
version = "0.0.1"
dependencies = [
"ipc-channel 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"malloc_size_of 0.0.1",
"malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"size_of_test 0.0.1",
"webrender_api 0.60.0 (git+https://github.com/servo/webrender)",

View file

@@ -129,7 +129,9 @@ use msg::constellation_msg::{
BrowsingContextGroupId, BrowsingContextId, HistoryStateId, PipelineId,
TopLevelBrowsingContextId,
};
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceId, TraversalDirection};
use msg::constellation_msg::{
PipelineNamespace, PipelineNamespaceId, PipelineNamespaceRequest, TraversalDirection,
};
use net_traits::pub_domains::reg_host;
use net_traits::request::RequestBuilder;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
@@ -217,6 +219,12 @@ struct BrowsingContextGroup {
/// the `script` crate). Script and layout communicate using a `Message`
/// type.
pub struct Constellation<Message, LTF, STF> {
/// An ipc-sender/threaded-receiver pair
/// to facilitate installing pipeline namespaces in threads
/// via a per-process installer.
namespace_receiver: Receiver<Result<PipelineNamespaceRequest, IpcError>>,
namespace_sender: IpcSender<PipelineNamespaceRequest>,
/// An IPC channel for script threads to send messages to the constellation.
/// This is the script threads' view of `script_receiver`.
script_sender: IpcSender<(PipelineId, FromScriptMsg)>,
@@ -672,6 +680,12 @@ where
let script_receiver =
route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(ipc_script_receiver);
let (namespace_sender, ipc_namespace_receiver) =
ipc::channel().expect("ipc channel failure");
let namespace_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(
ipc_namespace_receiver,
);
let (background_hang_monitor_sender, ipc_bhm_receiver) =
ipc::channel().expect("ipc channel failure");
let background_hang_monitor_receiver =
@@ -709,6 +723,8 @@ where
PipelineNamespace::install(PipelineNamespaceId(1));
let mut constellation: Constellation<Message, LTF, STF> = Constellation {
namespace_receiver,
namespace_sender,
script_sender: ipc_script_sender,
background_hang_monitor_sender,
background_hang_monitor_receiver,
@@ -987,6 +1003,8 @@ where
sender: self.script_sender.clone(),
pipeline_id: pipeline_id,
},
namespace_request_sender: self.namespace_sender.clone(),
pipeline_namespace_id: self.next_pipeline_namespace_id(),
background_monitor_register: self.background_monitor_register.clone(),
background_hang_monitor_to_constellation_chan: self
.background_hang_monitor_sender
@@ -1005,7 +1023,6 @@ where
event_loop,
load_data,
device_pixel_ratio: self.window_size.device_pixel_ratio,
pipeline_namespace_id: self.next_pipeline_namespace_id(),
prev_visibility: is_visible,
webrender_api_sender: self.webrender_api_sender.clone(),
webrender_document: self.webrender_document,
@@ -1155,6 +1172,7 @@ where
fn handle_request(&mut self) {
#[derive(Debug)]
enum Request {
PipelineNamespace(PipelineNamespaceRequest),
Script((PipelineId, FromScriptMsg)),
BackgroundHangMonitor(HangMonitorAlert),
Compositor(FromCompositorMsg),
@@ -1175,6 +1193,9 @@ where
// being called. If this happens, there's not much we can do
// other than panic.
let request = select! {
recv(self.namespace_receiver) -> msg => {
msg.expect("Unexpected namespace channel panic in constellation").map(Request::PipelineNamespace)
}
recv(self.script_receiver) -> msg => {
msg.expect("Unexpected script channel panic in constellation").map(Request::Script)
}
@@ -1203,6 +1224,9 @@ where
};
match request {
Request::PipelineNamespace(message) => {
self.handle_request_for_pipeline_namespace(message)
},
Request::Compositor(message) => self.handle_request_from_compositor(message),
Request::Script(message) => {
self.handle_request_from_script(message);
@ -1222,6 +1246,11 @@ where
}
}
fn handle_request_for_pipeline_namespace(&mut self, request: PipelineNamespaceRequest) {
let PipelineNamespaceRequest(sender) = request;
let _ = sender.send(self.next_pipeline_namespace_id());
}
fn handle_request_from_background_hang_monitor(&self, message: HangMonitorAlert) {
match message {
HangMonitorAlert::Profile(bytes) => self

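For reference, a minimal standalone sketch of the request/reply flow the constellation handles above: a requesting thread sends a `PipelineNamespaceRequest` carrying an IPC reply sender, and the constellation answers with the next `PipelineNamespaceId`. The local type definitions, the counter value, and the single-process `main` below are assumptions for illustration; only the channel shapes mirror the diff.

```rust
// Hypothetical sketch, not Servo's code: models the request/reply pattern
// used by handle_request_for_pipeline_namespace above.
use ipc_channel::ipc::{self, IpcSender};
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize)]
struct PipelineNamespaceId(u32);

#[derive(Debug, Deserialize, Serialize)]
struct PipelineNamespaceRequest(IpcSender<PipelineNamespaceId>);

fn main() {
    // Constellation side: the sole allocator of namespace ids.
    // (The diff installs namespace 1 for the constellation itself; 2 is an
    // assumed next value here.)
    let next_namespace_id = PipelineNamespaceId(2);
    let (request_sender, request_receiver) =
        ipc::channel::<PipelineNamespaceRequest>().expect("ipc channel failure");

    // Requesting side: bundle a reply channel into the request.
    let (reply_sender, reply_receiver) =
        ipc::channel::<PipelineNamespaceId>().expect("ipc channel failure");
    request_sender
        .send(PipelineNamespaceRequest(reply_sender))
        .expect("namespace request failed");

    // Constellation side: unpack the reply sender and answer with the id.
    let PipelineNamespaceRequest(reply) = request_receiver.recv().expect("recv failed");
    let _ = reply.send(next_namespace_id);

    // Requesting side: block until the id arrives, then install a namespace with it.
    let namespace_id = reply_receiver.recv().expect("recv failed");
    println!("got {:?}", namespace_id);
}
```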
View file

@@ -22,7 +22,10 @@ use media::WindowGLContext;
use metrics::PaintTimeMetrics;
use msg::constellation_msg::TopLevelBrowsingContextId;
use msg::constellation_msg::{BackgroundHangMonitorRegister, HangMonitorAlert, SamplerControlMsg};
use msg::constellation_msg::{BrowsingContextId, HistoryStateId, PipelineId, PipelineNamespaceId};
use msg::constellation_msg::{BrowsingContextId, HistoryStateId};
use msg::constellation_msg::{
PipelineId, PipelineNamespace, PipelineNamespaceId, PipelineNamespaceRequest,
};
use net::image_cache::ImageCacheImpl;
use net_traits::image_cache::ImageCache;
use net_traits::{IpcSend, ResourceThreads};
@@ -121,6 +124,9 @@ pub struct InitialPipelineState {
/// A channel to the associated constellation.
pub script_to_constellation_chan: ScriptToConstellationChan,
/// A sender to request pipeline namespace ids.
pub namespace_request_sender: IpcSender<PipelineNamespaceRequest>,
/// A handle to register components for hang monitoring.
/// None when in multiprocess mode.
pub background_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
@@ -164,15 +170,15 @@ pub struct InitialPipelineState {
/// Information about the device pixel ratio.
pub device_pixel_ratio: Scale<f32, CSSPixel, DevicePixel>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// The event loop to run in, if applicable.
pub event_loop: Option<Rc<EventLoop>>,
/// Information about the page to load.
pub load_data: LoadData,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// Whether the browsing context in which pipeline is embedded is visible
/// for the purposes of scheduling and resource management. This field is
/// only used to notify script and compositor threads after spawning
@@ -278,6 +284,7 @@ impl Pipeline {
parent_pipeline_id: state.parent_pipeline_id,
opener: state.opener,
script_to_constellation_chan: state.script_to_constellation_chan.clone(),
namespace_request_sender: state.namespace_request_sender,
background_hang_monitor_to_constellation_chan: state
.background_hang_monitor_to_constellation_chan
.clone(),
@@ -484,6 +491,7 @@ pub struct UnprivilegedPipelineContent {
browsing_context_id: BrowsingContextId,
parent_pipeline_id: Option<PipelineId>,
opener: Option<BrowsingContextId>,
namespace_request_sender: IpcSender<PipelineNamespaceRequest>,
script_to_constellation_chan: ScriptToConstellationChan,
background_hang_monitor_to_constellation_chan: IpcSender<HangMonitorAlert>,
sampling_profiler_port: Option<IpcReceiver<SamplerControlMsg>>,
@@ -522,6 +530,10 @@ impl UnprivilegedPipelineContent {
LTF: LayoutThreadFactory<Message = Message>,
STF: ScriptThreadFactory<Message = Message>,
{
// Set up pipeline-namespace installation for all threads in this process.
// Idempotent in single-process mode.
PipelineNamespace::set_installer_sender(self.namespace_request_sender);
let image_cache = Arc::new(ImageCacheImpl::new(self.webrender_api_sender.create_api()));
let paint_time_metrics = PaintTimeMetrics::new(
self.id,

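The call to `PipelineNamespace::set_installer_sender` above seeds a per-process installer; afterwards any thread in that content process can install its own namespace on demand. A rough sketch of that flow, using the API introduced in this diff (the function name, the way the sender is obtained, and the thread spawn are assumptions):

```rust
// Rough sketch only: set_installer_sender and auto_install are the APIs
// added in this diff; the surrounding setup is hypothetical.
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceRequest};

fn start_content_process(namespace_request_sender: IpcSender<PipelineNamespaceRequest>) {
    // Once per process: point the process-wide installer at the constellation.
    PipelineNamespace::set_installer_sender(namespace_request_sender);

    // Later, from any thread that needs to generate pipeline ids
    // (for example a worker thread):
    std::thread::spawn(|| {
        // Requests a fresh namespace id from the constellation and installs
        // it in this thread's thread-local storage.
        PipelineNamespace::auto_install();
        // From here on, ids minted on this thread cannot collide with ids
        // minted by other threads or processes.
    });
}
```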
View file

@@ -13,8 +13,11 @@ test = false
doctest = false
[dependencies]
lazy_static = "1"
ipc-channel = "0.11"
malloc_size_of = { path = "../malloc_size_of" }
malloc_size_of_derive = "0.1"
parking_lot = "0.8"
serde = "1.0.60"
webrender_api = {git = "https://github.com/servo/webrender", features = ["ipc"]}

View file

@@ -5,10 +5,13 @@
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use parking_lot::Mutex;
use std::cell::Cell;
use std::fmt;
use std::mem;
use std::num::NonZeroU32;
use std::sync::Arc;
use std::time::Duration;
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
@@ -17,12 +20,71 @@ pub enum TraversalDirection {
Back(usize),
}
#[derive(Debug, Deserialize, Serialize)]
/// Request a pipeline-namespace id from the constellation.
pub struct PipelineNamespaceRequest(pub IpcSender<PipelineNamespaceId>);
/// A per-process installer of pipeline-namespaces.
pub struct PipelineNamespaceInstaller {
request_sender: Option<IpcSender<PipelineNamespaceRequest>>,
namespace_sender: IpcSender<PipelineNamespaceId>,
namespace_receiver: IpcReceiver<PipelineNamespaceId>,
}
impl PipelineNamespaceInstaller {
pub fn new() -> Self {
let (namespace_sender, namespace_receiver) =
ipc::channel().expect("PipelineNamespaceInstaller ipc channel failure");
PipelineNamespaceInstaller {
request_sender: None,
namespace_sender: namespace_sender,
namespace_receiver: namespace_receiver,
}
}
/// Provide a sender for sending namespace-id requests to the constellation.
pub fn set_sender(&mut self, sender: IpcSender<PipelineNamespaceRequest>) {
self.request_sender = Some(sender);
}
/// Install a namespace, requesting a new Id from the constellation.
pub fn install_namespace(&self) {
match self.request_sender.as_ref() {
Some(sender) => {
let _ = sender.send(PipelineNamespaceRequest(self.namespace_sender.clone()));
let namespace_id = self
.namespace_receiver
.recv()
.expect("The constellation to make a pipeline namespace id available");
PipelineNamespace::install(namespace_id);
},
None => unreachable!("PipelineNamespaceInstaller should have a request_sender setup"),
}
}
}
lazy_static! {
/// A per-process unique pipeline-namespace-installer.
/// Accessible via PipelineNamespace.
///
/// Use PipelineNamespace::set_installer_sender to initialize it with a sender to the constellation
/// when a new process has been created.
///
/// Use PipelineNamespace::auto_install to install a unique pipeline-namespace from the calling thread.
static ref PIPELINE_NAMESPACE_INSTALLER: Arc<Mutex<PipelineNamespaceInstaller>> =
Arc::new(Mutex::new(PipelineNamespaceInstaller::new()));
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
///
/// A namespace can be installed for any other thread in a process
/// where a pipeline-namespace installer has been initialized.
///
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
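In other words, an id is effectively the pair (namespace, index), so two threads that each own a namespace can both count 1, 2, 3, … without ever colliding. A purely illustrative sketch with local stand-in types (not the msg crate's definitions):

```rust
// Purely illustrative: local stand-in types, not the msg crate's definitions.
#[derive(Debug, PartialEq)]
struct Id {
    namespace: u32,
    index: u32,
}

fn main() {
    // Two threads, each handed its own namespace by the constellation,
    // generate ids independently by bumping a local index.
    let thread_a: Vec<Id> = (1..=3).map(|index| Id { namespace: 1, index }).collect();
    let thread_b: Vec<Id> = (1..=3).map(|index| Id { namespace: 2, index }).collect();

    // The indices overlap, but the (namespace, index) pairs never do.
    for (a, b) in thread_a.iter().zip(thread_b.iter()) {
        assert_ne!(a, b); // e.g. (1, 1) != (2, 1)
    }
}
```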
@@ -39,6 +101,7 @@ pub struct PipelineNamespace {
}
impl PipelineNamespace {
/// Install a namespace for a given Id.
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
@@ -49,6 +112,25 @@ impl PipelineNamespace {
});
}
/// Set up the pipeline-namespace installer by providing it with a sender to the constellation.
/// Idempotent in single-process mode.
pub fn set_installer_sender(sender: IpcSender<PipelineNamespaceRequest>) {
PIPELINE_NAMESPACE_INSTALLER.lock().set_sender(sender);
}
/// Install a namespace in the current thread, without requiring a namespace Id to be available upfront.
/// Panics if called more than once per thread.
pub fn auto_install() {
// Note that holding the lock for the duration of the call does not hurt performance:
// the calling thread has to block on the ipc response from the constellation anyway,
// and the constellation already acts as a global lock on namespace ids,
// handling only one request at a time.
//
// Hence, any other thread attempting to concurrently install a namespace
// would have to wait for the current call to finish, regardless of the lock held here.
PIPELINE_NAMESPACE_INSTALLER.lock().install_namespace();
}
fn next_index(&mut self) -> NonZeroU32 {
self.index += 1;
NonZeroU32::new(self.index).expect("pipeline id index wrapped!")

View file

@@ -4,6 +4,8 @@
#![deny(unsafe_code)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate malloc_size_of;
#[macro_use]

View file

@@ -44,7 +44,7 @@ use js::jsapi::JSAutoRealm;
use js::jsval::UndefinedValue;
use js::panic::maybe_resume_unwind;
use js::rust::{HandleValue, ParentRuntime};
use msg::constellation_msg::PipelineId;
use msg::constellation_msg::{PipelineId, PipelineNamespace};
use net_traits::request::{
CredentialsMode, Destination, ParserMetadata, RequestBuilder as NetRequestInit,
};
@@ -123,6 +123,8 @@ impl WorkerGlobalScope {
timer_event_chan: IpcSender<TimerEvent>,
closing: Option<Arc<AtomicBool>>,
) -> Self {
// Install a pipeline-namespace in the current thread.
PipelineNamespace::auto_install();
Self {
globalscope: GlobalScope::new_inherited(
init.pipeline_id,