net: clean shutdown of fetch thread (#38421)

The fetch thread is currently not shut down, meaning it is one of the
threads counted in the "threads are still running after shutdown (bad)"
counter shown when Servo has shut down.

This PR introduces a mechanism to start, and shut down, one fetch thread
per process that requires one.

Testing: WPT tests and unit tests (for font context). Also manually
tested loading and closing "about:blank": this change indeed brings down
the count of threads still running after shutdown by one.

Fixes: https://github.com/servo/servo/issues/34887

---------

Signed-off-by: gterzian <2792687+gterzian@users.noreply.github.com>
This commit is contained in:
Gregory Terzian 2025-08-07 17:01:30 +08:00 committed by GitHub
parent ad805e3110
commit b23cf9c6cd
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 71 additions and 11 deletions

View file

@ -149,7 +149,10 @@ use media::WindowGLContext;
use net_traits::pub_domains::reg_host;
use net_traits::request::Referrer;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use net_traits::{self, AsyncRuntime, IpcSend, ReferrerPolicy, ResourceThreads};
use net_traits::{
self, AsyncRuntime, IpcSend, ReferrerPolicy, ResourceThreads, exit_fetch_thread,
start_fetch_thread,
};
use profile_traits::mem::ProfilerMsg;
use profile_traits::{mem, time};
use script_traits::{
@ -726,6 +729,11 @@ where
/// The main event loop for the constellation.
fn run(&mut self) {
// Start a fetch thread.
// In single-process mode this will be the global fetch thread;
// in multi-process mode this will be used only by the canvas paint thread.
let join_handle = start_fetch_thread(&self.public_resource_threads.core_thread);
while !self.shutting_down || !self.pipelines.is_empty() {
// Randomly close a pipeline if --random-pipeline-closure-probability is set
// This is for testing the hardening of the constellation.
@ -733,6 +741,12 @@ where
self.handle_request();
}
self.handle_shutdown();
// Shut down the fetch thread started above.
exit_fetch_thread();
join_handle
.join()
.expect("Failed to join on the fetch thread in the constellation");
}
/// Generate a new pipeline id namespace.

View file

@ -35,8 +35,8 @@ use layout_api::{LayoutFactory, ScriptThreadFactory};
use log::{debug, error, warn};
use media::WindowGLContext;
use net::image_cache::ImageCacheImpl;
use net_traits::ResourceThreads;
use net_traits::image_cache::ImageCache;
use net_traits::{CoreResourceThread, ResourceThreads};
use profile::system_reporter;
use profile_traits::mem::{ProfilerMsg, Reporter};
use profile_traits::{mem as profile_mem, time};
@ -575,6 +575,10 @@ impl UnprivilegedPipelineContent {
&self.script_to_constellation_chan
}
pub fn core_resource_thread(&self) -> &CoreResourceThread {
&self.resource_threads.core_thread
}
pub fn opts(&self) -> Opts {
self.opts.clone()
}

View file

@ -9,8 +9,8 @@ mod font_context {
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::{Arc, Once};
use std::thread;
use app_units::Au;
@ -23,7 +23,7 @@ mod font_context {
SystemFontServiceProxySender, fallback_font_families,
};
use ipc_channel::ipc::{self, IpcReceiver};
use net_traits::ResourceThreads;
use net_traits::{ResourceThreads, exit_fetch_thread, start_fetch_thread};
use parking_lot::Mutex;
use servo_arc::Arc as ServoArc;
use style::ArcSlice;
@ -36,6 +36,8 @@ mod font_context {
use stylo_atoms::Atom;
use webrender_api::{FontInstanceKey, FontKey, IdNamespace};
static INIT: Once = Once::new();
struct TestContext {
context: FontContext,
system_font_service: Arc<MockSystemFontService>,
@ -53,6 +55,9 @@ mod font_context {
let mock_compositor_api = CrossProcessCompositorApi::dummy();
let proxy_clone = Arc::new(system_font_service_proxy.to_sender().to_proxy());
INIT.call_once(|| {
start_fetch_thread(&mock_resource_threads.core_thread);
});
Self {
context: FontContext::new(proxy_clone, mock_compositor_api, mock_resource_threads),
system_font_service,

View file

@ -92,6 +92,7 @@ use log::{Log, Metadata, Record, debug, error, warn};
use media::{GlApi, NativeDisplay, WindowGLContext};
use net::protocols::ProtocolRegistry;
use net::resource_thread::new_resource_threads;
use net_traits::{exit_fetch_thread, start_fetch_thread};
use profile::{mem as profile_mem, time as profile_time};
use profile_traits::mem::MemoryReportResult;
use profile_traits::{mem, time};
@ -1257,6 +1258,9 @@ pub fn run_content_process(token: String) {
UnprivilegedContent::Pipeline(mut content) => {
media_platform::init();
// Start the fetch thread for this content process.
let fetch_thread_join_handle = start_fetch_thread(content.core_resource_thread());
set_logger(content.script_to_constellation_chan().clone());
let (background_hang_monitor_register, join_handle) =
@ -1281,6 +1285,12 @@ pub fn run_content_process(token: String) {
join_handle
.join()
.expect("Failed to join on the BHM background thread.");
// Shut down the fetch thread started above.
exit_fetch_thread();
fetch_thread_join_handle
.join()
.expect("Failed to join on the fetch thread in the content process");
},
UnprivilegedContent::ServiceWorker(content) => {
content.start::<ServiceWorkerManager>();

View file

@ -7,7 +7,7 @@
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::{LazyLock, OnceLock};
use std::thread;
use std::thread::{self, JoinHandle};
use base::cross_process_instant::CrossProcessInstant;
use base::id::HistoryStateId;
@ -563,6 +563,8 @@ enum ToFetchThreadMessage {
/* callback */ BoxedFetchCallback,
),
FetchResponse(FetchResponseMsg),
/// Stop the background thread.
Exit,
}
pub type BoxedFetchCallback = Box<dyn FnMut(FetchResponseMsg) + Send + 'static>;
@ -586,7 +588,9 @@ struct FetchThread {
}
impl FetchThread {
fn spawn(core_resource_thread: &CoreResourceThread) -> Sender<ToFetchThreadMessage> {
fn spawn(
core_resource_thread: &CoreResourceThread,
) -> (Sender<ToFetchThreadMessage>, JoinHandle<()>) {
let (sender, receiver) = unbounded();
let (to_fetch_sender, from_fetch_sender) = ipc::channel().unwrap();
@ -600,7 +604,7 @@ impl FetchThread {
);
let core_resource_thread = core_resource_thread.clone();
thread::Builder::new()
let join_handle = thread::Builder::new()
.name("FetchThread".to_owned())
.spawn(move || {
let mut fetch_thread = FetchThread {
@ -612,7 +616,7 @@ impl FetchThread {
fetch_thread.run();
})
.expect("Thread spawning failed");
sender
(sender, join_handle)
}
fn run(&mut self) {
@ -659,6 +663,7 @@ impl FetchThread {
.core_resource_thread
.send(CoreResourceMsg::Cancel(request_ids));
},
ToFetchThreadMessage::Exit => break,
}
}
}
@ -666,15 +671,37 @@ impl FetchThread {
static FETCH_THREAD: OnceLock<Sender<ToFetchThreadMessage>> = OnceLock::new();
/// Spawn the per-process background fetch thread and install its message
/// sender in the process-global `FETCH_THREAD` slot.
///
/// Returns the thread's `JoinHandle` so that the caller can join on it
/// after requesting shutdown via `exit_fetch_thread`.
///
/// # Panics
/// Panics if called more than once in the same process, since the
/// `OnceLock` holding the sender can only be set a single time.
pub fn start_fetch_thread(core_resource_thread: &CoreResourceThread) -> JoinHandle<()> {
    let (to_fetch_thread, join_handle) = FetchThread::spawn(core_resource_thread);
    let installed = FETCH_THREAD.set(to_fetch_thread);
    installed.expect("Fetch thread should be set only once on start-up");
    join_handle
}
/// Ask the process-global fetch thread to stop by sending it the `Exit`
/// message.
///
/// After calling this, the caller can, and should, join on the
/// `JoinHandle` returned by `start_fetch_thread`.
///
/// # Panics
/// Panics if the fetch thread was never started in this process.
pub fn exit_fetch_thread() {
    let sender = FETCH_THREAD
        .get()
        .expect("Fetch thread should always be initialized on start-up");
    // Best-effort: if the thread has already gone away, a failed send is
    // deliberately ignored — there is nothing left to shut down.
    drop(sender.send(ToFetchThreadMessage::Exit));
}
/// Instruct the resource thread to make a new fetch request.
pub fn fetch_async(
core_resource_thread: &CoreResourceThread,
_core_resource_thread: &CoreResourceThread,
request: RequestBuilder,
response_init: Option<ResponseInit>,
callback: BoxedFetchCallback,
) {
let _ = FETCH_THREAD
.get_or_init(|| FetchThread::spawn(core_resource_thread))
.get()
.expect("Fetch thread should always be initialized on start-up")
.send(ToFetchThreadMessage::StartFetch(
request,
response_init,
@ -687,7 +714,7 @@ pub fn fetch_async(
pub fn cancel_async_fetch(request_ids: Vec<RequestId>) {
let _ = FETCH_THREAD
.get()
.expect("Tried to cancel request in process that hasn't started one.")
.expect("Fetch thread should always be initialized on start-up")
.send(ToFetchThreadMessage::Cancel(request_ids));
}