From 68a27946bf0c2a1398df03930c163e40468d016d Mon Sep 17 00:00:00 2001 From: Jonathan Schwender <55576758+jschwe@users.noreply.github.com> Date: Sat, 7 Dec 2024 06:25:17 +0100 Subject: [PATCH] Add prefs to limit threadpool sizes (#34478) * Add prefs to limit threadpool sizes Add preferences to control the size of threadpools, so that we can easily reduce the amount of runtime threads and test which pools benefit from more threads. Signed-off-by: Jonathan Schwender * Add pref for Webrender threadpool Add a preference to limit the size of the webrender threadpool. Note: WebRender by default calls hooks which register the threads with a profiler instance that the embedder can register with webrender. Servo currently doesn't register such a profiler with webrender, but in the future we might also want to profile the webrender threadpool. Signed-off-by: Jonathan Schwender --------- Signed-off-by: Jonathan Schwender --- Cargo.lock | 1 + components/config/prefs.rs | 22 ++++++++++++++++ components/net/async_runtime.rs | 30 +++++++++++++++++++--- components/net/image_cache.rs | 8 +++--- components/net/resource_thread.rs | 11 +++++--- components/net/tests/fetch.rs | 2 +- components/net/tests/filemanager_thread.rs | 2 +- components/servo/Cargo.toml | 1 + components/servo/lib.rs | 13 ++++++++++ ports/servoshell/Cargo.toml | 1 - resources/prefs.json | 5 ++++ 11 files changed, 82 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59f61075ecd..5306a7b6ea1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4131,6 +4131,7 @@ dependencies = [ "net_traits", "profile", "profile_traits", + "rayon", "script", "script_layout_interface", "script_traits", diff --git a/components/config/prefs.rs b/components/config/prefs.rs index e76c6c3128b..6fb0b0342f6 100644 --- a/components/config/prefs.rs +++ b/components/config/prefs.rs @@ -198,6 +198,28 @@ mod gen { #[serde(rename = "fonts.default-monospace-size")] default_monospace_size: i64, }, + /// Allows customizing the 
different threadpools used by servo + threadpools: { + /// Number of workers per threadpool, if we fail to detect how much + /// parallelism is available at runtime. + fallback_worker_num: i64, + image_cache_workers: { + /// Maximum number of workers for the Image Cache thread pool + max: i64, + }, + async_runtime_workers: { + /// Maximum number of workers for the Networking async runtime thread pool + max: i64 + }, + resource_workers: { + /// Maximum number of workers for the Core Resource Manager + max: i64, + }, + webrender_workers: { + /// Maximum number of workers for webrender + max: i64, + }, + }, css: { animations: { testing: { diff --git a/components/net/async_runtime.rs b/components/net/async_runtime.rs index 144f4262835..7d1fb9daeaa 100644 --- a/components/net/async_runtime.rs +++ b/components/net/async_runtime.rs @@ -1,10 +1,32 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ - +use std::cmp::Ord; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{LazyLock, Mutex}; +use std::thread; -use tokio::runtime::Runtime; +use tokio::runtime::{Builder, Runtime}; -pub static HANDLE: LazyLock<Mutex<Option<Runtime>>> = - LazyLock::new(|| Mutex::new(Some(Runtime::new().unwrap()))); +pub static HANDLE: LazyLock<Mutex<Option<Runtime>>> = LazyLock::new(|| { + Mutex::new(Some( + Builder::new_multi_thread() + .thread_name_fn(|| { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::Relaxed); + format!("tokio-runtime-{}", id) + }) + .worker_threads( + thread::available_parallelism() + .map(|i| i.get()) + .unwrap_or(servo_config::pref!(threadpools.fallback_worker_num) as usize) + .min( + servo_config::pref!(threadpools.async_runtime_workers.max).max(1) as usize, + ), + ) + .enable_io() + .enable_time() + .build() + .unwrap(), + )) +}); diff --git a/components/net/image_cache.rs b/components/net/image_cache.rs index 5df64cf2a67..d09d274948c 100644 --- a/components/net/image_cache.rs +++ b/components/net/image_cache.rs @@ -4,7 +4,6 @@ use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::collections::HashMap; -use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; use std::{mem, thread}; @@ -425,8 +424,9 @@ impl ImageCache for ImageCacheImpl { // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html // If no information can be obtained about the system, uses 4 threads as a default let thread_count = thread::available_parallelism() - .unwrap_or(NonZeroUsize::new(4).unwrap()) - .get(); + .map(|i| i.get()) + .unwrap_or(servo_config::pref!(threadpools.fallback_worker_num) as usize) + .min(servo_config::pref!(threadpools.image_cache_workers.max).max(1) as usize); ImageCacheImpl { store: Arc::new(Mutex::new(ImageCacheStore { @@ -436,7 +436,7 @@ impl ImageCache for ImageCacheImpl { placeholder_url: ServoUrl::parse("chrome://resources/rippy.png").unwrap(), compositor_api, })), - thread_pool: 
CoreResourceThreadPool::new(thread_count), + thread_pool: CoreResourceThreadPool::new(thread_count, "ImageCache".to_string()), } } diff --git a/components/net/resource_thread.rs b/components/net/resource_thread.rs index df3f5160915..dc27a05d195 100644 --- a/components/net/resource_thread.rs +++ b/components/net/resource_thread.rs @@ -559,9 +559,10 @@ pub struct CoreResourceThreadPool { } impl CoreResourceThreadPool { - pub fn new(num_threads: usize) -> CoreResourceThreadPool { + pub fn new(num_threads: usize, pool_name: String) -> CoreResourceThreadPool { + debug!("Creating new CoreResourceThreadPool with {num_threads} threads!"); let pool = rayon::ThreadPoolBuilder::new() - .thread_name(|i| format!("CoreResourceThread#{i}")) + .thread_name(move |i| format!("{pool_name}#{i}")) .num_threads(num_threads) .build() .unwrap(); @@ -645,7 +646,11 @@ impl CoreResourceManager { ca_certificates: CACertificates, ignore_certificate_errors: bool, ) -> CoreResourceManager { - let pool = CoreResourceThreadPool::new(16); + let num_threads = thread::available_parallelism() + .map(|i| i.get()) + .unwrap_or(servo_config::pref!(threadpools.fallback_worker_num) as usize) + .min(servo_config::pref!(threadpools.resource_workers.max).max(1) as usize); + let pool = CoreResourceThreadPool::new(num_threads, "CoreResourceThreadPool".to_string()); let pool_handle = Arc::new(pool); CoreResourceManager { user_agent, diff --git a/components/net/tests/fetch.rs b/components/net/tests/fetch.rs index 796015941b2..79020a7c998 100644 --- a/components/net/tests/fetch.rs +++ b/components/net/tests/fetch.rs @@ -226,7 +226,7 @@ fn test_file() { .origin(url.origin()) .build(); - let pool = CoreResourceThreadPool::new(1); + let pool = CoreResourceThreadPool::new(1, "CoreResourceTestPool".to_string()); let pool_handle = Arc::new(pool); let mut context = new_fetch_context(None, None, Some(Arc::downgrade(&pool_handle))); let fetch_response = fetch_with_context(&mut request, &mut context); diff --git 
a/components/net/tests/filemanager_thread.rs b/components/net/tests/filemanager_thread.rs index 8cead55ba53..9a1b33becec 100644 --- a/components/net/tests/filemanager_thread.rs +++ b/components/net/tests/filemanager_thread.rs @@ -21,7 +21,7 @@ use crate::create_embedder_proxy; #[test] fn test_filemanager() { - let pool = CoreResourceThreadPool::new(1); + let pool = CoreResourceThreadPool::new(1, "CoreResourceTestPool".to_string()); let pool_handle = Arc::new(pool); let filemanager = FileManager::new(create_embedder_proxy(), Arc::downgrade(&pool_handle)); set_pref!(dom.testing.html_input_element.select_files.enabled, true); diff --git a/components/servo/Cargo.toml b/components/servo/Cargo.toml index cfa5568b3dc..98376e1ae8a 100644 --- a/components/servo/Cargo.toml +++ b/components/servo/Cargo.toml @@ -73,6 +73,7 @@ media = { path = "../media" } mozangle = { workspace = true } net = { path = "../net" } net_traits = { workspace = true } +rayon = { workspace = true } profile = { path = "../profile" } profile_traits = { workspace = true } script = { path = "../script" } diff --git a/components/servo/lib.rs b/components/servo/lib.rs index 12f9862e7e0..93eaf04e099 100644 --- a/components/servo/lib.rs +++ b/components/servo/lib.rs @@ -22,6 +22,7 @@ use std::cmp::max; use std::path::PathBuf; use std::rc::Rc; use std::sync::{Arc, Mutex}; +use std::thread; use std::vec::Drain; pub use base::id::TopLevelBrowsingContextId; @@ -352,6 +353,17 @@ where } else { UploadMethod::PixelBuffer(ONE_TIME_USAGE_HINT) }; + let worker_threads = thread::available_parallelism() + .map(|i| i.get()) + .unwrap_or(pref!(threadpools.fallback_worker_num) as usize) + .min(pref!(threadpools.webrender_workers.max).max(1) as usize); + let workers = Some(Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(worker_threads) + .thread_name(|idx| format!("WRWorker#{}", idx)) + .build() + .unwrap(), + )); webrender::create_webrender_instance( webrender_gl.clone(), render_notifier, @@ -374,6 +386,7 @@ 
where allow_texture_swizzling: pref!(gfx.texture_swizzling.enabled), clear_color, upload_method, + workers, ..Default::default() }, None, diff --git a/ports/servoshell/Cargo.toml b/ports/servoshell/Cargo.toml index e53c4ead690..34054ee348f 100644 --- a/ports/servoshell/Cargo.toml +++ b/ports/servoshell/Cargo.toml @@ -102,7 +102,6 @@ surfman = { workspace = true, features = ["sm-angle-default"] } serde_json = { workspace = true } webxr = { workspace = true, optional = true } - [target.'cfg(not(any(target_os = "android", target_env = "ohos")))'.dependencies] # For optional feature servo_allocator/use-system-allocator servo_allocator = { path = "../../components/allocator" } diff --git a/resources/prefs.json b/resources/prefs.json index 891a8c49405..ae34fd3a663 100644 --- a/resources/prefs.json +++ b/resources/prefs.json @@ -124,5 +124,10 @@ "shell.native-orientation": "both", "shell.native-titlebar.enabled": true, "shell.searchpage": "https://duckduckgo.com/html/?q=%s", + "threadpools.async_runtime_workers.max": 6, + "threadpools.fallback_worker_num": 3, + "threadpools.image_cache_workers.max": 4, + "threadpools.resource_workers.max": 4, + "threadpools.webrender_workers.max": 4, "webgl.testing.context_creation_error": false }