mirror of
https://github.com/servo/servo.git
storage: Move storage related backend threads to their own crate (#39418)
This PR moves storage related APIs (currently just webstorage and indexeddb) into their own crate. This reduces the congestion in the net thread.

Related Zulip thread: https://servo.zulipchat.com/#narrow/channel/263398-general/topic/indexedDB.20location/with/535911631

Sub PRs:
- [x] Move shared storage/net stuff to base (`IpcSend` and `CoreResourcePool`) #39419

---------

Signed-off-by: Ashwin Naren <arihant2math@gmail.com>
Co-authored-by: Martin Robinson <mrobinson@igalia.com>
This commit is contained in:
parent
ba5f36b671
commit
d12dc23083
60 changed files with 378 additions and 280 deletions
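For reviewers: the mechanical part of this diff is that net's private `CoreResourceThreadPool` wrapper is replaced by the shared `base::threadpool::ThreadPool`. A minimal sketch of what a call site looks like after the move (the thread count and the wrapping function are illustrative, not Servo source; the `spawn` signature is assumed unchanged, which the untouched call sites below imply):

```rust
use std::sync::Arc;

use base::threadpool::ThreadPool;

fn make_decode_pool() -> Arc<ThreadPool> {
    // Previously: Arc::new(CoreResourceThreadPool::new(thread_count, "ImageCache".to_string()))
    let thread_count = 4; // illustrative; Servo derives this from available_parallelism() and prefs
    let pool = Arc::new(ThreadPool::new(thread_count, "ImageCache".to_string()));
    pool.spawn(move || {
        // image decoding / file I/O now runs on the shared pool
    });
    pool
}
```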
@@ -20,7 +20,6 @@ async-recursion = "1.1"
async-tungstenite = { workspace = true }
base = { workspace = true }
base64 = { workspace = true }
bincode = { workspace = true }
bytes = "1"
chrono = { workspace = true }
compositing_traits = { workspace = true }

@@ -56,13 +55,10 @@ pixels = { path = "../pixels" }
profile_traits = { workspace = true }
rayon = { workspace = true }
resvg = { workspace = true }
rusqlite = { version = "0.37", features = ["bundled"] }
rustc-hash = { workspace = true }
rustls = { workspace = true }
rustls-pemfile = { workspace = true }
rustls-pki-types = { workspace = true }
sea-query = { version = "1.0.0-rc.9", default-features = false, features = ["backend-sqlite", "derive"] }
sea-query-rusqlite = { version = "0.8.0-rc.8" }
serde = { workspace = true }
serde_json = { workspace = true }
servo_arc = { workspace = true }

@@ -89,7 +85,6 @@ futures = { version = "0.3", features = ["compat"] }
hyper = { workspace = true, features = ["full"] }
hyper-util = { workspace = true, features = ["server-graceful"] }
rustls = { workspace = true, features = ["aws-lc-rs"] }
tempfile = "3"

[[test]]
name = "main"

@@ -11,6 +11,7 @@ use std::sync::{Arc, Mutex, RwLock, Weak};

use base::generic_channel;
use base::id::WebViewId;
use base::threadpool::ThreadPool;
use embedder_traits::{EmbedderMsg, EmbedderProxy, FilterPattern};
use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
use http::header::{self, HeaderValue};

@@ -33,7 +34,6 @@ use uuid::Uuid;

use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
use crate::protocols::get_range_request_bounds;
use crate::resource_thread::CoreResourceThreadPool;

pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB

@@ -79,14 +79,11 @@ enum FileImpl {
pub struct FileManager {
    embedder_proxy: EmbedderProxy,
    store: Arc<FileManagerStore>,
    thread_pool: Weak<CoreResourceThreadPool>,
    thread_pool: Weak<ThreadPool>,
}

impl FileManager {
    pub fn new(
        embedder_proxy: EmbedderProxy,
        pool_handle: Weak<CoreResourceThreadPool>,
    ) -> FileManager {
    pub fn new(embedder_proxy: EmbedderProxy, pool_handle: Weak<ThreadPool>) -> FileManager {
        FileManager {
            embedder_proxy,
            store: Arc::new(FileManagerStore::new()),

@@ -9,6 +9,7 @@ use std::sync::{Arc, Mutex};
use std::{mem, thread};

use base::id::PipelineId;
use base::threadpool::ThreadPool;
use compositing_traits::{CrossProcessCompositorApi, ImageUpdate, SerializableImageData};
use imsz::imsz_from_reader;
use ipc_channel::ipc::{IpcSender, IpcSharedMemory};

@@ -35,8 +36,6 @@ use webrender_api::{
    ImageDescriptor, ImageDescriptorFlags, ImageFormat, ImageKey as WebRenderImageKey,
};

use crate::resource_thread::CoreResourceThreadPool;

// We bake in rippy.png as a fallback, in case the embedder does not provide
// a rippy resource. This version is 253 bytes large; don't exchange it for
// something in higher resolution.

@@ -703,7 +702,7 @@ pub struct ImageCacheImpl {
    store: Arc<Mutex<ImageCacheStore>>,

    /// Thread pool for image decoding
    thread_pool: Arc<CoreResourceThreadPool>,
    thread_pool: Arc<ThreadPool>,
}

impl ImageCache for ImageCacheImpl {

@@ -730,10 +729,7 @@ impl ImageCache for ImageCacheImpl {
                pipeline_id: None,
                key_cache: KeyCache::new(),
            })),
            thread_pool: Arc::new(CoreResourceThreadPool::new(
                thread_count,
                "ImageCache".to_string(),
            )),
            thread_pool: Arc::new(ThreadPool::new(thread_count, "ImageCache".to_string())),
        }
    }

@@ -1,63 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::collections::VecDeque;

use net_traits::indexeddb_thread::{AsyncOperation, CreateObjectResult, IndexedDBTxnMode, KeyPath};
use tokio::sync::oneshot;

pub use self::sqlite::SqliteEngine;

mod sqlite;

pub struct KvsOperation {
    pub store_name: String,
    pub operation: AsyncOperation,
}

pub struct KvsTransaction {
    // Mode could be used by a more optimal implementation of transactions
    // that has different allocated threadpools for reading and writing
    #[allow(unused)]
    pub mode: IndexedDBTxnMode,
    pub requests: VecDeque<KvsOperation>,
}

pub trait KvsEngine {
    type Error: std::error::Error;

    fn create_store(
        &self,
        store_name: &str,
        key_path: Option<KeyPath>,
        auto_increment: bool,
    ) -> Result<CreateObjectResult, Self::Error>;

    fn delete_store(&self, store_name: &str) -> Result<(), Self::Error>;

    fn close_store(&self, store_name: &str) -> Result<(), Self::Error>;

    fn delete_database(self) -> Result<(), Self::Error>;

    fn process_transaction(
        &self,
        transaction: KvsTransaction,
    ) -> oneshot::Receiver<Option<Vec<u8>>>;

    fn has_key_generator(&self, store_name: &str) -> bool;
    fn key_path(&self, store_name: &str) -> Option<KeyPath>;

    fn create_index(
        &self,
        store_name: &str,
        index_name: String,
        key_path: KeyPath,
        unique: bool,
        multi_entry: bool,
    ) -> Result<CreateObjectResult, Self::Error>;
    fn delete_index(&self, store_name: &str, index_name: String) -> Result<(), Self::Error>;

    fn version(&self) -> Result<u64, Self::Error>;
    fn set_version(&self, version: u64) -> Result<(), Self::Error>;
}

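The `KvsEngine` trait above is the seam between the IndexedDB manager and its storage backends (SQLite is the only implementation in this PR). A hedged sketch of the driving pattern, mirroring the `SqliteEngine::new` and `process_transaction(..).blocking_recv()` calls that appear later in this diff (the wrapping function and bindings are illustrative, not Servo source):

```rust
use std::path::Path;
use std::sync::Arc;

use crate::indexeddb::engines::{KvsEngine, KvsTransaction, SqliteEngine};
use crate::indexeddb::idb_thread::IndexedDBDescription;
use crate::resource_thread::CoreResourceThreadPool;

// `idb_base_dir`, `db_info`, `pool`, and `txn` stand in for values the manager owns.
fn run_transaction(
    idb_base_dir: &Path,
    db_info: &IndexedDBDescription,
    pool: Arc<CoreResourceThreadPool>,
    txn: KvsTransaction,
) {
    // One engine instance per (origin, database).
    let engine = SqliteEngine::new(idb_base_dir, db_info, pool)
        .expect("Failed to create sqlite engine");
    // Requests execute on the engine's thread pool; the oneshot receiver
    // resolves once every queued request has run.
    let _ = engine.process_transaction(txn).blocking_recv();
}
```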
@@ -1,674 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::path::{Path, PathBuf};
use std::sync::Arc;

use ipc_channel::ipc::IpcSender;
use log::{error, info};
use net_traits::indexeddb_thread::{
    AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, BackendError, BackendResult,
    CreateObjectResult, IndexedDBKeyRange, IndexedDBKeyType, IndexedDBRecord, IndexedDBTxnMode,
    KeyPath, PutItemResult,
};
use rusqlite::{Connection, Error, OptionalExtension, params};
use sea_query::{Condition, Expr, ExprTrait, IntoCondition, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
use serde::Serialize;
use tokio::sync::oneshot;

use crate::indexeddb::engines::{KvsEngine, KvsTransaction};
use crate::indexeddb::idb_thread::IndexedDBDescription;
use crate::resource_thread::CoreResourceThreadPool;

mod create;
mod database_model;
mod object_data_model;
mod object_store_index_model;
mod object_store_model;

// These pragmas need to be set once
const DB_INIT_PRAGMAS: [&str; 2] = ["PRAGMA journal_mode = WAL;", "PRAGMA encoding = 'UTF-16';"];

// These pragmas need to be run once per connection.
const DB_PRAGMAS: [&str; 4] = [
    "PRAGMA synchronous = NORMAL;",
    "PRAGMA journal_size_limit = 67108864; -- 64 megabytes",
    "PRAGMA mmap_size = 67108864; -- 64 megabytes",
    "PRAGMA cache_size = 2000;",
];

fn range_to_query(range: IndexedDBKeyRange) -> Condition {
    // Special case for optimization
    if let Some(singleton) = range.as_singleton() {
        let encoded = bincode::serialize(singleton).unwrap();
        return Expr::column(object_data_model::Column::Key)
            .eq(encoded)
            .into_condition();
    }
    let mut parts = vec![];
    if let Some(upper) = range.upper.as_ref() {
        let upper_bytes = bincode::serialize(upper).unwrap();
        let query = if range.upper_open {
            Expr::column(object_data_model::Column::Key).lt(upper_bytes)
        } else {
            Expr::column(object_data_model::Column::Key).lte(upper_bytes)
        };
        parts.push(query);
    }
    if let Some(lower) = range.lower.as_ref() {
        let lower_bytes = bincode::serialize(lower).unwrap();
        let query = if range.lower_open {
            Expr::column(object_data_model::Column::Key).gt(lower_bytes)
        } else {
            Expr::column(object_data_model::Column::Key).gte(lower_bytes)
        };
        parts.push(query);
    }
    let mut condition = Condition::all();
    for part in parts {
        condition = condition.add(part);
    }
    condition
}

pub struct SqliteEngine {
    db_path: PathBuf,
    connection: Connection,
    read_pool: Arc<CoreResourceThreadPool>,
    write_pool: Arc<CoreResourceThreadPool>,
}

impl SqliteEngine {
    // TODO: intake dual pools
    pub fn new(
        base_dir: &Path,
        db_info: &IndexedDBDescription,
        pool: Arc<CoreResourceThreadPool>,
    ) -> Result<Self, Error> {
        let mut db_path = PathBuf::new();
        db_path.push(base_dir);
        db_path.push(db_info.as_path());
        let db_parent = db_path.clone();
        db_path.push("db.sqlite");

        if !db_path.exists() {
            std::fs::create_dir_all(db_parent).unwrap();
            std::fs::File::create(&db_path).unwrap();
        }
        let connection = Self::init_db(&db_path, db_info)?;

        for stmt in DB_PRAGMAS {
            // TODO: Handle errors properly
            let _ = connection.execute(stmt, ());
        }

        Ok(Self {
            connection,
            db_path,
            read_pool: pool.clone(),
            write_pool: pool,
        })
    }

    fn init_db(path: &Path, db_info: &IndexedDBDescription) -> Result<Connection, Error> {
        let connection = Connection::open(path)?;
        if connection.table_exists(None, "database")? {
            // Database already exists, no need to initialize
            return Ok(connection);
        }
        info!("Initializing indexeddb database at {:?}", path);
        for stmt in DB_INIT_PRAGMAS {
            // FIXME(arihant2math): this fails occasionally
            let _ = connection.execute(stmt, ());
        }
        create::create_tables(&connection)?;
        // From https://w3c.github.io/IndexedDB/#database-version:
        // "When a database is first created, its version is 0 (zero)."
        connection.execute(
            "INSERT INTO database (name, origin, version) VALUES (?, ?, ?)",
            params![
                db_info.name.to_owned(),
                db_info.origin.to_owned().ascii_serialization(),
                i64::from_ne_bytes(0_u64.to_ne_bytes())
            ],
        )?;
        Ok(connection)
    }

    fn get(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<object_data_model::Model>, Error> {
        let query = range_to_query(key_range);
        let (sql, values) = sea_query::Query::select()
            .from(object_data_model::Column::Table)
            .columns(vec![
                object_data_model::Column::ObjectStoreId,
                object_data_model::Column::Key,
                object_data_model::Column::Data,
            ])
            .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
            .limit(1)
            .build_rusqlite(SqliteQueryBuilder);
        connection
            .prepare(&sql)?
            .query_one(&*values.as_params(), |row| {
                object_data_model::Model::try_from(row)
            })
            .optional()
    }

    fn get_key(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<Vec<u8>>, Error> {
        Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.key))
    }

    fn get_item(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<Vec<u8>>, Error> {
        Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.data))
    }

    fn get_all(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
        count: Option<u32>,
    ) -> Result<Vec<object_data_model::Model>, Error> {
        let query = range_to_query(key_range);
        let mut sql_query = sea_query::Query::select();
        sql_query
            .from(object_data_model::Column::Table)
            .columns(vec![
                object_data_model::Column::ObjectStoreId,
                object_data_model::Column::Key,
                object_data_model::Column::Data,
            ])
            .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)));
        if let Some(count) = count {
            sql_query.limit(count as u64);
        }
        let (sql, values) = sql_query.build_rusqlite(SqliteQueryBuilder);
        let mut stmt = connection.prepare(&sql)?;
        let models = stmt
            .query_and_then(&*values.as_params(), |row| {
                object_data_model::Model::try_from(row)
            })?
            .collect::<Result<Vec<_>, _>>()?;
        Ok(models)
    }

    fn get_all_keys(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
        count: Option<u32>,
    ) -> Result<Vec<Vec<u8>>, Error> {
        Self::get_all(connection, store, key_range, count)
            .map(|models| models.into_iter().map(|m| m.key).collect())
    }

    fn get_all_items(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
        count: Option<u32>,
    ) -> Result<Vec<Vec<u8>>, Error> {
        Self::get_all(connection, store, key_range, count)
            .map(|models| models.into_iter().map(|m| m.data).collect())
    }

    #[allow(clippy::type_complexity)]
    fn get_all_records(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Error> {
        Self::get_all(connection, store, key_range, None)
            .map(|models| models.into_iter().map(|m| (m.key, m.data)).collect())
    }

    fn put_item(
        connection: &Connection,
        store: object_store_model::Model,
        serialized_key: Vec<u8>,
        value: Vec<u8>,
        should_overwrite: bool,
    ) -> Result<PutItemResult, Error> {
        let existing_item = connection
            .prepare("SELECT * FROM object_data WHERE key = ? AND object_store_id = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![serialized_key, store.id], |row| {
                    object_data_model::Model::try_from(row)
                })
                .optional()
            })?;
        if should_overwrite || existing_item.is_none() {
            connection.execute(
                "INSERT INTO object_data (object_store_id, key, data) VALUES (?, ?, ?)",
                params![store.id, serialized_key, value],
            )?;
            Ok(PutItemResult::Success)
        } else {
            Ok(PutItemResult::CannotOverwrite)
        }
    }

    fn delete_item(
        connection: &Connection,
        store: object_store_model::Model,
        serialized_key: Vec<u8>,
    ) -> Result<(), Error> {
        connection.execute(
            "DELETE FROM object_data WHERE key = ? AND object_store_id = ?",
            params![serialized_key, store.id],
        )?;
        Ok(())
    }

    fn clear(connection: &Connection, store: object_store_model::Model) -> Result<(), Error> {
        connection.execute(
            "DELETE FROM object_data WHERE object_store_id = ?",
            params![store.id],
        )?;
        Ok(())
    }

    fn count(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<usize, Error> {
        let query = range_to_query(key_range);
        let (sql, values) = sea_query::Query::select()
            .expr(Expr::col(object_data_model::Column::Key).count())
            .from(object_data_model::Column::Table)
            .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
            .build_rusqlite(SqliteQueryBuilder);
        connection
            .prepare(&sql)?
            .query_row(&*values.as_params(), |row| row.get(0))
            .map(|count: i64| count as usize)
    }

    fn generate_key(
        connection: &Connection,
        store: &object_store_model::Model,
    ) -> Result<IndexedDBKeyType, Error> {
        if store.auto_increment == 0 {
            unreachable!("Should be caught in the script thread");
        }
        // TODO: handle overflows, this also needs to be able to handle 2^53 as per spec
        let new_key = store.auto_increment + 1;
        connection.execute(
            "UPDATE object_store SET auto_increment = ? WHERE id = ?",
            params![new_key, store.id],
        )?;
        Ok(IndexedDBKeyType::Number(new_key as f64))
    }
}

impl KvsEngine for SqliteEngine {
    type Error = Error;

    fn create_store(
        &self,
        store_name: &str,
        key_path: Option<KeyPath>,
        auto_increment: bool,
    ) -> Result<CreateObjectResult, Self::Error> {
        let mut stmt = self
            .connection
            .prepare("SELECT * FROM object_store WHERE name = ?")?;
        if stmt.exists(params![store_name.to_string()])? {
            // Store already exists
            return Ok(CreateObjectResult::AlreadyExists);
        }
        self.connection.execute(
            "INSERT INTO object_store (name, key_path, auto_increment) VALUES (?, ?, ?)",
            params![
                store_name.to_string(),
                key_path.map(|v| bincode::serialize(&v).unwrap()),
                auto_increment as i32
            ],
        )?;

        Ok(CreateObjectResult::Created)
    }

    fn delete_store(&self, store_name: &str) -> Result<(), Self::Error> {
        let result = self.connection.execute(
            "DELETE FROM object_store WHERE name = ?",
            params![store_name.to_string()],
        )?;
        if result == 0 {
            Err(Error::QueryReturnedNoRows)
        } else if result > 1 {
            Err(Error::QueryReturnedMoreThanOneRow)
        } else {
            Ok(())
        }
    }

    fn close_store(&self, _store_name: &str) -> Result<(), Self::Error> {
        // TODO: do something
        Ok(())
    }

    fn delete_database(self) -> Result<(), Self::Error> {
        // attempt to close the connection first
        let _ = self.connection.close();
        if self.db_path.exists() {
            if let Err(e) = std::fs::remove_dir_all(self.db_path.parent().unwrap()) {
                error!("Failed to delete database: {:?}", e);
            }
        }
        Ok(())
    }

    fn process_transaction(
        &self,
        transaction: KvsTransaction,
    ) -> oneshot::Receiver<Option<Vec<u8>>> {
        let (tx, rx) = oneshot::channel();

        let spawning_pool = if transaction.mode == IndexedDBTxnMode::Readonly {
            self.read_pool.clone()
        } else {
            self.write_pool.clone()
        };
        let path = self.db_path.clone();
        spawning_pool.spawn(move || {
            let connection = Connection::open(path).unwrap();
            for request in transaction.requests {
                let object_store = connection
                    .prepare("SELECT * FROM object_store WHERE name = ?")
                    .and_then(|mut stmt| {
                        stmt.query_row(params![request.store_name.to_string()], |row| {
                            object_store_model::Model::try_from(row)
                        })
                        .optional()
                    });
                fn process_object_store<T>(
                    object_store: Result<Option<object_store_model::Model>, Error>,
                    sender: &IpcSender<BackendResult<T>>,
                ) -> Result<object_store_model::Model, ()>
                where
                    T: Serialize,
                {
                    match object_store {
                        Ok(Some(store)) => Ok(store),
                        Ok(None) => {
                            let _ = sender.send(Err(BackendError::StoreNotFound));
                            Err(())
                        },
                        Err(e) => {
                            let _ = sender.send(Err(BackendError::DbErr(format!("{:?}", e))));
                            Err(())
                        },
                    }
                }

                match request.operation {
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                        sender,
                        key,
                        value,
                        should_overwrite,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let key = match key
                            .map(Ok)
                            .unwrap_or_else(|| Self::generate_key(&connection, &object_store))
                        {
                            Ok(key) => key,
                            Err(e) => {
                                let _ = sender.send(Err(BackendError::DbErr(format!("{:?}", e))));
                                continue;
                            },
                        };
                        let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                        let _ = sender.send(
                            Self::put_item(
                                &connection,
                                object_store,
                                serialized_key,
                                value,
                                should_overwrite,
                            )
                            .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_item(&connection, object_store, key_range)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetAllKeys {
                        sender,
                        key_range,
                        count,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_all_keys(&connection, object_store, key_range, count)
                                .map(|keys| {
                                    keys.into_iter()
                                        .map(|k| bincode::deserialize(&k).unwrap())
                                        .collect()
                                })
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetAllItems {
                        sender,
                        key_range,
                        count,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_all_items(&connection, object_store, key_range, count)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
                        sender,
                        key,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                        let _ = sender.send(
                            Self::delete_item(&connection, object_store, serialized_key)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::count(&connection, object_store, key_range)
                                .map(|r| r as u64)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Iterate {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_all_records(&connection, object_store, key_range)
                                .map(|records| {
                                    records
                                        .into_iter()
                                        .map(|(key, data)| IndexedDBRecord {
                                            key: bincode::deserialize(&key).unwrap(),
                                            primary_key: bincode::deserialize(&key).unwrap(),
                                            value: data,
                                        })
                                        .collect()
                                })
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(sender)) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::clear(&connection, object_store)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetKey {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_key(&connection, object_store, key_range)
                                .map(|key| key.map(|k| bincode::deserialize(&k).unwrap()))
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                }
            }
            let _ = tx.send(None);
        });
        rx
    }

    // TODO: we should be able to error out here, maybe change the trait definition?
    fn has_key_generator(&self, store_name: &str) -> bool {
        self.connection
            .prepare("SELECT * FROM object_store WHERE name = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![store_name.to_string()], |r| {
                    let object_store = object_store_model::Model::try_from(r).unwrap();
                    Ok(object_store.auto_increment)
                })
            })
            .optional()
            .unwrap()
            // TODO: Wrong (change trait definition for this function)
            .unwrap_or_default() !=
            0
    }

    fn key_path(&self, store_name: &str) -> Option<KeyPath> {
        self.connection
            .prepare("SELECT * FROM object_store WHERE name = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![store_name.to_string()], |r| {
                    let object_store = object_store_model::Model::try_from(r).unwrap();
                    Ok(object_store
                        .key_path
                        .map(|key_path| bincode::deserialize(&key_path).unwrap()))
                })
            })
            .optional()
            .unwrap()
            // TODO: Wrong, same issues as has_key_generator
            .unwrap_or_default()
    }

    fn create_index(
        &self,
        store_name: &str,
        index_name: String,
        key_path: KeyPath,
        unique: bool,
        multi_entry: bool,
    ) -> Result<CreateObjectResult, Self::Error> {
        let object_store = self.connection.query_row(
            "SELECT * FROM object_store WHERE name = ?",
            params![store_name.to_string()],
            |row| object_store_model::Model::try_from(row),
        )?;

        let index_exists: bool = self.connection.query_row(
            "SELECT EXISTS(SELECT * FROM object_store_index WHERE name = ? AND object_store_id = ?)",
            params![index_name.to_string(), object_store.id],
            |row| row.get(0),
        )?;
        if index_exists {
            return Ok(CreateObjectResult::AlreadyExists);
        }

        self.connection.execute(
            "INSERT INTO object_store_index (object_store_id, name, key_path, unique_index, multi_entry_index)\
            VALUES (?, ?, ?, ?, ?)",
            params![
                object_store.id,
                index_name.to_string(),
                bincode::serialize(&key_path).unwrap(),
                unique,
                multi_entry,
            ],
        )?;
        Ok(CreateObjectResult::Created)
    }

    fn delete_index(&self, store_name: &str, index_name: String) -> Result<(), Self::Error> {
        let object_store = self.connection.query_row(
            "SELECT * FROM object_store WHERE name = ?",
            params![store_name.to_string()],
            |r| Ok(object_store_model::Model::try_from(r).unwrap()),
        )?;

        // Delete the index if it exists
        let _ = self.connection.execute(
            "DELETE FROM object_store_index WHERE name = ? AND object_store_id = ?",
            params![index_name.to_string(), object_store.id],
        )?;
        Ok(())
    }

    fn version(&self) -> Result<u64, Self::Error> {
        let version: i64 =
            self.connection
                .query_row("SELECT version FROM database LIMIT 1", [], |row| row.get(0))?;
        Ok(u64::from_ne_bytes(version.to_ne_bytes()))
    }

    fn set_version(&self, version: u64) -> Result<(), Self::Error> {
        let rows_affected = self.connection.execute(
            "UPDATE database SET version = ?",
            params![i64::from_ne_bytes(version.to_ne_bytes())],
        )?;
        if rows_affected == 0 {
            return Err(Error::QueryReturnedNoRows);
        }
        Ok(())
    }
}

@@ -1,82 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

// Adapted from:
// https://github.com/mozilla-firefox/firefox/blob/ee102e926521b3e460293b0aea6b54b1a03f6f74/dom/indexedDB/DBSchema.cpp#L78

pub(crate) fn create_tables(conn: &rusqlite::Connection) -> Result<(), rusqlite::Error> {
    const DATABASE: &str = r#"
    create table database (
        name varchar not null
            primary key,
        origin varchar not null,
        version bigint default 0 not null
    ) WITHOUT ROWID;"#;
    conn.execute(DATABASE, [])?;

    const OBJECT_STORE: &str = r#"
    create table object_store (
        id integer not null
            primary key autoincrement,
        name varchar not null
            unique,
        key_path varbinary_blob,
        auto_increment integer default FALSE not null
    );"#;
    conn.execute(OBJECT_STORE, [])?;

    const OBJECT_DATA: &str = r#"
    create table object_data (
        object_store_id integer not null
            references object_store,
        key blob not null,
        data blob not null,
        constraint "pk-object_data"
            primary key (object_store_id, key)
    ) WITHOUT ROWID;"#;
    conn.execute(OBJECT_DATA, [])?;

    const OBJECT_STORE_INDEX: &str = r#"
    create table object_store_index (
        id integer not null
            primary key autoincrement,
        object_store_id integer not null
            references object_store,
        name varchar not null
            unique,
        key_path varbinary_blob not null,
        unique_index boolean not null,
        multi_entry_index boolean not null
    );"#;
    conn.execute(OBJECT_STORE_INDEX, [])?;

    const INDEX_DATA: &str = r#"
    CREATE TABLE index_data (
        index_id INTEGER NOT NULL,
        value BLOB NOT NULL,
        object_data_key BLOB NOT NULL,
        object_store_id INTEGER NOT NULL,
        value_locale BLOB,
        PRIMARY KEY (index_id, value, object_data_key),
        FOREIGN KEY (index_id) REFERENCES object_store_index(id),
        FOREIGN KEY (object_store_id, object_data_key)
            REFERENCES object_data(object_store_id, key)
    ) WITHOUT ROWID;"#;
    conn.execute(INDEX_DATA, [])?;

    const UNIQUE_INDEX_DATA: &str = r#"
    CREATE TABLE unique_index_data (
        index_id INTEGER NOT NULL,
        value BLOB NOT NULL,
        object_store_id INTEGER NOT NULL,
        object_data_key BLOB NOT NULL,
        value_locale BLOB,
        PRIMARY KEY (index_id, value),
        FOREIGN KEY (index_id) REFERENCES object_store_index(id),
        FOREIGN KEY (object_store_id, object_data_key)
            REFERENCES object_data(object_store_id, key)
    ) WITHOUT ROWID;"#;
    conn.execute(UNIQUE_INDEX_DATA, [])?;
    Ok(())
}

@@ -1,24 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    #[iden = "database"]
    Table,
    Name,
    Origin,
    Version,
}

#[allow(dead_code)]
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub name: String,
    pub origin: String,
    pub version: i64,
    // TODO: Hold timestamp for vacuuming
    // TODO: implement vacuuming
}

@@ -1,33 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;

#[derive(Iden)]
pub enum Column {
    #[iden = "object_data"]
    Table,
    ObjectStoreId,
    Key,
    Data,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub object_store_id: i32,
    pub key: Vec<u8>,
    pub data: Vec<u8>,
}

impl TryFrom<&Row<'_>> for Model {
    type Error = rusqlite::Error;

    fn try_from(value: &Row) -> Result<Self, Self::Error> {
        Ok(Self {
            object_store_id: value.get(0)?,
            key: value.get(1)?,
            data: value.get(2)?,
        })
    }
}

@@ -1,27 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    #[iden = "object_store_index"]
    Table,
    ObjectStoreId,
    Name,
    KeyPath,
    UniqueIndex,
    MultiEntryIndex,
}

#[allow(dead_code)]
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub id: i32,
    pub object_store_id: i32,
    pub name: String,
    pub key_path: Vec<u8>,
    pub unique_index: bool,
    pub multi_entry_index: bool,
}

@@ -1,37 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    #[iden = "object_store"]
    Table,
    Id,
    Name,
    KeyPath,
    AutoIncrement,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub id: i32,
    pub name: String,
    pub key_path: Option<Vec<u8>>,
    pub auto_increment: i32,
}

impl TryFrom<&Row<'_>> for Model {
    type Error = rusqlite::Error;

    fn try_from(value: &Row) -> Result<Self, Self::Error> {
        Ok(Self {
            id: value.get(0)?,
            name: value.get(1)?,
            key_path: value.get(2)?,
            auto_increment: value.get(3)?,
        })
    }
}

@@ -1,444 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::borrow::ToOwned;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::path::PathBuf;
use std::sync::Arc;
use std::thread;

use ipc_channel::ipc::{self, IpcError, IpcReceiver, IpcSender};
use log::{debug, warn};
use net_traits::indexeddb_thread::{
    AsyncOperation, BackendError, BackendResult, CreateObjectResult, DbResult, IndexedDBThreadMsg,
    IndexedDBTxnMode, KeyPath, SyncOperation,
};
use rustc_hash::FxHashMap;
use servo_config::pref;
use servo_url::origin::ImmutableOrigin;
use uuid::Uuid;

use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine};
use crate::resource_thread::CoreResourceThreadPool;

pub trait IndexedDBThreadFactory {
    fn new(config_dir: Option<PathBuf>) -> Self;
}

impl IndexedDBThreadFactory for IpcSender<IndexedDBThreadMsg> {
    fn new(config_dir: Option<PathBuf>) -> IpcSender<IndexedDBThreadMsg> {
        let (chan, port) = ipc::channel().unwrap();

        let mut idb_base_dir = PathBuf::new();
        if let Some(p) = config_dir {
            idb_base_dir.push(p);
        }
        idb_base_dir.push("IndexedDB");

        thread::Builder::new()
            .name("IndexedDBManager".to_owned())
            .spawn(move || {
                IndexedDBManager::new(port, idb_base_dir).start();
            })
            .expect("Thread spawning failed");

        chan
    }
}

#[derive(Clone, Eq, Hash, PartialEq)]
pub struct IndexedDBDescription {
    pub origin: ImmutableOrigin,
    pub name: String,
}

impl IndexedDBDescription {
    // randomly generated namespace for our purposes
    const NAMESPACE_SERVO_IDB: &uuid::Uuid = &Uuid::from_bytes([
        0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3,
        0x5d,
    ]);
    // Converts the database description to a folder name where all
    // data for this database is stored
    pub(super) fn as_path(&self) -> PathBuf {
        let mut path = PathBuf::new();

        // uuid v5 is deterministic
        let origin_uuid = Uuid::new_v5(
            Self::NAMESPACE_SERVO_IDB,
            self.origin.ascii_serialization().as_bytes(),
        );
        let db_name_uuid = Uuid::new_v5(Self::NAMESPACE_SERVO_IDB, self.name.as_bytes());
        path.push(origin_uuid.to_string());
        path.push(db_name_uuid.to_string());

        path
    }
}

struct IndexedDBEnvironment<E: KvsEngine> {
    engine: E,
    transactions: FxHashMap<u64, KvsTransaction>,
    serial_number_counter: u64,
}

impl<E: KvsEngine> IndexedDBEnvironment<E> {
    fn new(engine: E) -> IndexedDBEnvironment<E> {
        IndexedDBEnvironment {
            engine,
            transactions: FxHashMap::default(),
            serial_number_counter: 0,
        }
    }

    fn queue_operation(
        &mut self,
        store_name: &str,
        serial_number: u64,
        mode: IndexedDBTxnMode,
        operation: AsyncOperation,
    ) {
        self.transactions
            .entry(serial_number)
            .or_insert_with(|| KvsTransaction {
                requests: VecDeque::new(),
                mode,
            })
            .requests
            .push_back(KvsOperation {
                operation,
                store_name: String::from(store_name),
            });
    }

    // Executes all requests for a transaction (without committing)
    fn start_transaction(&mut self, txn: u64, sender: Option<IpcSender<BackendResult<()>>>) {
        // FIXME:(arihant2math) find optimizations in this function
        // rather than on the engine level code (less repetition)
        if let Some(txn) = self.transactions.remove(&txn) {
            let _ = self.engine.process_transaction(txn).blocking_recv();
        }

        // We have a sender if the transaction is started manually, and they
        // probably want to know when it is finished
        if let Some(sender) = sender {
            if sender.send(Ok(())).is_err() {
                warn!("IDBTransaction starter dropped its channel");
            }
        }
    }

    fn has_key_generator(&self, store_name: &str) -> bool {
        self.engine.has_key_generator(store_name)
    }

    fn key_path(&self, store_name: &str) -> Option<KeyPath> {
        self.engine.key_path(store_name)
    }

    fn create_index(
        &self,
        store_name: &str,
        index_name: String,
        key_path: KeyPath,
        unique: bool,
        multi_entry: bool,
    ) -> DbResult<CreateObjectResult> {
        self.engine
            .create_index(store_name, index_name, key_path, unique, multi_entry)
            .map_err(|err| format!("{err:?}"))
    }

    fn delete_index(&self, store_name: &str, index_name: String) -> DbResult<()> {
        self.engine
            .delete_index(store_name, index_name)
            .map_err(|err| format!("{err:?}"))
    }

    fn create_object_store(
        &mut self,
        store_name: &str,
        key_path: Option<KeyPath>,
        auto_increment: bool,
    ) -> DbResult<CreateObjectResult> {
        self.engine
            .create_store(store_name, key_path, auto_increment)
            .map_err(|err| format!("{err:?}"))
    }

    fn delete_object_store(&mut self, store_name: &str) -> DbResult<()> {
        let result = self.engine.delete_store(store_name);
        result.map_err(|err| format!("{err:?}"))
    }

    fn delete_database(self, sender: IpcSender<BackendResult<()>>) {
        let result = self.engine.delete_database();
        let _ = sender.send(
            result
                .map_err(|err| format!("{err:?}"))
                .map_err(BackendError::from),
        );
    }

    fn version(&self) -> DbResult<u64> {
        self.engine.version().map_err(|err| format!("{err:?}"))
    }

    fn set_version(&mut self, version: u64) -> DbResult<()> {
        self.engine
            .set_version(version)
            .map_err(|err| format!("{err:?}"))
    }
}

struct IndexedDBManager {
    port: IpcReceiver<IndexedDBThreadMsg>,
    idb_base_dir: PathBuf,
    databases: HashMap<IndexedDBDescription, IndexedDBEnvironment<SqliteEngine>>,
    thread_pool: Arc<CoreResourceThreadPool>,
}

impl IndexedDBManager {
    fn new(port: IpcReceiver<IndexedDBThreadMsg>, idb_base_dir: PathBuf) -> IndexedDBManager {
        debug!("New indexedDBManager");

        // Uses an estimate of the system cpus to process IndexedDB transactions
        // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html
        // If no information can be obtained about the system, uses 4 threads as a default
        let thread_count = thread::available_parallelism()
            .map(|i| i.get())
            .unwrap_or(pref!(threadpools_fallback_worker_num) as usize)
            .min(pref!(threadpools_indexeddb_workers_max).max(1) as usize);

        IndexedDBManager {
            port,
            idb_base_dir,
            databases: HashMap::new(),
            thread_pool: Arc::new(CoreResourceThreadPool::new(
                thread_count,
                "IndexedDB".to_string(),
            )),
        }
    }
}

impl IndexedDBManager {
    fn start(&mut self) {
        loop {
            // FIXME:(arihant2math) No message *most likely* means that
            // the ipc sender has been dropped, so we break the loop
            let message = match self.port.recv() {
                Ok(msg) => msg,
                Err(e) => match e {
                    IpcError::Disconnected => {
                        break;
                    },
                    other => {
                        warn!("Error in IndexedDB thread: {:?}", other);
                        continue;
                    },
                },
            };
            match message {
                IndexedDBThreadMsg::Sync(operation) => {
                    self.handle_sync_operation(operation);
                },
                IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => {
                    if let Some(db) = self.get_database_mut(origin, db_name) {
                        // Queues an operation for a transaction without starting it
                        db.queue_operation(&store_name, txn, mode, operation);
                        // FIXME:(arihant2math) Schedule transactions properly
                        // while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) {
                        //     std::hint::spin_loop();
                        // }
                        db.start_transaction(txn, None);
                    }
                },
            }
        }
    }

    fn get_database(
        &self,
        origin: ImmutableOrigin,
        db_name: String,
    ) -> Option<&IndexedDBEnvironment<SqliteEngine>> {
        let idb_description = IndexedDBDescription {
            origin,
            name: db_name,
        };

        self.databases.get(&idb_description)
    }

    fn get_database_mut(
        &mut self,
        origin: ImmutableOrigin,
        db_name: String,
    ) -> Option<&mut IndexedDBEnvironment<SqliteEngine>> {
        let idb_description = IndexedDBDescription {
            origin,
            name: db_name,
        };

        self.databases.get_mut(&idb_description)
    }

    fn handle_sync_operation(&mut self, operation: SyncOperation) {
        match operation {
            SyncOperation::CloseDatabase(sender, origin, db_name) => {
                let idb_description = IndexedDBDescription {
                    origin,
                    name: db_name,
                };
                if let Some(_db) = self.databases.remove(&idb_description) {
                    // TODO: maybe a close database function should be added to the trait and called here?
                }
                let _ = sender.send(Ok(()));
            },
            SyncOperation::OpenDatabase(sender, origin, db_name, version) => {
                let idb_description = IndexedDBDescription {
                    origin,
                    name: db_name,
                };

                let idb_base_dir = self.idb_base_dir.as_path();

                let version = version.unwrap_or(0);

                match self.databases.entry(idb_description.clone()) {
                    Entry::Vacant(e) => {
                        let db = IndexedDBEnvironment::new(
                            SqliteEngine::new(
                                idb_base_dir,
                                &idb_description,
                                self.thread_pool.clone(),
                            )
                            .expect("Failed to create sqlite engine"),
                        );
                        let _ = sender.send(db.version().unwrap_or(version));
                        e.insert(db);
                    },
                    Entry::Occupied(db) => {
                        let _ = sender.send(db.get().version().unwrap_or(version));
                    },
                }
            },
            SyncOperation::DeleteDatabase(sender, origin, db_name) => {
                // https://w3c.github.io/IndexedDB/#delete-a-database
                // Step 4. Let db be the database named name in storageKey,
                // if one exists. Otherwise, return 0 (zero).
                let idb_description = IndexedDBDescription {
                    origin,
                    name: db_name,
                };
                if let Some(db) = self.databases.remove(&idb_description) {
                    db.delete_database(sender);
                } else {
                    let _ = sender.send(Ok(()));
                }
            },
            SyncOperation::HasKeyGenerator(sender, origin, db_name, store_name) => {
                let result = self
                    .get_database(origin, db_name)
                    .map(|db| db.has_key_generator(&store_name));
                let _ = sender.send(result.ok_or(BackendError::DbNotFound));
            },
            SyncOperation::KeyPath(sender, origin, db_name, store_name) => {
                let result = self
                    .get_database(origin, db_name)
                    .map(|db| db.key_path(&store_name));
                let _ = sender.send(result.ok_or(BackendError::DbNotFound));
            },
            SyncOperation::CreateIndex(
                sender,
                origin,
                db_name,
                store_name,
                index_name,
                key_path,
                unique,
                multi_entry,
            ) => {
                if let Some(db) = self.get_database(origin, db_name) {
                    let result =
                        db.create_index(&store_name, index_name, key_path, unique, multi_entry);
                    let _ = sender.send(result.map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => {
                if let Some(db) = self.get_database(origin, db_name) {
                    let result = db.delete_index(&store_name, index_name);
                    let _ = sender.send(result.map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::Commit(sender, _origin, _db_name, _txn) => {
                // FIXME:(arihant2math) This does nothing at the moment
                let _ = sender.send(Ok(()));
            },
            SyncOperation::UpgradeVersion(sender, origin, db_name, _txn, version) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    if version > db.version().unwrap_or(0) {
                        let _ = db.set_version(version);
                    }
                    // Erroring out if the version is not upgraded can be flaky and non-replicable
                    let _ = sender.send(db.version().map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::CreateObjectStore(
                sender,
                origin,
                db_name,
                store_name,
                key_paths,
                auto_increment,
            ) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    let result = db.create_object_store(&store_name, key_paths, auto_increment);
                    let _ = sender.send(result.map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    let result = db.delete_object_store(&store_name);
                    let _ = sender.send(result.map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::StartTransaction(sender, origin, db_name, txn) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    db.start_transaction(txn, Some(sender));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::Version(sender, origin, db_name) => {
                if let Some(db) = self.get_database(origin, db_name) {
                    let _ = sender.send(db.version().map_err(BackendError::from));
                } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::RegisterNewTxn(sender, origin, db_name) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    db.serial_number_counter += 1;
                    let _ = sender.send(db.serial_number_counter);
                }
            },
            SyncOperation::Exit(sender) => {
                // FIXME:(rasviitanen) Nothing to do?
                let _ = sender.send(());
            },
        }
    }
}

@@ -1,9 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

pub use self::idb_thread::IndexedDBThreadFactory;

pub mod engines;

pub mod idb_thread;

@@ -15,12 +15,10 @@ pub mod hsts;
pub mod http_cache;
pub mod http_loader;
pub mod image_cache;
pub mod indexeddb;
pub mod local_directory_listing;
pub mod protocols;
pub mod request_interceptor;
pub mod resource_thread;
mod storage_thread;
pub mod subresource_integrity;
mod websocket_loader;

@ -11,10 +11,9 @@ use std::io::{self, BufReader};
|
|||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex, RwLock, Weak};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use base::generic_channel::GenericSender;
|
||||
use base::id::CookieStoreId;
|
||||
use base::threadpool::ThreadPool;
|
||||
use cookie::Cookie;
|
||||
use crossbeam_channel::Sender;
|
||||
use devtools_traits::DevtoolsControlMsg;
|
||||
|
@ -24,11 +23,9 @@ use ipc_channel::ipc::{self, IpcReceiver, IpcReceiverSet, IpcSender};
|
|||
use log::{debug, warn};
|
||||
use net_traits::blob_url_store::parse_blob_url;
|
||||
use net_traits::filemanager_thread::FileTokenCheck;
|
||||
use net_traits::indexeddb_thread::IndexedDBThreadMsg;
|
||||
use net_traits::pub_domains::public_suffix_list_size_of;
|
||||
use net_traits::request::{Destination, RequestBuilder, RequestId};
|
||||
use net_traits::response::{Response, ResponseInit};
|
||||
use net_traits::storage_thread::StorageThreadMsg;
|
||||
use net_traits::{
|
||||
AsyncRuntime, CookieAsyncResponse, CookieData, CookieSource, CoreResourceMsg,
|
||||
CoreResourceThread, CustomResponseMediator, DiscardFetch, FetchChannels, FetchTaskTarget,
|
||||
|
@ -60,10 +57,8 @@ use crate::filemanager_thread::FileManager;
|
|||
use crate::hsts::{self, HstsList};
|
||||
use crate::http_cache::HttpCache;
|
||||
use crate::http_loader::{HttpState, http_redirect_fetch};
|
||||
use crate::indexeddb::idb_thread::IndexedDBThreadFactory;
|
||||
use crate::protocols::ProtocolRegistry;
|
||||
use crate::request_interceptor::RequestInterceptor;
|
||||
use crate::storage_thread::StorageThreadFactory;
|
||||
use crate::websocket_loader;
|
||||
|
||||
/// Load a file with CA certificate and produce a RootCertStore with the results.
|
||||
|
@ -112,12 +107,9 @@ pub fn new_resource_threads(
|
|||
ignore_certificate_errors,
|
||||
protocols,
|
||||
);
|
||||
let idb: IpcSender<IndexedDBThreadMsg> = IndexedDBThreadFactory::new(config_dir.clone());
|
||||
let storage: GenericSender<StorageThreadMsg> =
|
||||
StorageThreadFactory::new(config_dir, mem_profiler_chan);
|
||||
(
|
||||
ResourceThreads::new(public_core, storage.clone(), idb.clone()),
|
||||
ResourceThreads::new(private_core, storage, idb),
|
||||
ResourceThreads::new(public_core),
|
||||
ResourceThreads::new(private_core),
|
||||
async_runtime,
|
||||
)
|
||||
}
|
||||
|
@ -579,141 +571,11 @@ pub struct CoreResourceManager {
|
|||
sw_managers: HashMap<ImmutableOrigin, IpcSender<CustomResponseMediator>>,
|
||||
filemanager: FileManager,
|
||||
request_interceptor: RequestInterceptor,
|
||||
thread_pool: Arc<CoreResourceThreadPool>,
|
||||
thread_pool: Arc<ThreadPool>,
|
||||
ca_certificates: CACertificates,
|
||||
ignore_certificate_errors: bool,
|
||||
}
|
||||

/// The state of the thread-pool used by CoreResource.
struct ThreadPoolState {
    /// The number of active workers.
    active_workers: u32,
    /// Whether the pool can spawn additional work.
    active: bool,
}

impl ThreadPoolState {
    pub fn new() -> ThreadPoolState {
        ThreadPoolState {
            active_workers: 0,
            active: true,
        }
    }

    /// Is the pool still able to spawn new work?
    pub fn is_active(&self) -> bool {
        self.active
    }

    /// How many workers are currently active?
    pub fn active_workers(&self) -> u32 {
        self.active_workers
    }

    /// Prevent additional work from being spawned.
    pub fn switch_to_inactive(&mut self) {
        self.active = false;
    }

    /// Add to the count of active workers.
    pub fn increment_active(&mut self) {
        self.active_workers += 1;
    }

    /// Subtract from the count of active workers.
    pub fn decrement_active(&mut self) {
        self.active_workers -= 1;
    }
}

/// Threadpool used by Fetch and file operations.
pub struct CoreResourceThreadPool {
    pool: rayon::ThreadPool,
    state: Arc<Mutex<ThreadPoolState>>,
}

impl CoreResourceThreadPool {
    pub fn new(num_threads: usize, pool_name: String) -> CoreResourceThreadPool {
        debug!("Creating new CoreResourceThreadPool with {num_threads} threads!");
        let pool = rayon::ThreadPoolBuilder::new()
            .thread_name(move |i| format!("{pool_name}#{i}"))
            .num_threads(num_threads)
            .build()
            .unwrap();
        let state = Arc::new(Mutex::new(ThreadPoolState::new()));
        CoreResourceThreadPool { pool, state }
    }

    /// Spawn work on the thread-pool, if still active.
    ///
    /// There is no need to give feedback to the caller,
    /// because if we do not perform work,
    /// it is because the system as a whole is exiting.
    pub fn spawn<OP>(&self, work: OP)
    where
        OP: FnOnce() + Send + 'static,
    {
        {
            let mut state = self.state.lock().unwrap();
            if state.is_active() {
                state.increment_active();
            } else {
                // Don't spawn any work.
                return;
            }
        }

        let state = self.state.clone();

        self.pool.spawn(move || {
            {
                let mut state = state.lock().unwrap();
                if !state.is_active() {
                    // Decrement number of active workers and return,
                    // without doing any work.
                    return state.decrement_active();
                }
            }
            // Perform work.
            work();
            {
                // Decrement number of active workers.
                let mut state = state.lock().unwrap();
                state.decrement_active();
            }
        });
    }

    /// Prevent further work from being spawned,
    /// and wait until all workers are done,
    /// or a timeout of roughly one second has been reached.
    pub fn exit(&self) {
        {
            let mut state = self.state.lock().unwrap();
            state.switch_to_inactive();
        }
        let mut rounds = 0;
        loop {
            rounds += 1;
            {
                let state = self.state.lock().unwrap();
                let still_active = state.active_workers();

                if still_active == 0 || rounds == 10 {
                    if still_active > 0 {
                        debug!(
                            "Exiting CoreResourceThreadPool with {:?} still working (should be zero)",
                            still_active
                        );
                    }
                    break;
                }
            }
            thread::sleep(Duration::from_millis(100));
        }
    }
}

impl CoreResourceManager {
    pub fn new(
        devtools_sender: Option<Sender<DevtoolsControlMsg>>,
@ -726,7 +588,7 @@ impl CoreResourceManager {
            .map(|i| i.get())
            .unwrap_or(servo_config::pref!(threadpools_fallback_worker_num) as usize)
            .min(servo_config::pref!(threadpools_resource_workers_max).max(1) as usize);
        let pool = CoreResourceThreadPool::new(num_threads, "CoreResourceThreadPool".to_string());
        let pool = ThreadPool::new(num_threads, "CoreResourceThreadPool".to_string());
        let pool_handle = Arc::new(pool);
        CoreResourceManager {
            devtools_sender,
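Aside: the two pool lines above are a pure rename at the call site; judging from this diff, base::threadpool::ThreadPool keeps the spawn/exit protocol of the CoreResourceThreadPool shown earlier. A minimal usage sketch under that assumption (the pool size and name here are arbitrary):

    use std::sync::Arc;
    use base::threadpool::ThreadPool;

    let pool = Arc::new(ThreadPool::new(4, "ExamplePool".to_string()));
    pool.spawn(|| {
        // Runs on a rayon worker; silently skipped if exit() has already run.
    });
    pool.exit(); // Stop accepting work and wait up to ~1s for active workers.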
@ -1,360 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::borrow::ToOwned;
use std::collections::{BTreeMap, HashMap};
use std::path::PathBuf;
use std::thread;

use base::generic_channel::{self, GenericReceiver, GenericSender};
use base::id::WebViewId;
use malloc_size_of::MallocSizeOf;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use profile_traits::mem::{
    ProcessReports, ProfilerChan as MemProfilerChan, Report, ReportKind, perform_memory_report,
};
use profile_traits::path;
use rustc_hash::FxHashMap;
use servo_url::ServoUrl;

const QUOTA_SIZE_LIMIT: usize = 5 * 1024 * 1024;

pub trait StorageThreadFactory {
    fn new(config_dir: Option<PathBuf>, mem_profiler_chan: MemProfilerChan) -> Self;
}

impl StorageThreadFactory for GenericSender<StorageThreadMsg> {
    /// Create a storage thread
    fn new(
        config_dir: Option<PathBuf>,
        mem_profiler_chan: MemProfilerChan,
    ) -> GenericSender<StorageThreadMsg> {
        let (chan, port) = generic_channel::channel().unwrap();
        let chan2 = chan.clone();
        thread::Builder::new()
            .name("StorageManager".to_owned())
            .spawn(move || {
                mem_profiler_chan.run_with_memory_reporting(
                    || StorageManager::new(port, config_dir).start(),
                    String::from("storage-reporter"),
                    chan2,
                    StorageThreadMsg::CollectMemoryReport,
                );
            })
            .expect("Thread spawning failed");
        chan
    }
}
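For orientation, callers only ever hold the GenericSender returned by the factory above; each query is a request/reply pair over a freshly created channel, mirroring the match arms below. A caller-side sketch for the Length message (webview_id and url stand in for values the caller already has):

    let (reply_tx, reply_rx) = generic_channel::channel().unwrap();
    storage
        .send(StorageThreadMsg::Length(
            reply_tx,
            StorageType::Local,
            webview_id,
            url,
        ))
        .unwrap();
    let item_count: usize = reply_rx.recv().unwrap();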
type OriginEntry = (usize, BTreeMap<String, String>);

struct StorageManager {
    port: GenericReceiver<StorageThreadMsg>,
    session_data: FxHashMap<WebViewId, HashMap<String, OriginEntry>>,
    local_data: HashMap<String, OriginEntry>,
    config_dir: Option<PathBuf>,
}

impl StorageManager {
    fn new(port: GenericReceiver<StorageThreadMsg>, config_dir: Option<PathBuf>) -> StorageManager {
        let mut local_data = HashMap::new();
        if let Some(ref config_dir) = config_dir {
            base::read_json_from_file(&mut local_data, config_dir, "local_data.json");
        }
        StorageManager {
            port,
            session_data: FxHashMap::default(),
            local_data,
            config_dir,
        }
    }
}

impl StorageManager {
    fn start(&mut self) {
        loop {
            match self.port.recv().unwrap() {
                StorageThreadMsg::Length(sender, storage_type, webview_id, url) => {
                    self.length(sender, storage_type, webview_id, url)
                },
                StorageThreadMsg::Key(sender, storage_type, webview_id, url, index) => {
                    self.key(sender, storage_type, webview_id, url, index)
                },
                StorageThreadMsg::Keys(sender, storage_type, webview_id, url) => {
                    self.keys(sender, storage_type, webview_id, url)
                },
                StorageThreadMsg::SetItem(sender, storage_type, webview_id, url, name, value) => {
                    self.set_item(sender, storage_type, webview_id, url, name, value);
                    self.save_state()
                },
                StorageThreadMsg::GetItem(sender, storage_type, webview_id, url, name) => {
                    self.request_item(sender, storage_type, webview_id, url, name)
                },
                StorageThreadMsg::RemoveItem(sender, storage_type, webview_id, url, name) => {
                    self.remove_item(sender, storage_type, webview_id, url, name);
                    self.save_state()
                },
                StorageThreadMsg::Clear(sender, storage_type, webview_id, url) => {
                    self.clear(sender, storage_type, webview_id, url);
                    self.save_state()
                },
                StorageThreadMsg::Clone {
                    sender,
                    src: src_webview_id,
                    dest: dest_webview_id,
                } => {
                    self.clone(src_webview_id, dest_webview_id);
                    let _ = sender.send(());
                },
                StorageThreadMsg::CollectMemoryReport(sender) => {
                    let reports = self.collect_memory_reports();
                    sender.send(ProcessReports::new(reports));
                },
                StorageThreadMsg::Exit(sender) => {
                    // Nothing to do since we save the localstorage set eagerly.
                    let _ = sender.send(());
                    break;
                },
            }
        }
    }

    fn collect_memory_reports(&self) -> Vec<Report> {
        let mut reports = vec![];
        perform_memory_report(|ops| {
            reports.push(Report {
                path: path!["storage", "local"],
                kind: ReportKind::ExplicitJemallocHeapSize,
                size: self.local_data.size_of(ops),
            });

            reports.push(Report {
                path: path!["storage", "session"],
                kind: ReportKind::ExplicitJemallocHeapSize,
                size: self.session_data.size_of(ops),
            });
        });
        reports
    }

    fn save_state(&self) {
        if let Some(ref config_dir) = self.config_dir {
            base::write_json_to_file(&self.local_data, config_dir, "local_data.json");
        }
    }

    fn select_data(
        &self,
        storage_type: StorageType,
        webview_id: WebViewId,
        origin: &str,
    ) -> Option<&OriginEntry> {
        match storage_type {
            StorageType::Session => self
                .session_data
                .get(&webview_id)
                .and_then(|origin_map| origin_map.get(origin)),
            StorageType::Local => self.local_data.get(origin),
        }
    }

    fn select_data_mut(
        &mut self,
        storage_type: StorageType,
        webview_id: WebViewId,
        origin: &str,
    ) -> Option<&mut OriginEntry> {
        match storage_type {
            StorageType::Session => self
                .session_data
                .get_mut(&webview_id)
                .and_then(|origin_map| origin_map.get_mut(origin)),
            StorageType::Local => self.local_data.get_mut(origin),
        }
    }

    fn ensure_data_mut(
        &mut self,
        storage_type: StorageType,
        webview_id: WebViewId,
        origin: &str,
    ) -> &mut OriginEntry {
        match storage_type {
            StorageType::Session => self
                .session_data
                .entry(webview_id)
                .or_default()
                .entry(origin.to_string())
                .or_default(),
            StorageType::Local => self.local_data.entry(origin.to_string()).or_default(),
        }
    }

    fn length(
        &self,
        sender: GenericSender<usize>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data(storage_type, webview_id, &origin);
        sender
            .send(data.map_or(0, |(_, entry)| entry.len()))
            .unwrap();
    }

    fn key(
        &self,
        sender: GenericSender<Option<String>>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
        index: u32,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data(storage_type, webview_id, &origin);
        let key = data
            .and_then(|(_, entry)| entry.keys().nth(index as usize))
            .cloned();
        sender.send(key).unwrap();
    }

    fn keys(
        &self,
        sender: GenericSender<Vec<String>>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data(storage_type, webview_id, &origin);
        let keys = data.map_or(vec![], |(_, entry)| entry.keys().cloned().collect());

        sender.send(keys).unwrap();
    }

    /// Sends Ok((changed, old_value)), where old_value is Some only when a previous
    /// entry existed under the same key with a different value; sends Err(()) to
    /// indicate that the operation would exceed the quota limit.
    fn set_item(
        &mut self,
        sender: GenericSender<Result<(bool, Option<String>), ()>>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
        name: String,
        value: String,
    ) {
        let origin = self.origin_as_string(url);

        let (this_storage_size, other_storage_size) = {
            let local_data = self.select_data(StorageType::Local, webview_id, &origin);
            let session_data = self.select_data(StorageType::Session, webview_id, &origin);
            let local_data_size = local_data.map_or(0, |&(total, _)| total);
            let session_data_size = session_data.map_or(0, |&(total, _)| total);
            match storage_type {
                StorageType::Local => (local_data_size, session_data_size),
                StorageType::Session => (session_data_size, local_data_size),
            }
        };

        let &mut (ref mut total, ref mut entry) =
            self.ensure_data_mut(storage_type, webview_id, &origin);

        let mut new_total_size = this_storage_size + value.len();
        if let Some(old_value) = entry.get(&name) {
            new_total_size -= old_value.len();
        } else {
            new_total_size += name.len();
        }

        let message = if (new_total_size + other_storage_size) > QUOTA_SIZE_LIMIT {
            Err(())
        } else {
            *total = new_total_size;
            entry
                .insert(name.clone(), value.clone())
                .map_or(Ok((true, None)), |old| {
                    if old == value {
                        Ok((false, None))
                    } else {
                        Ok((true, Some(old)))
                    }
                })
        };
        sender.send(message).unwrap();
    }
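The accounting in set_item is worth tracing once: an origin's running total is the sum of key and value byte lengths, a replaced value contributes only its length delta, and the quota check is against the combined local plus session usage. A worked example with made-up values under the 5 MiB QUOTA_SIZE_LIMIT:

    // Empty origin, storing name = "theme" (5 bytes), value = "dark" (4 bytes):
    // new_total_size = 0 + 4 + 5 = 9 (a new key, so the name counts too).
    // Overwriting "dark" with "light" later applies only the value delta:
    // new_total_size = 9 + 5 - 4 = 10.
    // The write succeeds while new_total_size + other_storage_size <= 5 * 1024 * 1024.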
    fn request_item(
        &self,
        sender: GenericSender<Option<String>>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
        name: String,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data(storage_type, webview_id, &origin);
        sender
            .send(data.and_then(|(_, entry)| entry.get(&name)).cloned())
            .unwrap();
    }

    /// Sends Some(old_value) in case there was a previous value with the key name, otherwise sends None
    fn remove_item(
        &mut self,
        sender: GenericSender<Option<String>>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
        name: String,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data_mut(storage_type, webview_id, &origin);
        let old_value = data.and_then(|&mut (ref mut total, ref mut entry)| {
            entry.remove(&name).inspect(|old| {
                *total -= name.len() + old.len();
            })
        });
        sender.send(old_value).unwrap();
    }

    fn clear(
        &mut self,
        sender: GenericSender<bool>,
        storage_type: StorageType,
        webview_id: WebViewId,
        url: ServoUrl,
    ) {
        let origin = self.origin_as_string(url);
        let data = self.select_data_mut(storage_type, webview_id, &origin);
        sender
            .send(data.is_some_and(|&mut (ref mut total, ref mut entry)| {
                if !entry.is_empty() {
                    entry.clear();
                    *total = 0;
                    true
                } else {
                    false
                }
            }))
            .unwrap();
    }

    fn clone(&mut self, src_webview_id: WebViewId, dest_webview_id: WebViewId) {
        let Some(src_origin_entries) = self.session_data.get(&src_webview_id) else {
            return;
        };

        let dest_origin_entries = src_origin_entries.clone();
        self.session_data
            .insert(dest_webview_id, dest_origin_entries);
    }

    fn origin_as_string(&self, url: ServoUrl) -> String {
        url.origin().ascii_serialization()
    }
}
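Note that storage is partitioned by the ASCII serialization of the origin, which keeps the scheme and host but drops the path, query, and any default port. For instance, with the url crate that servo_url wraps:

    use url::Url;

    let url = Url::parse("https://example.com:443/profile?tab=1").unwrap();
    // The default port, path, and query are not part of the origin:
    assert_eq!(url.origin().ascii_serialization(), "https://example.com");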
@ -12,6 +12,7 @@ use std::sync::{Arc, Mutex, Weak};
use std::time::{Duration, SystemTime};

use base::id::{TEST_PIPELINE_ID, TEST_WEBVIEW_ID};
use base::threadpool::ThreadPool;
use content_security_policy as csp;
use crossbeam_channel::{Sender, unbounded};
use devtools_traits::{HttpRequest as DevtoolsHttpRequest, HttpResponse as DevtoolsHttpResponse};
@ -33,7 +34,6 @@ use net::filemanager_thread::FileManager;
use net::hsts::HstsEntry;
use net::protocols::ProtocolRegistry;
use net::request_interceptor::RequestInterceptor;
use net::resource_thread::CoreResourceThreadPool;
use net_traits::filemanager_thread::FileTokenCheck;
use net_traits::http_status::HttpStatus;
use net_traits::request::{
@ -235,7 +235,7 @@ fn test_file() {
        .origin(url.origin())
        .build();

    let pool = CoreResourceThreadPool::new(1, "CoreResourceTestPool".to_string());
    let pool = ThreadPool::new(1, "CoreResourceTestPool".to_string());
    let pool_handle = Arc::new(pool);
    let mut context = new_fetch_context(None, None, Some(Arc::downgrade(&pool_handle)));
    let fetch_response = fetch_with_context(request, &mut context);
@ -8,10 +8,10 @@ use std::path::PathBuf;
use std::sync::Arc;

use base::id::TEST_WEBVIEW_ID;
use base::threadpool::ThreadPool;
use embedder_traits::FilterPattern;
use ipc_channel::ipc;
use net::filemanager_thread::FileManager;
use net::resource_thread::CoreResourceThreadPool;
use net_traits::blob_url_store::BlobURLStoreError;
use net_traits::filemanager_thread::{
    FileManagerThreadError, FileManagerThreadMsg, ReadFileProgress,
@ -26,7 +26,7 @@ fn test_filemanager() {
    preferences.dom_testing_html_input_element_select_files_enabled = true;
    servo_config::prefs::set(preferences);

    let pool = CoreResourceThreadPool::new(1, "CoreResourceTestPool".to_string());
    let pool = ThreadPool::new(1, "CoreResourceTestPool".to_string());
    let pool_handle = Arc::new(pool);
    let filemanager = FileManager::new(create_embedder_proxy(), Arc::downgrade(&pool_handle));
@ -15,7 +15,6 @@ mod hsts;
mod http_cache;
mod http_loader;
mod resource_thread;
mod sqlite;
mod subresource_integrity;

use core::convert::Infallible;
@ -26,6 +25,7 @@ use std::net::TcpListener as StdTcpListener;
use std::path::{Path, PathBuf};
use std::sync::{Arc, LazyLock, Mutex, RwLock, Weak};

use base::threadpool::ThreadPool;
use content_security_policy as csp;
use crossbeam_channel::{Receiver, Sender, unbounded};
use devtools_traits::DevtoolsControlMsg;
@ -45,7 +45,6 @@ use net::fetch::methods::{self, FetchContext};
use net::filemanager_thread::FileManager;
use net::protocols::ProtocolRegistry;
use net::request_interceptor::RequestInterceptor;
use net::resource_thread::CoreResourceThreadPool;
use net::test::HttpState;
use net_traits::filemanager_thread::FileTokenCheck;
use net_traits::request::Request;
@ -163,7 +162,7 @@ fn create_http_state(fc: Option<EmbedderProxy>) -> HttpState {
fn new_fetch_context(
    dc: Option<Sender<DevtoolsControlMsg>>,
    fc: Option<EmbedderProxy>,
    pool_handle: Option<Weak<CoreResourceThreadPool>>,
    pool_handle: Option<Weak<ThreadPool>>,
) -> FetchContext {
    let _ = &*ASYNC_RUNTIME;
@ -1,328 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::collections::VecDeque;
use std::sync::Arc;

use net::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine};
use net::indexeddb::idb_thread::IndexedDBDescription;
use net::resource_thread::CoreResourceThreadPool;
use net_traits::indexeddb_thread::{
    AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, CreateObjectResult,
    IndexedDBKeyRange, IndexedDBKeyType, IndexedDBTxnMode, KeyPath, PutItemResult,
};
use serde::{Deserialize, Serialize};
use servo_url::ImmutableOrigin;
use url::Host;

fn test_origin() -> ImmutableOrigin {
    ImmutableOrigin::Tuple(
        "test_origin".to_string(),
        Host::Domain("localhost".to_string()),
        80,
    )
}

fn get_pool() -> Arc<CoreResourceThreadPool> {
    Arc::new(CoreResourceThreadPool::new(1, "test".to_string()))
}
#[test]
fn test_cycle() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    // Test create
    let _ = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool.clone(),
    )
    .unwrap();
    // Test open
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool.clone(),
    )
    .unwrap();
    let version = db.version().expect("Failed to get version");
    assert_eq!(version, 0);
    db.set_version(5).unwrap();
    let new_version = db.version().expect("Failed to get new version");
    assert_eq!(new_version, 5);
    db.delete_database().expect("Failed to delete database");
}
#[test]
fn test_create_store() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    let store_name = "test_store";
    let result = db.create_store(store_name, None, true);
    assert!(result.is_ok());
    let create_result = result.unwrap();
    assert_eq!(create_result, CreateObjectResult::Created);
    // Try to create the same store again
    let result = db.create_store(store_name, None, false);
    assert!(result.is_ok());
    let create_result = result.unwrap();
    assert_eq!(create_result, CreateObjectResult::AlreadyExists);
    // Ensure store was not overwritten
    assert!(db.has_key_generator(store_name));
}
#[test]
fn test_create_store_empty_name() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    let store_name = "";
    let result = db.create_store(store_name, None, true);
    assert!(result.is_ok());
    let create_result = result.unwrap();
    assert_eq!(create_result, CreateObjectResult::Created);
}
#[test]
fn test_injection() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    // Create a normal store
    let store_name1 = "test_store";
    let result = db.create_store(store_name1, None, true);
    assert!(result.is_ok());
    let create_result = result.unwrap();
    assert_eq!(create_result, CreateObjectResult::Created);
    // Injection
    let store_name2 = "' OR 1=1 -- -";
    let result = db.create_store(store_name2, None, false);
    assert!(result.is_ok());
    let create_result = result.unwrap();
    assert_eq!(create_result, CreateObjectResult::Created);
}
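The injection test above feeds a hostile store name (' OR 1=1 -- -) through the engine and expects it to be stored as plain data, which is what parameter binding guarantees. A generic rusqlite sketch of that idea (not the engine's actual code; the table and column names here are made up):

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute("CREATE TABLE stores (name TEXT UNIQUE)", [])?;
        // The hostile name is passed as a bound parameter, never spliced
        // into the SQL text, so it cannot alter the statement.
        conn.execute("INSERT INTO stores (name) VALUES (?1)", ["' OR 1=1 -- -"])?;
        Ok(())
    }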
#[test]
fn test_key_path() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    let store_name = "test_store";
    let result = db.create_store(store_name, Some(KeyPath::String("test".to_string())), true);
    assert!(result.is_ok());
    assert_eq!(
        db.key_path(store_name),
        Some(KeyPath::String("test".to_string()))
    );
}
#[test]
fn test_delete_store() {
    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    db.create_store("test_store", None, false)
        .expect("Failed to create store");
    // Delete the store
    db.delete_store("test_store")
        .expect("Failed to delete store");
    // Try to delete the same store again
    let result = db.delete_store("test_store");
    assert!(result.is_err());
    // Try to delete a non-existing store
    let result = db.delete_store("test_store");
    // Per spec, deleting a missing store is reported as an error
    assert!(result.is_err());
}
#[test]
fn test_async_operations() {
    fn get_channel<T>() -> (
        ipc_channel::ipc::IpcSender<T>,
        ipc_channel::ipc::IpcReceiver<T>,
    )
    where
        T: for<'de> Deserialize<'de> + Serialize,
    {
        ipc_channel::ipc::channel().unwrap()
    }

    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let thread_pool = get_pool();
    let db = SqliteEngine::new(
        base_dir.path(),
        &IndexedDBDescription {
            name: "test_db".to_string(),
            origin: test_origin(),
        },
        thread_pool,
    )
    .unwrap();
    let store_name = "test_store";
    db.create_store(store_name, None, false)
        .expect("Failed to create store");
    let put = get_channel();
    let put2 = get_channel();
    let put3 = get_channel();
    let put_dup = get_channel();
    let get_item_some = get_channel();
    let get_item_none = get_channel();
    let get_all_items = get_channel();
    let count = get_channel();
    let remove = get_channel();
    let clear = get_channel();
    let rx = db.process_transaction(KvsTransaction {
        mode: IndexedDBTxnMode::Readwrite,
        requests: VecDeque::from(vec![
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                    sender: put.0,
                    key: Some(IndexedDBKeyType::Number(1.0)),
                    value: vec![1, 2, 3],
                    should_overwrite: false,
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                    sender: put2.0,
                    key: Some(IndexedDBKeyType::String("2.0".to_string())),
                    value: vec![4, 5, 6],
                    should_overwrite: false,
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                    sender: put3.0,
                    key: Some(IndexedDBKeyType::Array(vec![
                        IndexedDBKeyType::String("3".to_string()),
                        IndexedDBKeyType::Number(0.0),
                    ])),
                    value: vec![7, 8, 9],
                    should_overwrite: false,
                }),
            },
            // Try to put a duplicate key without overwrite
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                    sender: put_dup.0,
                    key: Some(IndexedDBKeyType::Number(1.0)),
                    value: vec![10, 11, 12],
                    should_overwrite: false,
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                    sender: get_item_some.0,
                    key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                    sender: get_item_none.0,
                    key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(5.0)),
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetAllItems {
                    sender: get_all_items.0,
                    key_range: IndexedDBKeyRange::lower_bound(IndexedDBKeyType::Number(0.0), false),
                    count: None,
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
                    sender: count.0,
                    key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
                    sender: remove.0,
                    key: IndexedDBKeyType::Number(1.0),
                }),
            },
            KvsOperation {
                store_name: store_name.to_owned(),
                operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(clear.0)),
            },
        ]),
    });
    let _ = rx.blocking_recv().unwrap();
    put.1.recv().unwrap().unwrap();
    put2.1.recv().unwrap().unwrap();
    put3.1.recv().unwrap().unwrap();
    let err = put_dup.1.recv().unwrap().unwrap();
    assert_eq!(err, PutItemResult::CannotOverwrite);
    let get_result = get_item_some.1.recv().unwrap();
    let value = get_result.unwrap();
    assert_eq!(value, Some(vec![1, 2, 3]));
    let get_result = get_item_none.1.recv().unwrap();
    let value = get_result.unwrap();
    assert_eq!(value, None);
    let all_items = get_all_items.1.recv().unwrap().unwrap();
    assert_eq!(all_items.len(), 3);
    // Check that all three items are present
    assert!(all_items.contains(&vec![1, 2, 3]));
    assert!(all_items.contains(&vec![4, 5, 6]));
    assert!(all_items.contains(&vec![7, 8, 9]));
    let amount = count.1.recv().unwrap().unwrap();
    assert_eq!(amount, 1);
    remove.1.recv().unwrap().unwrap();
    clear.1.recv().unwrap().unwrap();
}