mirror of
https://github.com/servo/servo.git
synced 2025-09-30 00:29:14 +01:00
Switch indexeddb backend to sqlite and improve IPC messaging (#38187)
- Use sqlite instead of heed (one indexed database = one sqlite database)
- Implement the backend for indexes
- Use key ranges where needed (as specified by the spec)
- Implement `getKey`
- Fix channel error messaging (this led to a number of changes to how async requests are handled)

Note: `components/net/indexeddb/engines/sqlite/serialize.rs` is unused; I can delete it if needed.

Testing: Switching to sqlite eliminated many panics (exposing some new failures).

Fixes: #38040

---------

Signed-off-by: Ashwin Naren <arihant2math@gmail.com>
This commit is contained in: parent f4bbdf8010, commit fc3feceee5.
59 changed files with 2002 additions and 818 deletions
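As a reading aid for the messaging change described above, here is a rough sketch (assembled from the types in this diff, not itself a line of the commit): previously every Async message carried one opaque result channel, whereas now each operation embeds its own typed sender, so a failure can carry a reason:

// Before: one sender per Async message; errors were an opaque ().
//   IndexedDBThreadMsg::Async(sender, origin, db_name, store_name, txn, mode, operation)
//   sender: IpcSender<Result<Option<IdbResult>, ()>>
//
// After: the sender lives inside each operation and is typed per result.
//   IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation)
//   AsyncReadOnlyOperation::GetItem {
//       sender: IpcSender<BackendResult<Option<Vec<u8>>>>,
//       key_range: IndexedDBKeyRange,
//   }
// where BackendResult<T> pairs T with a BackendError that can name the
// failure (e.g. StoreNotFound, DbNotFound, DbErr(String)).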
@@ -36,7 +36,6 @@ futures-core = { version = "0.3.30", default-features = false }
futures-util = { version = "0.3.30", default-features = false }
generic-array = "0.14"
headers = { workspace = true }
-heed = "0.20"
http = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1", "http2"] }
@@ -60,6 +59,8 @@ rustls = { workspace = true }
rustls-pemfile = { workspace = true }
rustls-pki-types = { workspace = true }
resvg = { workspace = true }
+rusqlite = { version = "0.37", features = ["bundled"] }
+sea-query = { version = "0.32", default-features = false, features = ["derive", "backend-sqlite"] }
serde = { workspace = true }
serde_json = { workspace = true }
servo_arc = { workspace = true }
@@ -86,6 +87,7 @@ futures = { version = "0.3", features = ["compat"] }
hyper = { workspace = true, features = ["full"] }
hyper-util = { workspace = true, features = ["server-graceful"] }
rustls = { workspace = true, features = ["aws-lc-rs"] }
+tempfile = "3"

[[test]]
name = "main"
@@ -1,304 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};

use heed::types::*;
use heed::{Database, Env, EnvOpenOptions};
use log::warn;
use net_traits::indexeddb_thread::{
    AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, IdbResult, IndexedDBTxnMode,
};
use tokio::sync::oneshot;

use super::{KvsEngine, KvsTransaction, SanitizedName};
use crate::resource_thread::CoreResourceThreadPool;

type HeedDatabase = Database<Bytes, Bytes>;

// A simple store that also has a key generator that can be used if no key
// is provided for the stored objects
#[derive(Clone)]
struct Store {
    inner: HeedDatabase,
    // https://www.w3.org/TR/IndexedDB-2/#key-generator
    key_generator: Option<u64>,
}

pub struct HeedEngine {
    heed_env: Arc<Env>,
    open_stores: Arc<RwLock<HashMap<SanitizedName, Store>>>,
    read_pool: Arc<CoreResourceThreadPool>,
    write_pool: Arc<CoreResourceThreadPool>,
}

impl HeedEngine {
    pub fn new(
        base_dir: &Path,
        db_file_name: &Path,
        thread_pool: Arc<CoreResourceThreadPool>,
    ) -> Self {
        let mut db_dir = PathBuf::new();
        db_dir.push(base_dir);
        db_dir.push(db_file_name);

        std::fs::create_dir_all(&db_dir).expect("Could not create OS directory for idb");
        // FIXME:(arihant2math) gracefully handle errors like hitting max dbs
        #[allow(unsafe_code)]
        let env = unsafe {
            EnvOpenOptions::new()
                .max_dbs(1024)
                .open(db_dir)
                .expect("Failed to open db_dir")
        };
        Self {
            heed_env: Arc::new(env),
            open_stores: Arc::new(RwLock::new(HashMap::new())),
            read_pool: thread_pool.clone(),
            write_pool: thread_pool,
        }
    }
}

impl KvsEngine for HeedEngine {
    type Error = heed::Error;

    fn create_store(&self, store_name: SanitizedName, auto_increment: bool) -> heed::Result<()> {
        let mut write_txn = self.heed_env.write_txn()?;
        let _ = self.heed_env.clear_stale_readers();
        let new_store: HeedDatabase = self
            .heed_env
            .create_database(&mut write_txn, Some(&*store_name.to_string()))?;

        write_txn.commit()?;

        let key_generator = { if auto_increment { Some(0) } else { None } };

        let store = Store {
            inner: new_store,
            key_generator,
        };

        self.open_stores
            .write()
            .expect("Could not acquire lock on stores")
            .insert(store_name, store);
        Ok(())
    }

    fn delete_store(&self, store_name: SanitizedName) -> heed::Result<()> {
        // TODO: Actually delete store instead of just clearing it
        let mut write_txn = self.heed_env.write_txn()?;
        let store: HeedDatabase = self
            .heed_env
            .create_database(&mut write_txn, Some(&*store_name.to_string()))?;
        store.clear(&mut write_txn)?;
        write_txn.commit()?;

        let mut open_stores = self.open_stores.write().unwrap();
        open_stores.retain(|key, _| key != &store_name);
        Ok(())
    }

    fn close_store(&self, store_name: SanitizedName) -> heed::Result<()> {
        // FIXME: (arihant2math) unused
        // FIXME:(arihant2math) return error if no store ...
        let mut open_stores = self.open_stores.write().unwrap();
        open_stores.retain(|key, _| key != &store_name);
        Ok(())
    }

    // Starts a transaction, processes all operations for that transaction,
    // and commits the changes.
    fn process_transaction(
        &self,
        transaction: KvsTransaction,
    ) -> oneshot::Receiver<Option<Vec<u8>>> {
        // This executes in a thread pool, and `readwrite` transactions
        // will block their thread if the writer is occupied, so we can
        // probably do some smart things here in order to optimize.
        // Queueing 8 writers will for example block 7 threads,
        // so write operations are reserved for just one thread,
        // so that the rest of the threads can work in parallel with read txns.
        let heed_env = self.heed_env.clone();
        let stores = self.open_stores.clone();

        let (tx, rx) = oneshot::channel();
        if let IndexedDBTxnMode::Readonly = transaction.mode {
            self.read_pool.spawn(move || {
                let env = heed_env;
                let rtxn = env.read_txn().expect("Could not create idb store reader");
                let mut results = vec![];
                for request in transaction.requests {
                    match request.operation {
                        AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem(key)) => {
                            let key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .read()
                                .expect("Could not acquire read lock on stores");
                            let store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            let result = store.inner.get(&rtxn, &key).expect("Could not get item");

                            if let Some(blob) = result {
                                results
                                    .push((request.sender, Some(IdbResult::Data(blob.to_vec()))));
                            } else {
                                results.push((request.sender, None));
                            }
                        },
                        AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count(key)) => {
                            let _key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .read()
                                .expect("Could not acquire read lock on stores");
                            let _store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            // FIXME:(arihant2math) Return count with sender
                        },
                        AsyncOperation::ReadWrite(..) => {
                            // We cannot reach this, as checks are made earlier so that
                            // no modifying requests are executed on readonly transactions
                            unreachable!(
                                "Cannot execute modifying request with readonly transactions"
                            );
                        },
                    }
                }

                if tx.send(None).is_err() {
                    warn!("IDBTransaction's execution channel is dropped");
                };

                if let Err(e) = rtxn.commit() {
                    warn!("Error committing transaction: {:?}", e);
                    for (sender, _) in results {
                        let _ = sender.send(Err(()));
                    }
                } else {
                    for (sender, result) in results {
                        let _ = sender.send(Ok(result));
                    }
                }
            });
        } else {
            self.write_pool.spawn(move || {
                // Acquiring a writer will block the thread if another `readwrite` transaction is active
                let env = heed_env;
                let mut wtxn = env.write_txn().expect("Could not create idb store writer");
                let mut results = vec![];
                for request in transaction.requests {
                    match request.operation {
                        AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem(
                            key,
                            value,
                            overwrite,
                        )) => {
                            let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .write()
                                .expect("Could not acquire write lock on stores");
                            let store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            if overwrite ||
                                store
                                    .inner
                                    .get(&wtxn, &serialized_key)
                                    .expect("Could not get item")
                                    .is_none()
                            {
                                let result = store
                                    .inner
                                    .put(&mut wtxn, &serialized_key, &value)
                                    .ok()
                                    .and(Some(IdbResult::Key(key)));
                                results.push((request.sender, result));
                            } else {
                                results.push((request.sender, None));
                            }
                        },
                        AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem(key)) => {
                            let key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .read()
                                .expect("Could not acquire write lock on stores");
                            let store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            let result = store.inner.get(&wtxn, &key).expect("Could not get item");

                            results.push((
                                request.sender,
                                result.map(|blob| IdbResult::Data(blob.to_vec())),
                            ));
                        },
                        AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem(key)) => {
                            let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .write()
                                .expect("Could not acquire write lock on stores");
                            let store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            let result = store
                                .inner
                                .delete(&mut wtxn, &serialized_key)
                                .ok()
                                .and(Some(IdbResult::Key(key)));
                            results.push((request.sender, result));
                        },
                        AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count(key)) => {
                            let _key: Vec<u8> = bincode::serialize(&key).unwrap();
                            let stores = stores
                                .read()
                                .expect("Could not acquire read lock on stores");
                            let _store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            // FIXME:(arihant2math) Return count with sender
                        },
                        AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear) => {
                            let stores = stores
                                .write()
                                .expect("Could not acquire write lock on stores");
                            let store = stores
                                .get(&request.store_name)
                                .expect("Could not get store");
                            // FIXME:(arihant2math) Error handling
                            let _ = store.inner.clear(&mut wtxn);
                        },
                    }
                }

                if let Err(e) = wtxn.commit() {
                    warn!("Error committing to database: {:?}", e);
                    for (sender, _) in results {
                        let _ = sender.send(Err(()));
                    }
                } else {
                    for (sender, result) in results {
                        let _ = sender.send(Ok(result));
                    }
                }
            })
        }
        rx
    }

    fn has_key_generator(&self, store_name: SanitizedName) -> bool {
        let has_generator = self
            .open_stores
            .read()
            .expect("Could not acquire read lock on stores")
            .get(&store_name)
            .expect("Store not found")
            .key_generator
            .is_some();
        has_generator
    }
}
@@ -4,15 +4,14 @@

use std::collections::VecDeque;

-use ipc_channel::ipc::IpcSender;
-use net_traits::indexeddb_thread::{AsyncOperation, IdbResult, IndexedDBTxnMode};
+use net_traits::indexeddb_thread::{AsyncOperation, CreateObjectResult, IndexedDBTxnMode, KeyPath};
use tokio::sync::oneshot;

-pub use self::heed::HeedEngine;
+pub use self::sqlite::SqliteEngine;

-mod heed;
+mod sqlite;

-#[derive(Eq, Hash, PartialEq)]
+#[derive(Clone, Eq, Hash, PartialEq)]
pub struct SanitizedName {
    name: String,
}

@@ -46,34 +45,57 @@ impl std::fmt::Display for SanitizedName {
}

pub struct KvsOperation {
-    pub sender: IpcSender<Result<Option<IdbResult>, ()>>,
    pub store_name: SanitizedName,
    pub operation: AsyncOperation,
}

pub struct KvsTransaction {
-    // Mode could be used by a more optimal implementation of transactions
-    // that has different allocated threadpools for reading and writing
-    #[allow(unused)]
    pub mode: IndexedDBTxnMode,
    pub requests: VecDeque<KvsOperation>,
}

pub trait KvsEngine {
-    type Error;
+    type Error: std::error::Error;

    fn create_store(
        &self,
        store_name: SanitizedName,
+        key_path: Option<KeyPath>,
        auto_increment: bool,
-    ) -> Result<(), Self::Error>;
+    ) -> Result<CreateObjectResult, Self::Error>;

    fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;

+    #[expect(dead_code)]
    fn close_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;

+    fn delete_database(self) -> Result<(), Self::Error>;
+
    fn process_transaction(
        &self,
        transaction: KvsTransaction,
    ) -> oneshot::Receiver<Option<Vec<u8>>>;

    fn has_key_generator(&self, store_name: SanitizedName) -> bool;
+    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath>;
+
+    fn create_index(
+        &self,
+        store_name: SanitizedName,
+        index_name: String,
+        key_path: KeyPath,
+        unique: bool,
+        multi_entry: bool,
+    ) -> Result<CreateObjectResult, Self::Error>;
+    fn delete_index(
+        &self,
+        store_name: SanitizedName,
+        index_name: String,
+    ) -> Result<(), Self::Error>;
+
+    fn version(&self) -> Result<u64, Self::Error>;
+    fn set_version(&self, version: u64) -> Result<(), Self::Error>;
}
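As orientation for the new engine below (an illustrative sketch, not a line of the diff): each IndexedDB database now maps to exactly one SQLite database file, laid out roughly as

    <idb_base_dir>/<IndexedDBDescription::as_path()>/db.sqlite

where as_path() is derived from the sanitized origin and database name, and SqliteEngine::new creates the file on first open.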
713
components/net/indexeddb/engines/sqlite.rs
Normal file
713
components/net/indexeddb/engines/sqlite.rs
Normal file
|
@ -0,0 +1,713 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::path::{Path, PathBuf};
use std::sync::Arc;

use ipc_channel::ipc::IpcSender;
use log::{error, info};
use net_traits::indexeddb_thread::{
    AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, BackendError, BackendResult,
    CreateObjectResult, IndexedDBKeyRange, IndexedDBTxnMode, KeyPath, PutItemResult,
};
use rusqlite::{Connection, Error, OptionalExtension, params};
use sea_query::{Condition, Expr, ExprTrait, IntoCondition, SqliteQueryBuilder};
use serde::Serialize;
use tokio::sync::oneshot;

use crate::indexeddb::engines::{KvsEngine, KvsTransaction, SanitizedName};
use crate::indexeddb::idb_thread::IndexedDBDescription;
use crate::resource_thread::CoreResourceThreadPool;

mod create;
mod database_model;
mod object_data_model;
mod object_store_index_model;
mod object_store_model;

// These pragmas need to be set once
const DB_INIT_PRAGMAS: [&str; 2] = ["PRAGMA journal_mode = WAL;", "PRAGMA encoding = 'UTF-16';"];
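// Note: both of these are persistent properties of the database file itself:
// journal_mode = WAL stays in effect for later connections, and the text
// encoding can only be chosen before any content is written.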

// These pragmas need to be run once per connection.
const DB_PRAGMAS: [&str; 4] = [
    "PRAGMA synchronous = NORMAL;",
    "PRAGMA journal_size_limit = 67108864; -- 64 megabytes",
    "PRAGMA mmap_size = 67108864; -- 64 megabytes",
    "PRAGMA cache_size = 2000;",
];

fn range_to_query(range: IndexedDBKeyRange) -> Condition {
    // Special case for optimization
    if let Some(singleton) = range.as_singleton() {
        let encoded = bincode::serialize(singleton).unwrap();
        return Expr::column(object_data_model::Column::Key)
            .eq(encoded)
            .into_condition();
    }
    let mut parts = vec![];
    if let Some(upper) = range.upper.as_ref() {
        let upper_bytes = bincode::serialize(upper).unwrap();
        let query = if range.upper_open {
            Expr::column(object_data_model::Column::Key).lt(upper_bytes)
        } else {
            Expr::column(object_data_model::Column::Key).lte(upper_bytes)
        };
        parts.push(query);
    }
    if let Some(lower) = range.lower.as_ref() {
        let lower_bytes = bincode::serialize(lower).unwrap();
        let query = if range.lower_open {
            Expr::column(object_data_model::Column::Key).gt(lower_bytes)
        } else {
            Expr::column(object_data_model::Column::Key).gte(lower_bytes)
        };
        parts.push(query);
    }
    let mut condition = Condition::all();
    for part in parts {
        condition = condition.add(part);
    }
    condition
}
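// Worked example (illustrative): for a bound range [a, b) -- lower bound
// closed, upper bound open -- range_to_query yields a condition equivalent to
//     key >= bincode(a) AND key < bincode(b)
// while a singleton range collapses to the `key = bincode(k)` fast path above.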

pub struct SqliteEngine {
    db_path: PathBuf,
    connection: Connection,
    read_pool: Arc<CoreResourceThreadPool>,
    write_pool: Arc<CoreResourceThreadPool>,
}

impl SqliteEngine {
    // TODO: intake dual pools
    pub fn new(
        base_dir: &Path,
        db_info: &IndexedDBDescription,
        version: u64,
        pool: Arc<CoreResourceThreadPool>,
    ) -> Result<Self, Error> {
        let mut db_path = PathBuf::new();
        db_path.push(base_dir);
        db_path.push(db_info.as_path());
        let db_parent = db_path.clone();
        db_path.push("db.sqlite");

        if !db_path.exists() {
            std::fs::create_dir_all(db_parent).unwrap();
            std::fs::File::create(&db_path).unwrap();
        }
        let connection = Self::init_db(&db_path, db_info, version)?;

        for stmt in DB_PRAGMAS {
            // TODO: Handle errors properly
            let _ = connection.execute(stmt, ());
        }

        Ok(Self {
            connection,
            db_path,
            read_pool: pool.clone(),
            write_pool: pool,
        })
    }

    fn init_db(
        path: &Path,
        db_info: &IndexedDBDescription,
        version: u64,
    ) -> Result<Connection, Error> {
        let connection = Connection::open(path)?;
        if connection.table_exists(None, "database")? {
            // Database already exists, no need to initialize
            return Ok(connection);
        }
        info!("Initializing indexeddb database at {:?}", path);
        for stmt in DB_INIT_PRAGMAS {
            // FIXME(arihant2math): this fails occasionally
            let _ = connection.execute(stmt, ());
        }
        create::create_tables(&connection)?;
        connection.execute(
            "INSERT INTO database (name, origin, version) VALUES (?, ?, ?)",
            params![
                db_info.name.to_owned(),
                db_info.origin.to_owned().ascii_serialization(),
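                // SQLite integers are signed 64-bit, so the u64 version is
                // stored by reinterpreting its bits as i64; version() below
                // reverses the cast.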
                i64::from_ne_bytes(version.to_ne_bytes())
            ],
        )?;
        Ok(connection)
    }

    fn get(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<object_data_model::Model>, Error> {
        let query = range_to_query(key_range);
        let stmt = sea_query::Query::select()
            .columns([
                object_data_model::Column::ObjectStoreId,
                object_data_model::Column::Key,
                object_data_model::Column::Data,
            ])
            .from(object_data_model::Column::Table)
            .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
            .to_owned();
        connection
            .prepare(&stmt.build(SqliteQueryBuilder).0)?
            .query_one((), |row| object_data_model::Model::try_from(row))
            .optional()
    }

    fn get_key(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<Vec<u8>>, Error> {
        Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.key))
    }

    fn get_item(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<Option<Vec<u8>>, Error> {
        Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.data))
    }

    fn put_item(
        connection: &Connection,
        store: object_store_model::Model,
        serialized_key: Vec<u8>,
        value: Vec<u8>,
        should_overwrite: bool,
    ) -> Result<PutItemResult, Error> {
        let existing_item = connection
            .prepare("SELECT * FROM object_data WHERE key = ? AND object_store_id = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![serialized_key, store.id], |row| {
                    object_data_model::Model::try_from(row)
                })
                .optional()
            })?;
        if should_overwrite || existing_item.is_none() {
            connection.execute(
                "INSERT OR REPLACE INTO object_data (object_store_id, key, data) VALUES (?, ?, ?)",
                params![store.id, serialized_key, value],
            )?;
            Ok(PutItemResult::Success)
        } else {
            Ok(PutItemResult::CannotOverwrite)
        }
    }

    fn delete_item(
        connection: &Connection,
        store: object_store_model::Model,
        serialized_key: Vec<u8>,
    ) -> Result<(), Error> {
        connection.execute(
            "DELETE FROM object_data WHERE key = ? AND object_store_id = ?",
            params![serialized_key, store.id],
        )?;
        Ok(())
    }

    fn clear(connection: &Connection, store: object_store_model::Model) -> Result<(), Error> {
        connection.execute(
            "DELETE FROM object_data WHERE object_store_id = ?",
            params![store.id],
        )?;
        Ok(())
    }

    fn count(
        connection: &Connection,
        store: object_store_model::Model,
        key_range: IndexedDBKeyRange,
    ) -> Result<usize, Error> {
        let query = range_to_query(key_range);
        let count = sea_query::Query::select()
            .expr(Expr::col(object_data_model::Column::Key).count())
            .from(object_data_model::Column::Table)
            .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
            .to_owned();
        connection
            .prepare(&count.build(SqliteQueryBuilder).0)?
            .query_row((), |row| row.get(0))
            .map(|count: i64| count as usize)
    }
}

impl KvsEngine for SqliteEngine {
    type Error = Error;

    fn create_store(
        &self,
        store_name: SanitizedName,
        key_path: Option<KeyPath>,
        auto_increment: bool,
    ) -> Result<CreateObjectResult, Self::Error> {
        let mut stmt = self
            .connection
            .prepare("SELECT 1 FROM object_store WHERE name = ?")?;
        if stmt.exists(params![store_name.to_string()])? {
            // Store already exists
            return Ok(CreateObjectResult::AlreadyExists);
        }
        self.connection.execute(
            "INSERT INTO object_store (name, key_path, auto_increment) VALUES (?, ?, ?)",
            params![
                store_name.to_string(),
                key_path.map(|v| bincode::serialize(&v).unwrap()),
                auto_increment
            ],
        )?;

        Ok(CreateObjectResult::Created)
    }

    fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error> {
        let result = self.connection.execute(
            "DELETE FROM object_store WHERE name = ?",
            params![store_name.to_string()],
        )?;
        if result == 0 {
            Err(Error::QueryReturnedNoRows)
        } else if result > 1 {
            Err(Error::QueryReturnedMoreThanOneRow)
        } else {
            Ok(())
        }
    }

    fn close_store(&self, _store_name: SanitizedName) -> Result<(), Self::Error> {
        // TODO: do something
        Ok(())
    }

    fn delete_database(self) -> Result<(), Self::Error> {
        // attempt to close the connection first
        let _ = self.connection.close();
        if self.db_path.exists() {
            if let Err(e) = std::fs::remove_dir_all(self.db_path.parent().unwrap()) {
                error!("Failed to delete database: {:?}", e);
            }
        }
        Ok(())
    }

    fn process_transaction(
        &self,
        transaction: KvsTransaction,
    ) -> oneshot::Receiver<Option<Vec<u8>>> {
        let (tx, rx) = oneshot::channel();
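        // Note: read_pool and write_pool are currently clones of the same
        // CoreResourceThreadPool (see the "intake dual pools" TODO above),
        // so this split is preparatory rather than a real separation.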

        let spawning_pool = if transaction.mode == IndexedDBTxnMode::Readonly {
            self.read_pool.clone()
        } else {
            self.write_pool.clone()
        };
        let path = self.db_path.clone();
        spawning_pool.spawn(move || {
            let connection = Connection::open(path).unwrap();
            for request in transaction.requests {
                let object_store = connection
                    .prepare("SELECT * FROM object_store WHERE name = ?")
                    .and_then(|mut stmt| {
                        stmt.query_row(params![request.store_name.to_string()], |row| {
                            object_store_model::Model::try_from(row)
                        })
                        .optional()
                    });
                fn process_object_store<T>(
                    object_store: Result<Option<object_store_model::Model>, Error>,
                    sender: &IpcSender<BackendResult<T>>,
                ) -> Result<object_store_model::Model, ()>
                where
                    T: Serialize,
                {
                    match object_store {
                        Ok(Some(store)) => Ok(store),
                        Ok(None) => {
                            let _ = sender.send(Err(BackendError::StoreNotFound));
                            Err(())
                        },
                        Err(e) => {
                            let _ = sender.send(Err(BackendError::DbErr(format!("{:?}", e))));
                            Err(())
                        },
                    }
                }

                match request.operation {
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                        sender,
                        key,
                        value,
                        should_overwrite,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                        let _ = sender.send(
                            Self::put_item(
                                &connection,
                                object_store,
                                serialized_key,
                                value,
                                should_overwrite,
                            )
                            .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_item(&connection, object_store, key_range)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
                        sender,
                        key,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
                        let _ = sender.send(
                            Self::delete_item(&connection, object_store, serialized_key)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::count(&connection, object_store, key_range)
                                .map(|r| r as u64)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(sender)) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::clear(&connection, object_store)
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                    AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetKey {
                        sender,
                        key_range,
                    }) => {
                        let Ok(object_store) = process_object_store(object_store, &sender) else {
                            continue;
                        };
                        let _ = sender.send(
                            Self::get_key(&connection, object_store, key_range)
                                .map(|key| key.map(|k| bincode::deserialize(&k).unwrap()))
                                .map_err(|e| BackendError::DbErr(format!("{:?}", e))),
                        );
                    },
                }
            }
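            // Per-request results were already sent through each operation's
            // own IPC sender above; this oneshot only signals that the whole
            // transaction has been processed.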
            let _ = tx.send(None);
        });
        rx
    }

    // TODO: we should be able to error out here, maybe change the trait definition?
    fn has_key_generator(&self, store_name: SanitizedName) -> bool {
        self.connection
            .prepare("SELECT * FROM object_store WHERE name = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![store_name.to_string()], |r| {
                    let object_store = object_store_model::Model::try_from(r).unwrap();
                    Ok(object_store.auto_increment)
                })
            })
            .optional()
            .unwrap()
            // TODO: Wrong (change trait definition for this function)
            .unwrap_or_default()
    }

    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
        self.connection
            .prepare("SELECT * FROM object_store WHERE name = ?")
            .and_then(|mut stmt| {
                stmt.query_row(params![store_name.to_string()], |r| {
                    let object_store = object_store_model::Model::try_from(r).unwrap();
                    Ok(object_store
                        .key_path
                        .map(|key_path| bincode::deserialize(&key_path).unwrap()))
                })
            })
            .optional()
            .unwrap()
            // TODO: Wrong, same issues as has_key_generator
            .unwrap_or_default()
    }

    fn create_index(
        &self,
        store_name: SanitizedName,
        index_name: String,
        key_path: KeyPath,
        unique: bool,
        multi_entry: bool,
    ) -> Result<CreateObjectResult, Self::Error> {
        let object_store = self.connection.query_row(
            "SELECT * FROM object_store WHERE name = ?",
            params![store_name.to_string()],
            |row| object_store_model::Model::try_from(row),
        )?;

        let index_exists: bool = self.connection.query_row(
            "SELECT EXISTS(SELECT 1 FROM object_store_index WHERE name = ? AND object_store_id = ?)",
            params![index_name.to_string(), object_store.id],
            |row| row.get(0),
        )?;
        if index_exists {
            return Ok(CreateObjectResult::AlreadyExists);
        }

        self.connection.execute(
            "INSERT INTO object_store_index (object_store_id, name, key_path, unique_index, multi_entry_index) \
             VALUES (?, ?, ?, ?, ?)",
            params![
                object_store.id,
                index_name.to_string(),
                bincode::serialize(&key_path).unwrap(),
                unique,
                multi_entry,
            ],
        )?;
        Ok(CreateObjectResult::Created)
    }

    fn delete_index(
        &self,
        store_name: SanitizedName,
        index_name: String,
    ) -> Result<(), Self::Error> {
        let object_store = self.connection.query_row(
            "SELECT * FROM object_store WHERE name = ?",
            params![store_name.to_string()],
            |r| Ok(object_store_model::Model::try_from(r).unwrap()),
        )?;

        // Delete the index if it exists
        let _ = self.connection.execute(
            "DELETE FROM object_store_index WHERE name = ? AND object_store_id = ?",
            params![index_name.to_string(), object_store.id],
        )?;
        Ok(())
    }

    fn version(&self) -> Result<u64, Self::Error> {
        let version: i64 =
            self.connection
                .query_row("SELECT version FROM database LIMIT 1", [], |row| row.get(0))?;
        Ok(u64::from_ne_bytes(version.to_ne_bytes()))
    }

    fn set_version(&self, version: u64) -> Result<(), Self::Error> {
        let rows_affected = self.connection.execute(
            "UPDATE database SET version = ?",
            params![i64::from_ne_bytes(version.to_ne_bytes())],
        )?;
        if rows_affected == 0 {
            return Err(Error::QueryReturnedNoRows);
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::collections::VecDeque;
    use std::sync::Arc;

    use net_traits::indexeddb_thread::{
        AsyncOperation, AsyncReadWriteOperation, CreateObjectResult, IndexedDBKeyType,
        IndexedDBTxnMode, KeyPath,
    };
    use servo_url::ImmutableOrigin;
    use url::Host;

    use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SanitizedName};
    use crate::indexeddb::idb_thread::IndexedDBDescription;
    use crate::resource_thread::CoreResourceThreadPool;

    fn test_origin() -> ImmutableOrigin {
        ImmutableOrigin::Tuple(
            "test_origin".to_string(),
            Host::Domain("localhost".to_string()),
            80,
        )
    }

    fn get_pool() -> Arc<CoreResourceThreadPool> {
        Arc::new(CoreResourceThreadPool::new(1, "test".to_string()))
    }

    #[test]
    fn test_cycle() {
        let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
        let thread_pool = get_pool();
        // Test create
        let _ = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool.clone(),
        )
        .unwrap();
        // Test open
        let db = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool.clone(),
        )
        .unwrap();
        let version = db.version().expect("Failed to get version");
        assert_eq!(version, 1);
        db.set_version(5).unwrap();
        let new_version = db.version().expect("Failed to get new version");
        assert_eq!(new_version, 5);
        db.delete_database().expect("Failed to delete database");
    }

    #[test]
    fn test_create_store() {
        let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
        let thread_pool = get_pool();
        let db = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool,
        )
        .unwrap();
        let store_name = SanitizedName::new("test_store".to_string());
        let result = db.create_store(store_name.clone(), None, true);
        assert!(result.is_ok());
        let create_result = result.unwrap();
        assert_eq!(create_result, CreateObjectResult::Created);
        // Try to create the same store again
        let result = db.create_store(store_name.clone(), None, false);
        assert!(result.is_ok());
        let create_result = result.unwrap();
        assert_eq!(create_result, CreateObjectResult::AlreadyExists);
        // Ensure the store was not overwritten
        assert!(db.has_key_generator(store_name));
    }

    #[test]
    fn test_key_path() {
        let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
        let thread_pool = get_pool();
        let db = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool,
        )
        .unwrap();
        let store_name = SanitizedName::new("test_store".to_string());
        let result = db.create_store(
            store_name.clone(),
            Some(KeyPath::String("test".to_string())),
            true,
        );
        assert!(result.is_ok());
        assert_eq!(
            db.key_path(store_name),
            Some(KeyPath::String("test".to_string()))
        );
    }

    #[test]
    fn test_delete_store() {
        let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
        let thread_pool = get_pool();
        let db = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool,
        )
        .unwrap();
        db.create_store(SanitizedName::new("test_store".to_string()), None, false)
            .expect("Failed to create store");
        // Delete the store
        db.delete_store(SanitizedName::new("test_store".to_string()))
            .expect("Failed to delete store");
        // Try to delete the same store again
        let result = db.delete_store(SanitizedName::new("test_store".into()));
        assert!(result.is_err());
        // Try to delete a non-existing store; erroring here matches the spec,
        // which surfaces a missing store as NotFoundError
        let result = db.delete_store(SanitizedName::new("test_store".into()));
        assert!(result.is_err());
    }

    #[test]
    fn test_async_operations() {
        let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
        let thread_pool = get_pool();
        let db = super::SqliteEngine::new(
            base_dir.path(),
            &IndexedDBDescription {
                name: "test_db".to_string(),
                origin: test_origin(),
            },
            1,
            thread_pool,
        )
        .unwrap();
        let store_name = SanitizedName::new("test_store".to_string());
        db.create_store(store_name.clone(), None, false)
            .expect("Failed to create store");
        let rx = db.process_transaction(KvsTransaction {
            mode: IndexedDBTxnMode::Readwrite,
            requests: VecDeque::from(vec![
                // TODO: Test other operations
                KvsOperation {
                    store_name: store_name.clone(),
                    operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                        sender: ipc_channel::ipc::channel().unwrap().0,
                        key: IndexedDBKeyType::Number(1.0),
                        value: vec![],
                        should_overwrite: false,
                    }),
                },
            ]),
        });
        let _ = rx.blocking_recv().unwrap();
    }
}
components/net/indexeddb/engines/sqlite/create.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

pub(crate) fn create_tables(conn: &rusqlite::Connection) -> Result<(), rusqlite::Error> {
    conn.execute(
        r#"create table database
(
    name    varchar           not null
        primary key,
    origin  varchar           not null,
    version bigint  default 0 not null
);"#,
        [],
    )?;
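    // Because every IndexedDB database gets its own SQLite file, this table
    // holds exactly one row (inserted by init_db) carrying the database's
    // name, origin, and current version.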

    conn.execute(
        r#"create table object_store
(
    id             integer               not null
        primary key autoincrement,
    name           varchar               not null
        unique,
    key_path       varbinary_blob,
    auto_increment boolean default FALSE not null
);"#,
        [],
    )?;

    conn.execute(
        r#"create table object_data
(
    object_store_id integer not null
        references object_store,
    key             blob    not null,
    data            blob    not null,
    constraint "pk-object_data"
        primary key (object_store_id, key)
);"#,
        [],
    )?;
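    // The composite primary key (object_store_id, key) above is what gives
    // put_item its overwrite semantics: a record's key is unique per store.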

    conn.execute(
        r#"create table object_store_index
(
    id                integer        not null
        primary key autoincrement,
    object_store_id   integer        not null
        references object_store,
    name              varchar        not null
        unique,
    key_path          varbinary_blob not null,
    unique_index      boolean        not null,
    multi_entry_index boolean        not null
);"#,
        [],
    )?;
    Ok(())
}

components/net/indexeddb/engines/sqlite/database_model.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    Table,
    Name,
    Origin,
    Version,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub name: String,
    pub origin: String,
    pub version: i64,
    // TODO: Hold timestamp for vacuuming
    // TODO: implement vacuuming
}

components/net/indexeddb/engines/sqlite/object_data_model.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;

#[derive(Iden)]
pub enum Column {
    Table,
    ObjectStoreId,
    Key,
    Data,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub object_store_id: i32,
    pub key: Vec<u8>,
    pub data: Vec<u8>,
}

impl TryFrom<&Row<'_>> for Model {
    type Error = rusqlite::Error;

    fn try_from(value: &Row) -> Result<Self, Self::Error> {
        Ok(Self {
            object_store_id: value.get(0)?,
            key: value.get(1)?,
            data: value.get(2)?,
        })
    }
}

@@ -0,0 +1,25 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    Table,
    ObjectStoreId,
    Name,
    KeyPath,
    UniqueIndex,
    MultiEntryIndex,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub id: i32,
    pub object_store_id: i32,
    pub name: String,
    pub key_path: Vec<u8>,
    pub unique_index: bool,
    pub multi_entry_index: bool,
}

@@ -0,0 +1,36 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;

#[derive(Iden)]
#[expect(unused)]
pub enum Column {
    Table,
    Id,
    Name,
    KeyPath,
    AutoIncrement,
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
    pub id: i32,
    pub name: String,
    pub key_path: Option<Vec<u8>>,
    pub auto_increment: bool,
}

impl TryFrom<&Row<'_>> for Model {
    type Error = rusqlite::Error;

    fn try_from(value: &Row) -> Result<Self, Self::Error> {
        Ok(Self {
            id: value.get(0)?,
            name: value.get(1)?,
            key_path: value.get(2)?,
            auto_increment: value.get(3)?,
        })
    }
}
@@ -11,13 +11,14 @@ use std::thread;
use ipc_channel::ipc::{self, IpcError, IpcReceiver, IpcSender};
use log::{debug, warn};
use net_traits::indexeddb_thread::{
-    AsyncOperation, IdbResult, IndexedDBThreadMsg, IndexedDBTxnMode, SyncOperation,
+    AsyncOperation, BackendError, BackendResult, CreateObjectResult, DbResult, IndexedDBThreadMsg,
+    IndexedDBTxnMode, KeyPath, SyncOperation,
};
use servo_config::pref;
use servo_url::origin::ImmutableOrigin;

use crate::indexeddb::engines::{
-    HeedEngine, KvsEngine, KvsOperation, KvsTransaction, SanitizedName,
+    KvsEngine, KvsOperation, KvsTransaction, SanitizedName, SqliteEngine,
};
use crate::resource_thread::CoreResourceThreadPool;

@@ -48,14 +49,14 @@ impl IndexedDBThreadFactory for IpcSender<IndexedDBThreadMsg> {

#[derive(Clone, Eq, Hash, PartialEq)]
pub struct IndexedDBDescription {
-    origin: ImmutableOrigin,
-    name: String,
+    pub(super) origin: ImmutableOrigin,
+    pub(super) name: String,
}

impl IndexedDBDescription {
    // Converts the database description to a folder name where all
    // data for this database is stored
-    fn as_path(&self) -> PathBuf {
+    pub(super) fn as_path(&self) -> PathBuf {
        let mut path = PathBuf::new();

        let sanitized_origin = SanitizedName::new(self.origin.ascii_serialization());

@@ -69,18 +70,14 @@ impl IndexedDBDescription {

struct IndexedDBEnvironment<E: KvsEngine> {
    engine: E,
-    version: u64,

    transactions: HashMap<u64, KvsTransaction>,
    serial_number_counter: u64,
}

impl<E: KvsEngine> IndexedDBEnvironment<E> {
-    fn new(engine: E, version: u64) -> IndexedDBEnvironment<E> {
+    fn new(engine: E) -> IndexedDBEnvironment<E> {
        IndexedDBEnvironment {
            engine,
-            version,

            transactions: HashMap::new(),
            serial_number_counter: 0,
        }

@@ -88,7 +85,6 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {

    fn queue_operation(
        &mut self,
-        sender: IpcSender<Result<Option<IdbResult>, ()>>,
        store_name: SanitizedName,
        serial_number: u64,
        mode: IndexedDBTxnMode,

@@ -102,16 +98,15 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
            })
            .requests
            .push_back(KvsOperation {
-                sender,
                operation,
                store_name,
            });
    }

    // Executes all requests for a transaction (without committing)
-    fn start_transaction(&mut self, txn: u64, sender: Option<IpcSender<Result<(), ()>>>) {
-        // FIXME:(arihant2math) find a way to optimizations in this function
-        // rather than on the engine level code (less repetition)
+    fn start_transaction(&mut self, txn: u64, sender: Option<IpcSender<BackendResult<()>>>) {
+        // FIXME:(arihant2math) find optimizations in this function
+        // rather than on the engine level code (less repetition)
        if let Some(txn) = self.transactions.remove(&txn) {
            let _ = self.engine.process_transaction(txn).blocking_recv();
        }
@@ -129,40 +124,69 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
        self.engine.has_key_generator(store_name)
    }

-    fn create_object_store(
-        &mut self,
-        sender: IpcSender<Result<(), ()>>,
-        store_name: SanitizedName,
-        auto_increment: bool,
-    ) {
-        let result = self.engine.create_store(store_name, auto_increment);
-
-        if result.is_ok() {
-            let _ = sender.send(Ok(()));
-        } else {
-            let _ = sender.send(Err(()));
-        }
+    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
+        self.engine.key_path(store_name)
    }

-    fn delete_object_store(
-        &mut self,
-        sender: IpcSender<Result<(), ()>>,
-        store_name: SanitizedName,
-    ) {
-        let result = self.engine.delete_store(store_name);
-
-        if result.is_ok() {
-            let _ = sender.send(Ok(()));
-        } else {
-            let _ = sender.send(Err(()));
-        }
+    fn create_index(
+        &self,
+        store_name: SanitizedName,
+        index_name: String,
+        key_path: KeyPath,
+        unique: bool,
+        multi_entry: bool,
+    ) -> DbResult<CreateObjectResult> {
+        self.engine
+            .create_index(store_name, index_name, key_path, unique, multi_entry)
+            .map_err(|err| format!("{err:?}"))
+    }
+
+    fn delete_index(&self, store_name: SanitizedName, index_name: String) -> DbResult<()> {
+        self.engine
+            .delete_index(store_name, index_name)
+            .map_err(|err| format!("{err:?}"))
+    }
+
+    fn create_object_store(
+        &mut self,
+        store_name: SanitizedName,
+        key_path: Option<KeyPath>,
+        auto_increment: bool,
+    ) -> DbResult<CreateObjectResult> {
+        self.engine
+            .create_store(store_name, key_path, auto_increment)
+            .map_err(|err| format!("{err:?}"))
+    }
+
+    fn delete_object_store(&mut self, store_name: SanitizedName) -> DbResult<()> {
+        let result = self.engine.delete_store(store_name);
+        result.map_err(|err| format!("{err:?}"))
+    }
+
+    fn delete_database(self, sender: IpcSender<BackendResult<()>>) {
+        let result = self.engine.delete_database();
+        let _ = sender.send(
+            result
+                .map_err(|err| format!("{err:?}"))
+                .map_err(BackendError::from),
+        );
+    }
+
+    fn version(&self) -> DbResult<u64> {
+        self.engine.version().map_err(|err| format!("{err:?}"))
+    }
+
+    fn set_version(&mut self, version: u64) -> DbResult<()> {
+        self.engine
+            .set_version(version)
+            .map_err(|err| format!("{err:?}"))
+    }
}

struct IndexedDBManager {
    port: IpcReceiver<IndexedDBThreadMsg>,
    idb_base_dir: PathBuf,
-    databases: HashMap<IndexedDBDescription, IndexedDBEnvironment<HeedEngine>>,
+    databases: HashMap<IndexedDBDescription, IndexedDBEnvironment<SqliteEngine>>,
    thread_pool: Arc<CoreResourceThreadPool>,
}
@@ -170,10 +194,14 @@ impl IndexedDBManager {
    fn new(port: IpcReceiver<IndexedDBThreadMsg>, idb_base_dir: PathBuf) -> IndexedDBManager {
        debug!("New indexedDBManager");

+        // Uses an estimate of the system cpus to process IndexedDB transactions
+        // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html
+        // If no information can be obtained about the system, uses 4 threads as a default
+        let thread_count = thread::available_parallelism()
+            .map(|i| i.get())
+            .unwrap_or(pref!(threadpools_fallback_worker_num) as usize)
+            .min(pref!(threadpools_indexeddb_workers_max).max(1) as usize);

        IndexedDBManager {
            port,
            idb_base_dir,

@@ -210,21 +238,15 @@ impl IndexedDBManager {
            IndexedDBThreadMsg::Sync(operation) => {
                self.handle_sync_operation(operation);
            },
-            IndexedDBThreadMsg::Async(
-                sender,
-                origin,
-                db_name,
-                store_name,
-                txn,
-                mode,
-                operation,
-            ) => {
+            IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => {
                let store_name = SanitizedName::new(store_name);
                if let Some(db) = self.get_database_mut(origin, db_name) {
                    // Queues an operation for a transaction without starting it
-                    db.queue_operation(sender, store_name, txn, mode, operation);
-                    // FIXME:(arihant2math) Schedule transactions properly:
-                    // for now, we start them directly.
+                    db.queue_operation(store_name, txn, mode, operation);
+                    // FIXME:(arihant2math) Schedule transactions properly
+                    // while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) {
+                    //     std::hint::spin_loop();
+                    // }
                    db.start_transaction(txn, None);
                }
            },

@@ -236,7 +258,7 @@ impl IndexedDBManager {
        &self,
        origin: ImmutableOrigin,
        db_name: String,
-    ) -> Option<&IndexedDBEnvironment<HeedEngine>> {
+    ) -> Option<&IndexedDBEnvironment<SqliteEngine>> {
        let idb_description = IndexedDBDescription {
            origin,
            name: db_name,

@@ -249,7 +271,7 @@ impl IndexedDBManager {
        &mut self,
        origin: ImmutableOrigin,
        db_name: String,
-    ) -> Option<&mut IndexedDBEnvironment<HeedEngine>> {
+    ) -> Option<&mut IndexedDBEnvironment<SqliteEngine>> {
        let idb_description = IndexedDBDescription {
            origin,
            name: db_name,

@@ -266,7 +288,7 @@ impl IndexedDBManager {
            name: db_name,
        };
        if let Some(_db) = self.databases.remove(&idb_description) {
-            // TODO: maybe close store here?
+            // TODO: maybe a close database function should be added to the trait and called here?
        }
        let _ = sender.send(Ok(()));
    },

@@ -278,21 +300,24 @@ impl IndexedDBManager {

                let idb_base_dir = self.idb_base_dir.as_path();

+                let version = version.unwrap_or(0);
+
                match self.databases.entry(idb_description.clone()) {
                    Entry::Vacant(e) => {
                        let db = IndexedDBEnvironment::new(
-                            HeedEngine::new(
+                            SqliteEngine::new(
                                idb_base_dir,
-                                &idb_description.as_path(),
+                                &idb_description,
+                                version,
                                self.thread_pool.clone(),
-                            ),
-                            version.unwrap_or(0),
-                        );
-                        let _ = sender.send(db.version);
+                            )
+                            .expect("Failed to create sqlite engine"),
+                        );
+                        let _ = sender.send(db.version().unwrap_or(version));
                        e.insert(db);
                    },
                    Entry::Occupied(db) => {
-                        let _ = sender.send(db.get().version);
+                        let _ = sender.send(db.get().version().unwrap_or(version));
                    },
                }
            },

@@ -304,17 +329,8 @@ impl IndexedDBManager {
                    origin,
                    name: db_name,
                };
-                if self.databases.remove(&idb_description).is_none() {
-                    let _ = sender.send(Ok(()));
-                    return;
-                }
-
-                // FIXME:(rasviitanen) Possible security issue?
-                // FIXME:(arihant2math) using remove_dir_all with arbitrary input ...
-                let mut db_dir = self.idb_base_dir.clone();
-                db_dir.push(idb_description.as_path());
-                if std::fs::remove_dir_all(&db_dir).is_err() {
-                    let _ = sender.send(Err(()));
+                if let Some(db) = self.databases.remove(&idb_description) {
+                    db.delete_database(sender);
                } else {
                    let _ = sender.send(Ok(()));
                }
@@ -323,23 +339,57 @@ impl IndexedDBManager {
                let store_name = SanitizedName::new(store_name);
                let result = self
                    .get_database(origin, db_name)
-                    .map(|db| db.has_key_generator(store_name))
-                    .expect("No Database");
-                sender.send(result).expect("Could not send generator info");
+                    .map(|db| db.has_key_generator(store_name));
+                let _ = sender.send(result.ok_or(BackendError::DbNotFound));
+            },
+            SyncOperation::KeyPath(sender, origin, db_name, store_name) => {
+                let store_name = SanitizedName::new(store_name);
+                let result = self
+                    .get_database(origin, db_name)
+                    .map(|db| db.key_path(store_name));
+                let _ = sender.send(result.ok_or(BackendError::DbNotFound));
+            },
+            SyncOperation::CreateIndex(
+                sender,
+                origin,
+                db_name,
+                store_name,
+                index_name,
+                key_path,
+                unique,
+                multi_entry,
+            ) => {
+                let store_name = SanitizedName::new(store_name);
+                if let Some(db) = self.get_database(origin, db_name) {
+                    let result =
+                        db.create_index(store_name, index_name, key_path, unique, multi_entry);
+                    let _ = sender.send(result.map_err(BackendError::from));
+                } else {
+                    let _ = sender.send(Err(BackendError::DbNotFound));
+                }
+            },
+            SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => {
+                let store_name = SanitizedName::new(store_name);
+                if let Some(db) = self.get_database(origin, db_name) {
+                    let result = db.delete_index(store_name, index_name);
+                    let _ = sender.send(result.map_err(BackendError::from));
+                } else {
+                    let _ = sender.send(Err(BackendError::DbNotFound));
+                }
            },
            SyncOperation::Commit(sender, _origin, _db_name, _txn) => {
                // FIXME:(arihant2math) This does nothing at the moment
-                sender.send(Err(())).expect("Could not send commit status");
+                let _ = sender.send(Ok(()));
            },
            SyncOperation::UpgradeVersion(sender, origin, db_name, _txn, version) => {
                if let Some(db) = self.get_database_mut(origin, db_name) {
-                    if version > db.version {
-                        db.version = version;
+                    if version > db.version().unwrap_or(0) {
+                        let _ = db.set_version(version);
                    }
                    // erroring out if the version is not upgraded can be non-replicable
-                    let _ = sender.send(Ok(db.version));
+                    let _ = sender.send(db.version().map_err(BackendError::from));
                } else {
-                    let _ = sender.send(Err(()));
+                    let _ = sender.send(Err(BackendError::DbNotFound));
                }
            },
            SyncOperation::CreateObjectStore(
@@ -347,28 +397,39 @@ impl IndexedDBManager {
origin,
db_name,
store_name,
key_paths,
auto_increment,
) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database_mut(origin, db_name) {
db.create_object_store(sender, store_name, auto_increment);
let result = db.create_object_store(store_name, key_paths, auto_increment);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database_mut(origin, db_name) {
db.delete_object_store(sender, store_name);
let result = db.delete_object_store(store_name);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::StartTransaction(sender, origin, db_name, txn) => {
if let Some(db) = self.get_database_mut(origin, db_name) {
db.start_transaction(txn, Some(sender));
};
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::Version(sender, origin, db_name) => {
if let Some(db) = self.get_database(origin, db_name) {
let _ = sender.send(db.version);
};
let _ = sender.send(db.version().map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::RegisterNewTxn(sender, origin, db_name) => {
if let Some(db) = self.get_database_mut(origin, db_name) {

@@ -7,7 +7,7 @@ use std::cell::Cell;
use dom_struct::dom_struct;
use ipc_channel::ipc::IpcSender;
use net_traits::IpcSend;
use net_traits::indexeddb_thread::{IndexedDBThreadMsg, SyncOperation};
use net_traits::indexeddb_thread::{IndexedDBThreadMsg, KeyPath, SyncOperation};
use profile_traits::ipc;
use stylo_atoms::Atom;

@@ -104,7 +104,10 @@ impl IDBDatabase {
.get_idb_thread()
.send(IndexedDBThreadMsg::Sync(operation));

receiver.recv().unwrap()
receiver.recv().unwrap().unwrap_or_else(|e| {
error!("{e:?}");
u64::MAX
})
}

pub fn set_transaction(&self, transaction: &IDBTransaction) {

@@ -185,7 +188,6 @@ impl IDBDatabaseMethods<crate::DomTypeHolder> for IDBDatabase {
name: DOMString,
options: &IDBObjectStoreParameters,
) -> Fallible<DomRoot<IDBObjectStore>> {
// FIXME:(arihant2math) ^^ Change idl to match above.
// Step 2
let upgrade_transaction = match self.upgrade_transaction.get() {
Some(txn) => txn,

@@ -247,11 +249,18 @@ impl IDBDatabaseMethods<crate::DomTypeHolder> for IDBDatabase {

let (sender, receiver) = ipc::channel(self.global().time_profiler_chan().clone()).unwrap();

let key_paths = key_path.map(|p| match p {
StringOrStringSequence::String(s) => KeyPath::String(s.to_string()),
StringOrStringSequence::StringSequence(s) => {
KeyPath::Sequence(s.iter().map(|s| s.to_string()).collect())
},
});
let operation = SyncOperation::CreateObjectStore(
sender,
self.global().origin().immutable().clone(),
self.name.to_string(),
name.to_string(),
key_paths,
auto_increment,
);

@@ -26,7 +26,8 @@ use crate::dom::domstringlist::DOMStringList;
use crate::dom::globalscope::GlobalScope;
use crate::dom::idbrequest::IDBRequest;
use crate::dom::idbtransaction::IDBTransaction;
use crate::indexed_db::{convert_value_to_key, extract_key};
use crate::indexed_db;
use crate::indexed_db::{convert_value_to_key, convert_value_to_key_range, extract_key};
use crate::script_runtime::{CanGc, JSContext as SafeJSContext};

#[derive(JSTraceable, MallocSizeOf)]

@@ -125,9 +126,43 @@ impl IDBObjectStore {
.send(IndexedDBThreadMsg::Sync(operation))
.unwrap();

receiver.recv().unwrap()
// First unwrap for ipc
// Second unwrap will never happen unless this db gets manually deleted somehow
receiver.recv().unwrap().unwrap()
}

// fn get_stored_key_path(&mut self) -> Option<KeyPath> {
// let (sender, receiver) = ipc::channel(self.global().time_profiler_chan().clone()).unwrap();
//
// let operation = SyncOperation::KeyPath(
// sender,
// self.global().origin().immutable().clone(),
// self.db_name.to_string(),
// self.name.borrow().to_string(),
// );
//
// self.global()
// .resource_threads()
// .sender()
// .send(IndexedDBThreadMsg::Sync(operation))
// .unwrap();
//
// // First unwrap for ipc
// // Second unwrap will never happen unless this db gets manually deleted somehow
// let key_path = receiver.recv().unwrap().unwrap();
// key_path.map(|p| {
// // TODO: have separate storage for a string sequence of len 1 and a single string
// if p.len() == 1 {
// KeyPath::String(DOMString::from_string(p[0].clone()))
// } else {
// let strings: Vec<_> = p.into_iter().map(|s| {
// DOMString::from_string(s)
// }).collect();
// KeyPath::StringSequence(strings)
// }
// })
// }

// https://www.w3.org/TR/IndexedDB-2/#object-store-in-line-keys
fn uses_inline_keys(&self) -> bool {
self.key_path.is_some()
@@ -200,30 +235,34 @@ impl IDBObjectStore {
serialized_key = convert_value_to_key(cx, key, None)?;
} else {
// Step 11: We should use in-line keys instead
if let Ok(kpk) = extract_key(
cx,
value,
self.key_path.as_ref().expect("No key path"),
None,
) {
if let Some(Ok(kpk)) = self
.key_path
.as_ref()
.map(|p| extract_key(cx, value, p, None))
{
serialized_key = kpk;
} else {
// FIXME:(rasviitanen)
// Check if store has a key generator
// Check if we can inject a key
return Err(Error::Data);
if !self.has_key_generator() {
return Err(Error::Data);
}
// FIXME:(arihant2math)
return Err(Error::NotSupported);
}
}

let serialized_value = structuredclone::write(cx, value, None)?;

let (sender, receiver) = indexed_db::create_channel(self.global());

IDBRequest::execute_async(
self,
AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem(
serialized_key,
serialized_value.serialized,
overwrite,
)),
AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
sender,
key: serialized_key,
value: serialized_value.serialized,
should_overwrite: overwrite,
}),
receiver,
None,
can_gc,
)

@@ -262,10 +301,12 @@ impl IDBObjectStoreMethods<crate::DomTypeHolder> for IDBObjectStore {
// TODO: Convert to key range instead
let serialized_query = convert_value_to_key(cx, query, None);
// Step 7
let (sender, receiver) = indexed_db::create_channel(self.global());
serialized_query.and_then(|q| {
IDBRequest::execute_async(
self,
AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem(q)),
AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem { sender, key: q }),
receiver,
None,
CanGc::note(),
)

@@ -279,9 +320,12 @@ impl IDBObjectStoreMethods<crate::DomTypeHolder> for IDBObjectStore {
// TODO: Step 3
// Steps 4-5
self.check_readwrite_transaction_active()?;
let (sender, receiver) = indexed_db::create_channel(self.global());

IDBRequest::execute_async(
self,
AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear),
AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(sender)),
receiver,
None,
CanGc::note(),
)

@@ -295,13 +339,17 @@ impl IDBObjectStoreMethods<crate::DomTypeHolder> for IDBObjectStore {
// Step 4
self.check_transaction_active()?;
// Step 5
// TODO: Convert to key range instead
let serialized_query = convert_value_to_key(cx, query, None);
let serialized_query = convert_value_to_key_range(cx, query, None);
// Step 6
let (sender, receiver) = indexed_db::create_channel(self.global());
serialized_query.and_then(|q| {
IDBRequest::execute_async(
self,
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem(q)),
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
sender,
key_range: q,
}),
receiver,
None,
CanGc::note(),
)

@@ -309,17 +357,29 @@ impl IDBObjectStoreMethods<crate::DomTypeHolder> for IDBObjectStore {
}

// https://www.w3.org/TR/IndexedDB-2/#dom-idbobjectstore-getkey
// fn GetKey(&self, _cx: SafeJSContext, _query: HandleValue) -> DomRoot<IDBRequest> {
// // Step 1: Unneeded, handled by self.check_transaction_active()
// // TODO: Step 2
// // TODO: Step 3
// // Step 4
// self.check_transaction_active()?;
// // Step 5
// // TODO: Convert to key range instead
// let serialized_query = IDBObjectStore::convert_value_to_key(cx, query, None);
// unimplemented!();
// }
fn GetKey(&self, cx: SafeJSContext, query: HandleValue) -> Result<DomRoot<IDBRequest>, Error> {
// Step 1: Unneeded, handled by self.check_transaction_active()
// TODO: Step 2
// TODO: Step 3
// Step 4
self.check_transaction_active()?;
// Step 5
let serialized_query = convert_value_to_key_range(cx, query, None);
// Step 6
let (sender, receiver) = indexed_db::create_channel(self.global());
serialized_query.and_then(|q| {
IDBRequest::execute_async(
self,
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetKey {
sender,
key_range: q,
}),
receiver,
None,
CanGc::note(),
)
})
}

// https://www.w3.org/TR/IndexedDB-2/#dom-idbobjectstore-getall
// fn GetAll(

@@ -350,13 +410,18 @@ impl IDBObjectStoreMethods<crate::DomTypeHolder> for IDBObjectStore {
self.check_transaction_active()?;

// Step 5
let serialized_query = convert_value_to_key(cx, query, None);
let serialized_query = convert_value_to_key_range(cx, query, None);

// Step 6
let (sender, receiver) = indexed_db::create_channel(self.global());
serialized_query.and_then(|q| {
IDBRequest::execute_async(
self,
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count(q)),
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
sender,
key_range: q,
}),
receiver,
None,
CanGc::note(),
)

@@ -7,7 +7,7 @@ use ipc_channel::router::ROUTER;
use js::jsval::UndefinedValue;
use js::rust::HandleValue;
use net_traits::IpcSend;
use net_traits::indexeddb_thread::{IndexedDBThreadMsg, SyncOperation};
use net_traits::indexeddb_thread::{BackendResult, IndexedDBThreadMsg, SyncOperation};
use profile_traits::ipc;
use script_bindings::conversions::SafeToJSValConvertible;
use stylo_atoms::Atom;

@@ -84,7 +84,7 @@ impl OpenRequestListener {
}
}

fn handle_delete_db(&self, result: Result<(), ()>, can_gc: CanGc) {
fn handle_delete_db(&self, result: BackendResult<()>, can_gc: CanGc) {
let open_request = self.open_request.root();
let global = open_request.global();
open_request.idbrequest.set_ready_state_done();

@@ -108,7 +108,7 @@ impl OpenRequestListener {
event.fire(open_request.upcast(), can_gc);
},
Err(_e) => {
// FIXME(rasviitanen) Set the error of request to the
// FIXME(arihant2math) Set the error of request to the
// appropriate error

let event = Event::new(

@@ -8,13 +8,15 @@ use constellation_traits::StructuredSerializedData;
use dom_struct::dom_struct;
use ipc_channel::router::ROUTER;
use js::jsapi::Heap;
use js::jsval::{JSVal, UndefinedValue};
use js::jsval::{DoubleValue, JSVal, UndefinedValue};
use js::rust::HandleValue;
use net_traits::IpcSend;
use net_traits::indexeddb_thread::{
AsyncOperation, IdbResult, IndexedDBThreadMsg, IndexedDBTxnMode,
AsyncOperation, BackendResult, IndexedDBKeyType, IndexedDBThreadMsg, IndexedDBTxnMode,
PutItemResult,
};
use profile_traits::ipc;
use profile_traits::ipc::IpcReceiver;
use serde::{Deserialize, Serialize};
use stylo_atoms::Atom;

use crate::dom::bindings::codegen::Bindings::IDBRequestBinding::{

@@ -42,8 +44,61 @@ struct RequestListener {
request: Trusted<IDBRequest>,
}

pub enum IdbResult {
Key(IndexedDBKeyType),
Data(Vec<u8>),
Count(u64),
Error(Error),
None,
}

impl From<IndexedDBKeyType> for IdbResult {
fn from(value: IndexedDBKeyType) -> Self {
IdbResult::Key(value)
}
}

impl From<Vec<u8>> for IdbResult {
fn from(value: Vec<u8>) -> Self {
IdbResult::Data(value)
}
}

impl From<PutItemResult> for IdbResult {
fn from(value: PutItemResult) -> Self {
match value {
PutItemResult::Success => Self::None,
PutItemResult::CannotOverwrite => Self::Error(Error::Constraint),
}
}
}

impl From<()> for IdbResult {
fn from(_value: ()) -> Self {
Self::None
}
}

impl<T> From<Option<T>> for IdbResult
where
T: Into<IdbResult>,
{
fn from(value: Option<T>) -> Self {
match value {
Some(value) => value.into(),
None => IdbResult::None,
}
}
}

impl From<u64> for IdbResult {
fn from(value: u64) -> Self {
IdbResult::Count(value)
}
}
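
Review note: these From impls are what let the now-generic execute_async::<T> funnel every typed backend reply (key, bytes, count, unit, or an Option of any of these) into the single script-facing IdbResult. A minimal standalone sketch of the same funneling pattern, using stand-in names (MyResult, finish) rather than the crate's own types:

    // One enum absorbs every typed reply a backend can send.
    enum MyResult {
        Count(u64),
        Data(Vec<u8>),
        None,
    }

    impl From<u64> for MyResult {
        fn from(v: u64) -> Self {
            MyResult::Count(v)
        }
    }

    impl From<Vec<u8>> for MyResult {
        fn from(v: Vec<u8>) -> Self {
            MyResult::Data(v)
        }
    }

    // Option<T> flattens missing values to None, mirroring the impl above.
    impl<T: Into<MyResult>> From<Option<T>> for MyResult {
        fn from(v: Option<T>) -> Self {
            v.map(Into::into).unwrap_or(MyResult::None)
        }
    }

    // A single generic handler then accepts any reply type.
    fn finish<T: Into<MyResult>>(reply: T) -> MyResult {
        reply.into()
    }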

impl RequestListener {
fn handle_async_request_finished(&self, result: Result<Option<IdbResult>, ()>) {
fn handle_async_request_finished(&self, result: BackendResult<IdbResult>) {
let request = self.request.root();
let global = request.global();
let cx = GlobalScope::get_cx();

@@ -53,7 +108,7 @@ impl RequestListener {
let _ac = enter_realm(&*request);
rooted!(in(*cx) let mut answer = UndefinedValue());

if let Ok(Some(data)) = result {
if let Ok(data) = result {
match data {
IdbResult::Key(key) => {
key_type_to_jsval(GlobalScope::get_cx(), &key, answer.handle_mut())

@@ -68,6 +123,17 @@ impl RequestListener {
warn!("Error reading structuredclone data");
}
},
IdbResult::Count(count) => {
answer.handle_mut().set(DoubleValue(count as f64));
},
IdbResult::None => {
// no-op
},
IdbResult::Error(_err) => {
request.set_result(answer.handle());
Self::handle_async_request_error(request, &global);
return;
},
}

request.set_result(answer.handle());

@@ -92,30 +158,33 @@ impl RequestListener {
transaction.set_active_flag(false);
} else {
request.set_result(answer.handle());

// FIXME:(rasviitanen)
// Set the error of request to result

let transaction = request
.transaction
.get()
.expect("Request has no transaction");

let event = Event::new(
&global,
Atom::from("error"),
EventBubbles::Bubbles,
EventCancelable::Cancelable,
CanGc::note(),
);

transaction.set_active_flag(true);
event
.upcast::<Event>()
.fire(request.upcast(), CanGc::note());
transaction.set_active_flag(false);
Self::handle_async_request_error(request, &global);
}
}

fn handle_async_request_error(request: DomRoot<IDBRequest>, global: &GlobalScope) {
// FIXME:(rasviitanen)
// Set the error of request to result

let transaction = request
.transaction
.get()
.expect("Request has no transaction");

let event = Event::new(
global,
Atom::from("error"),
EventBubbles::Bubbles,
EventCancelable::Cancelable,
CanGc::note(),
);

transaction.set_active_flag(true);
event
.upcast::<Event>()
.fire(request.upcast(), CanGc::note());
transaction.set_active_flag(false);
}
}

#[dom_struct]

@@ -174,12 +243,16 @@ impl IDBRequest {
}

// https://www.w3.org/TR/IndexedDB-2/#asynchronously-execute-a-request
pub fn execute_async(
pub fn execute_async<T>(
source: &IDBObjectStore,
operation: AsyncOperation,
receiver: IpcReceiver<BackendResult<T>>,
request: Option<DomRoot<IDBRequest>>,
can_gc: CanGc,
) -> Fallible<DomRoot<IDBRequest>> {
) -> Fallible<DomRoot<IDBRequest>>
where
T: Into<IdbResult> + for<'a> Deserialize<'a> + Serialize + Send + Sync + 'static,
{
// Step 1: Let transaction be the transaction associated with source.
let transaction = source.transaction().expect("Store has no transaction");
let global = transaction.global();

@@ -208,10 +281,6 @@ impl IDBRequest {
IDBTransactionMode::Versionchange => IndexedDBTxnMode::Versionchange,
};

let (sender, receiver) =
ipc::channel::<Result<Option<IdbResult>, ()>>(global.time_profiler_chan().clone())
.unwrap();

let response_listener = RequestListener {
request: Trusted::new(&request),
};

@@ -227,7 +296,7 @@ impl IDBRequest {
let response_listener = response_listener.clone();
task_source.queue(task!(request_callback: move || {
response_listener.handle_async_request_finished(
message.expect("Could not unwrap message"));
message.expect("Could not unwrap message").map(|t| t.into()));
}));
}),
);

@@ -237,7 +306,6 @@ impl IDBRequest {
.resource_threads()
.sender()
.send(IndexedDBThreadMsg::Async(
sender,
global.origin().immutable().clone(),
transaction.get_db_name().to_string(),
source.get_name().to_string(),

@@ -229,6 +229,7 @@ impl IDBTransactionMethods<crate::DomTypeHolder> for IDBTransaction {
// returns the same IDBObjectStore instance.
let mut store_handles = self.store_handles.borrow_mut();
let store = store_handles.entry(name.to_string()).or_insert_with(|| {
// TODO: get key path from backend
let store = IDBObjectStore::new(
&self.global(),
self.db.get_name(),

@@ -5,6 +5,7 @@
use std::iter::repeat;
use std::ptr;

use ipc_channel::ipc::IpcSender;
use js::conversions::jsstr_to_string;
use js::gc::MutableHandle;
use js::jsapi::{

@@ -14,17 +15,31 @@ use js::jsapi::{
};
use js::jsval::{DoubleValue, UndefinedValue};
use js::rust::{HandleValue, MutableHandleValue};
use net_traits::indexeddb_thread::{IndexedDBKeyRange, IndexedDBKeyType};
use net_traits::indexeddb_thread::{BackendResult, IndexedDBKeyRange, IndexedDBKeyType};
use profile_traits::ipc;
use profile_traits::ipc::IpcReceiver;
use script_bindings::conversions::{SafeToJSValConvertible, root_from_object};
use script_bindings::root::DomRoot;
use script_bindings::str::DOMString;
use serde::{Deserialize, Serialize};

use crate::dom::bindings::codegen::UnionTypes::StringOrStringSequence as StrOrStringSequence;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::import::module::SafeJSContext;
use crate::dom::bindings::structuredclone;
use crate::dom::globalscope::GlobalScope;
use crate::dom::idbkeyrange::IDBKeyRange;
use crate::dom::idbobjectstore::KeyPath;

pub fn create_channel<T>(
global: DomRoot<GlobalScope>,
) -> (IpcSender<BackendResult<T>>, IpcReceiver<BackendResult<T>>)
where
T: for<'a> Deserialize<'a> + Serialize,
{
ipc::channel::<BackendResult<T>>(global.time_profiler_chan().clone()).unwrap()
}
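
Review note: callers now build the typed reply channel themselves, embed the sender in the operation, and hand the matching receiver to execute_async. A hypothetical call site for the new Count message (key_range is a placeholder value; everything else mirrors the surrounding diff):

    let (sender, receiver) = indexed_db::create_channel::<u64>(self.global());
    IDBRequest::execute_async(
        self,
        AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
            sender,
            key_range,
        }),
        receiver,
        None,
        CanGc::note(),
    )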

// https://www.w3.org/TR/IndexedDB-2/#convert-key-to-value
#[allow(unsafe_code)]
pub fn key_type_to_jsval(

@@ -147,7 +162,6 @@ pub fn convert_value_to_key(

// https://www.w3.org/TR/IndexedDB-2/#convert-a-value-to-a-key-range
#[allow(unsafe_code)]
#[expect(unused)]
pub fn convert_value_to_key_range(
cx: SafeJSContext,
input: HandleValue,

@@ -21,7 +21,7 @@ interface IDBObjectStore {
[NewObject, Throws] IDBRequest delete(any query);
[NewObject, Throws] IDBRequest clear();
[NewObject, Throws] IDBRequest get(any query);
// [NewObject] IDBRequest getKey(any query);
[NewObject, Throws] IDBRequest getKey(any query);
// [NewObject] IDBRequest getAll(optional any query,
// optional [EnforceRange] unsigned long count);
// [NewObject] IDBRequest getAllKeys(optional any query,

@@ -2,15 +2,53 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::cmp::{PartialEq, PartialOrd};
use std::cmp::{Ordering, PartialEq, PartialOrd};
use std::error::Error;
use std::fmt::{Debug, Display, Formatter};

use ipc_channel::ipc::IpcSender;
use malloc_size_of_derive::MallocSizeOf;
use serde::{Deserialize, Serialize};
use servo_url::origin::ImmutableOrigin;

// TODO: Box<dyn Error> is not serializable; a fix needs to be found
pub type DbError = String;
/// A DbResult wraps any call that has to reach into the backend (in this case sqlite.rs).
/// These errors could be anything, depending on the backend.
pub type DbResult<T> = Result<T, DbError>;

/// Any error from the backend
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum BackendError {
DbNotFound,
StoreNotFound,
DbErr(DbError),
}

impl From<DbError> for BackendError {
fn from(value: DbError) -> Self {
BackendError::DbErr(value)
}
}

impl Display for BackendError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}

impl Error for BackendError {}

pub type BackendResult<T> = Result<T, BackendError>;
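
Review note: because Box<dyn Error> cannot cross the IPC boundary, engine errors are stringified into DbError before being wrapped. A hedged sketch of how an engine call might surface as a BackendResult (the PRAGMA query and function are illustrative, not the crate's exact code):

    fn read_version(conn: &rusqlite::Connection) -> BackendResult<u64> {
        // Any rusqlite error is flattened to a String, then wrapped via From<DbError>.
        conn.query_row("PRAGMA user_version", [], |row| row.get::<_, i64>(0))
            .map(|v| v as u64)
            .map_err(|e| BackendError::from(e.to_string()))
    }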

#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyPath {
String(String),
Sequence(Vec<String>),
}

// https://www.w3.org/TR/IndexedDB-2/#enumdef-idbtransactionmode
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum IndexedDBTxnMode {
Readonly,
Readwrite,

@@ -30,7 +68,7 @@ pub enum IndexedDBKeyType {

/// <https://www.w3.org/TR/IndexedDB-2/#compare-two-keys>
impl PartialOrd for IndexedDBKeyType {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
// 1. Let ta be the type of a.
// 2. Let tb be the type of b.

@@ -42,7 +80,7 @@ impl PartialOrd for IndexedDBKeyType {
IndexedDBKeyType::Date(_) |
IndexedDBKeyType::Number(_) |
IndexedDBKeyType::String(_),
) => Some(std::cmp::Ordering::Greater),
) => Some(Ordering::Greater),
// Step 4: If tb is array and ta is binary, string, date or number, return -1.
(
IndexedDBKeyType::Binary(_) |

@@ -50,39 +88,35 @@ impl PartialOrd for IndexedDBKeyType {
IndexedDBKeyType::Number(_) |
IndexedDBKeyType::String(_),
IndexedDBKeyType::Array(_),
) => Some(std::cmp::Ordering::Less),
) => Some(Ordering::Less),
// Step 5: If ta is binary and tb is string, date or number, return 1.
(
IndexedDBKeyType::Binary(_),
IndexedDBKeyType::String(_) |
IndexedDBKeyType::Date(_) |
IndexedDBKeyType::Number(_),
) => Some(std::cmp::Ordering::Greater),
) => Some(Ordering::Greater),
// Step 6: If tb is binary and ta is string, date or number, return -1.
(
IndexedDBKeyType::String(_) |
IndexedDBKeyType::Date(_) |
IndexedDBKeyType::Number(_),
IndexedDBKeyType::Binary(_),
) => Some(std::cmp::Ordering::Less),
) => Some(Ordering::Less),
// Step 7: If ta is string and tb is date or number, return 1.
(
IndexedDBKeyType::String(_),
IndexedDBKeyType::Date(_) | IndexedDBKeyType::Number(_),
) => Some(std::cmp::Ordering::Greater),
) => Some(Ordering::Greater),
// Step 8: If tb is string and ta is date or number, return -1.
(
IndexedDBKeyType::Date(_) | IndexedDBKeyType::Number(_),
IndexedDBKeyType::String(_),
) => Some(std::cmp::Ordering::Less),
) => Some(Ordering::Less),
// Step 9: If ta is date and tb is number, return 1.
(IndexedDBKeyType::Date(_), IndexedDBKeyType::Number(_)) => {
Some(std::cmp::Ordering::Greater)
},
(IndexedDBKeyType::Date(_), IndexedDBKeyType::Number(_)) => Some(Ordering::Greater),
// Step 10: If tb is date and ta is number, return -1.
(IndexedDBKeyType::Number(_), IndexedDBKeyType::Date(_)) => {
Some(std::cmp::Ordering::Less)
},
(IndexedDBKeyType::Number(_), IndexedDBKeyType::Date(_)) => Some(Ordering::Less),
// Step 11 skipped
// TODO: Likely a tiny bit wrong (use js number comparison)
(IndexedDBKeyType::Number(a), IndexedDBKeyType::Number(b)) => a.partial_cmp(b),

@@ -90,7 +124,6 @@ impl PartialOrd for IndexedDBKeyType {
(IndexedDBKeyType::String(a), IndexedDBKeyType::String(b)) => a.partial_cmp(b),
// TODO: Likely a little wrong (use js binary comparison)
(IndexedDBKeyType::Binary(a), IndexedDBKeyType::Binary(b)) => a.partial_cmp(b),
// TODO: Very wrong (convert to Date and compare)
(IndexedDBKeyType::Date(a), IndexedDBKeyType::Date(b)) => a.partial_cmp(b),
// TODO: Probably also wrong (the items in a and b should be compared, double check against the spec)
(IndexedDBKeyType::Array(a), IndexedDBKeyType::Array(b)) => a.partial_cmp(b),

@@ -103,8 +136,8 @@ impl PartialEq for IndexedDBKeyType {
fn eq(&self, other: &Self) -> bool {
let cmp = self.partial_cmp(other);
match cmp {
Some(std::cmp::Ordering::Equal) => true,
Some(std::cmp::Ordering::Less) | Some(std::cmp::Ordering::Greater) => false,
Some(Ordering::Equal) => true,
Some(Ordering::Less) | Some(Ordering::Greater) => false,
None => {
// If we can't compare the two keys, we assume they are not equal.
false

@@ -215,33 +248,47 @@ fn test_as_singleton() {
assert!(full_range.as_singleton().is_none());
}
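
Review note: the spec ordering implemented above sorts across types as number < date < string < binary < array before any within-type comparison happens. A couple of illustrative assertions (the variant payload types are assumptions inferred from the match arms, not confirmed by this diff):

    assert!(IndexedDBKeyType::Number(1.0) < IndexedDBKeyType::String("a".into()));
    assert!(IndexedDBKeyType::String("z".into()) < IndexedDBKeyType::Binary(vec![0]));
    assert!(IndexedDBKeyType::Binary(vec![9]) < IndexedDBKeyType::Array(vec![]));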

#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum PutItemResult {
Success,
CannotOverwrite,
}

#[derive(Debug, Deserialize, Serialize)]
pub enum AsyncReadOnlyOperation {
/// Gets the value associated with the given key in the associated idb data
GetItem(
IndexedDBKeyType, // Key
),
GetKey {
sender: IpcSender<BackendResult<Option<IndexedDBKeyType>>>,
key_range: IndexedDBKeyRange,
},
GetItem {
sender: IpcSender<BackendResult<Option<Vec<u8>>>>,
key_range: IndexedDBKeyRange,
},

Count(
IndexedDBKeyType, // Key
),
Count {
sender: IpcSender<BackendResult<u64>>,
key_range: IndexedDBKeyRange,
},
}

#[derive(Debug, Deserialize, Serialize)]
pub enum AsyncReadWriteOperation {
/// Sets the value of the given key in the associated idb data
PutItem(
IndexedDBKeyType, // Key
Vec<u8>, // Value
bool, // Should overwrite
),
PutItem {
sender: IpcSender<BackendResult<PutItemResult>>,
key: IndexedDBKeyType,
value: Vec<u8>,
should_overwrite: bool,
},

/// Removes the key/value pair for the given key in the associated idb data
RemoveItem(
IndexedDBKeyType, // Key
),
RemoveItem {
sender: IpcSender<BackendResult<()>>,
key: IndexedDBKeyType,
},
/// Clears all key/value pairs in the associated idb data
Clear,
Clear(IpcSender<BackendResult<()>>),
}
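
Review note: every async request now carries its own typed reply sender, so the backend answers GetItem with bytes, Count with a u64, and PutItem with a PutItemResult instead of one catch-all result type crossing the IPC boundary. A hedged construction sketch using ipc_channel directly (serialized_key and serialized_value are placeholders):

    let (sender, receiver) =
        ipc_channel::ipc::channel::<BackendResult<PutItemResult>>().unwrap();
    let op = AsyncReadWriteOperation::PutItem {
        sender,
        key: serialized_key,
        value: serialized_value,
        should_overwrite: false,
    };
    // ... route op to the IndexedDB thread, then block on the typed reply:
    let reply: BackendResult<PutItemResult> = receiver.recv().unwrap();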

/// Operations that are not executed instantly, but rather added to a

@@ -252,11 +299,17 @@ pub enum AsyncOperation {
ReadWrite(AsyncReadWriteOperation),
}

#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub enum CreateObjectResult {
Created,
AlreadyExists,
}

#[derive(Debug, Deserialize, Serialize)]
pub enum SyncOperation {
/// Upgrades the version of the database
UpgradeVersion(
IpcSender<Result<u64, ()>>,
IpcSender<BackendResult<u64>>,
ImmutableOrigin,
String, // Database
u64, // Serial number for the transaction

@@ -264,7 +317,15 @@ pub enum SyncOperation {
),
/// Checks if an object store has a key generator, used in e.g. Put
HasKeyGenerator(
IpcSender<bool>,
IpcSender<BackendResult<bool>>,
ImmutableOrigin,
String, // Database
String, // Store
),
/// Gets an object store's key path
KeyPath(
/// Object stores do not have to have key paths
IpcSender<BackendResult<Option<KeyPath>>>,
ImmutableOrigin,
String, // Database
String, // Store

@@ -272,30 +333,51 @@ pub enum SyncOperation {

/// Commits changes of a transaction to the database
Commit(
IpcSender<Result<(), ()>>,
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
u64, // Transaction serial number
),

/// Creates a new store for the database
CreateObjectStore(
IpcSender<Result<(), ()>>,
/// Creates a new index for the database
CreateIndex(
IpcSender<BackendResult<CreateObjectResult>>,
ImmutableOrigin,
String, // Database
String, // Store
String, // Index name
KeyPath, // key path
bool, // unique flag
bool, // multientry flag
),
/// Deletes an index
DeleteIndex(
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
String, // Store
String, // Index name
),

/// Creates a new store for the database
CreateObjectStore(
IpcSender<BackendResult<CreateObjectResult>>,
ImmutableOrigin,
String, // Database
String, // Store
Option<KeyPath>, // Key Path
bool,
),

DeleteObjectStore(
IpcSender<Result<(), ()>>,
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
String, // Store
),

CloseDatabase(
IpcSender<Result<(), ()>>,
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
),

@@ -309,7 +391,7 @@ pub enum SyncOperation {

/// Deletes the database
DeleteDatabase(
IpcSender<Result<(), ()>>,
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
),

@@ -325,7 +407,7 @@ pub enum SyncOperation {
/// Starts executing the requests of a transaction
/// <https://www.w3.org/TR/IndexedDB-2/#transaction-start>
StartTransaction(
IpcSender<Result<(), ()>>,
IpcSender<BackendResult<()>>,
ImmutableOrigin,
String, // Database
u64, // The serial number of the mutating transaction

@@ -333,7 +415,7 @@ pub enum SyncOperation {

/// Returns the version of the database
Version(
IpcSender<u64>,
IpcSender<BackendResult<u64>>,
ImmutableOrigin,
String, // Database
),

@@ -342,20 +424,10 @@ pub enum SyncOperation {
Exit(IpcSender<()>),
}
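
Review note: the sync side follows the same convention, with the BackendResult reply channel as the first tuple field. A hedged round-trip sketch for Version, mirroring the IDBDatabase call site earlier in this diff (origin and db_name are placeholders):

    let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
    let operation = SyncOperation::Version(sender, origin, db_name.to_string());
    global
        .resource_threads()
        .sender()
        .send(IndexedDBThreadMsg::Sync(operation))
        .unwrap();
    let version: BackendResult<u64> = receiver.recv().unwrap();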

/// The set of all kinds of results that can be returned from async operations.
#[derive(Debug, Deserialize, Serialize)]
pub enum IdbResult {
/// The key used to perform an async operation.
Key(IndexedDBKeyType),
/// A structured clone of a value retrieved from an object store.
Data(Vec<u8>),
}

#[derive(Debug, Deserialize, Serialize)]
pub enum IndexedDBThreadMsg {
Sync(SyncOperation),
Async(
IpcSender<Result<Option<IdbResult>, ()>>, // Sender to send the result of the async operation
ImmutableOrigin,
String, // Database
String, // ObjectStore