Switch indexeddb backend to sqlite and improve IPC messaging (#38187)

- Use SQLite instead of heed (one IndexedDB database = one SQLite
database)
- Implement the backend for indexes
- Use key ranges where needed (as specified by the spec)
- Implement `getKey`
- Fix channel error messaging (this led to a number of changes to how
async requests are handled; see the sketch below)
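
Roughly, the new request shape (a sketch using names from the diff below): each
async operation now carries its own IPC reply channel, instead of routing every
result through a single `Result<Option<IdbResult>, ()>` sender:

    AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
        sender, // IpcSender<BackendResult<PutItemResult>>
        key,
        value,
        should_overwrite,
    })

This lets the backend reply with typed errors such as
`BackendError::StoreNotFound` or `BackendError::DbErr(String)` per request,
instead of a bare `Err(())`.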

Note: `components/net/indexeddb/engines/sqlite/serialize.rs` is unused;
I can delete it if needed.

Testing: Switching to SQLite eliminated many panics (while exposing some
new failures).
Fixes: #38040

---------

Signed-off-by: Ashwin Naren <arihant2math@gmail.com>
Commit: fc3feceee5 (parent: f4bbdf8010)
Author: Ashwin Naren, 2025-08-16 00:27:17 -07:00, committed by GitHub
59 changed files with 2002 additions and 818 deletions


@@ -36,7 +36,6 @@ futures-core = { version = "0.3.30", default-features = false }
futures-util = { version = "0.3.30", default-features = false }
generic-array = "0.14"
headers = { workspace = true }
heed = "0.20"
http = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1", "http2"] }
@@ -60,6 +59,8 @@ rustls = { workspace = true }
rustls-pemfile = { workspace = true }
rustls-pki-types = { workspace = true }
resvg = { workspace = true }
rusqlite = { version = "0.37", features = ["bundled"] }
sea-query = { version = "0.32", default-features = false, features = ["derive", "backend-sqlite"] }
serde = { workspace = true }
serde_json = { workspace = true }
servo_arc = { workspace = true }
@@ -86,6 +87,7 @@ futures = { version = "0.3", features = ["compat"] }
hyper = { workspace = true, features = ["full"] }
hyper-util = { workspace = true, features = ["server-graceful"] }
rustls = { workspace = true, features = ["aws-lc-rs"] }
tempfile = "3"
[[test]]
name = "main"


@@ -1,304 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use heed::types::*;
use heed::{Database, Env, EnvOpenOptions};
use log::warn;
use net_traits::indexeddb_thread::{
AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, IdbResult, IndexedDBTxnMode,
};
use tokio::sync::oneshot;
use super::{KvsEngine, KvsTransaction, SanitizedName};
use crate::resource_thread::CoreResourceThreadPool;
type HeedDatabase = Database<Bytes, Bytes>;
// A simple store that also has a key generator that can be used if no key
// is provided for the stored objects
#[derive(Clone)]
struct Store {
inner: HeedDatabase,
// https://www.w3.org/TR/IndexedDB-2/#key-generator
key_generator: Option<u64>,
}
pub struct HeedEngine {
heed_env: Arc<Env>,
open_stores: Arc<RwLock<HashMap<SanitizedName, Store>>>,
read_pool: Arc<CoreResourceThreadPool>,
write_pool: Arc<CoreResourceThreadPool>,
}
impl HeedEngine {
pub fn new(
base_dir: &Path,
db_file_name: &Path,
thread_pool: Arc<CoreResourceThreadPool>,
) -> Self {
let mut db_dir = PathBuf::new();
db_dir.push(base_dir);
db_dir.push(db_file_name);
std::fs::create_dir_all(&db_dir).expect("Could not create OS directory for idb");
// FIXME:(arihant2math) gracefully handle errors like hitting max dbs
#[allow(unsafe_code)]
let env = unsafe {
EnvOpenOptions::new()
.max_dbs(1024)
.open(db_dir)
.expect("Failed to open db_dir")
};
Self {
heed_env: Arc::new(env),
open_stores: Arc::new(RwLock::new(HashMap::new())),
read_pool: thread_pool.clone(),
write_pool: thread_pool,
}
}
}
impl KvsEngine for HeedEngine {
type Error = heed::Error;
fn create_store(&self, store_name: SanitizedName, auto_increment: bool) -> heed::Result<()> {
let mut write_txn = self.heed_env.write_txn()?;
let _ = self.heed_env.clear_stale_readers();
let new_store: HeedDatabase = self
.heed_env
.create_database(&mut write_txn, Some(&*store_name.to_string()))?;
write_txn.commit()?;
let key_generator = { if auto_increment { Some(0) } else { None } };
let store = Store {
inner: new_store,
key_generator,
};
self.open_stores
.write()
.expect("Could not acquire lock on stores")
.insert(store_name, store);
Ok(())
}
fn delete_store(&self, store_name: SanitizedName) -> heed::Result<()> {
// TODO: Actually delete store instead of just clearing it
let mut write_txn = self.heed_env.write_txn()?;
let store: HeedDatabase = self
.heed_env
.create_database(&mut write_txn, Some(&*store_name.to_string()))?;
store.clear(&mut write_txn)?;
write_txn.commit()?;
let mut open_stores = self.open_stores.write().unwrap();
open_stores.retain(|key, _| key != &store_name);
Ok(())
}
fn close_store(&self, store_name: SanitizedName) -> heed::Result<()> {
// FIXME: (arihant2math) unused
// FIXME:(arihant2math) return error if no store ...
let mut open_stores = self.open_stores.write().unwrap();
open_stores.retain(|key, _| key != &store_name);
Ok(())
}
// Starts a transaction, processes all operations for that transaction,
// and commits the changes.
fn process_transaction(
&self,
transaction: KvsTransaction,
) -> oneshot::Receiver<Option<Vec<u8>>> {
// This executes in a thread pool, and `readwrite` transactions
// will block their thread if the writer is occupied, so we can
// probably do some smart things here in order to optimize.
// Queueing 8 writers will for example block 7 threads,
// so write operations are reserved for just one thread,
// so that the rest of the threads can work in parallel with read txns.
let heed_env = self.heed_env.clone();
let stores = self.open_stores.clone();
let (tx, rx) = oneshot::channel();
if let IndexedDBTxnMode::Readonly = transaction.mode {
self.read_pool.spawn(move || {
let env = heed_env;
let rtxn = env.read_txn().expect("Could not create idb store reader");
let mut results = vec![];
for request in transaction.requests {
match request.operation {
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem(key)) => {
let key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.read()
.expect("Could not acquire read lock on stores");
let store = stores
.get(&request.store_name)
.expect("Could not get store");
let result = store.inner.get(&rtxn, &key).expect("Could not get item");
if let Some(blob) = result {
results
.push((request.sender, Some(IdbResult::Data(blob.to_vec()))));
} else {
results.push((request.sender, None));
}
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count(key)) => {
let _key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.read()
.expect("Could not acquire read lock on stores");
let _store = stores
.get(&request.store_name)
.expect("Could not get store");
// FIXME:(arihant2math) Return count with sender
},
AsyncOperation::ReadWrite(..) => {
// We cannot reach this, as checks are made earlier so that
// no modifying requests are executed on readonly transactions
unreachable!(
"Cannot execute modifying request with readonly transactions"
);
},
}
}
if tx.send(None).is_err() {
warn!("IDBTransaction's execution channel is dropped");
};
if let Err(e) = rtxn.commit() {
warn!("Error committing transaction: {:?}", e);
for (sender, _) in results {
let _ = sender.send(Err(()));
}
} else {
for (sender, result) in results {
let _ = sender.send(Ok(result));
}
}
});
} else {
self.write_pool.spawn(move || {
// Acquiring a writer will block the thread if another `readwrite` transaction is active
let env = heed_env;
let mut wtxn = env.write_txn().expect("Could not create idb store writer");
let mut results = vec![];
for request in transaction.requests {
match request.operation {
AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem(
key,
value,
overwrite,
)) => {
let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.write()
.expect("Could not acquire write lock on stores");
let store = stores
.get(&request.store_name)
.expect("Could not get store");
if overwrite ||
store
.inner
.get(&wtxn, &serialized_key)
.expect("Could not get item")
.is_none()
{
let result = store
.inner
.put(&mut wtxn, &serialized_key, &value)
.ok()
.and(Some(IdbResult::Key(key)));
results.push((request.sender, result));
} else {
results.push((request.sender, None));
}
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem(key)) => {
let key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.read()
.expect("Could not acquire write lock on stores");
let store = stores
.get(&request.store_name)
.expect("Could not get store");
let result = store.inner.get(&wtxn, &key).expect("Could not get item");
results.push((
request.sender,
result.map(|blob| IdbResult::Data(blob.to_vec())),
));
},
AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem(key)) => {
let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.write()
.expect("Could not acquire write lock on stores");
let store = stores
.get(&request.store_name)
.expect("Could not get store");
let result = store
.inner
.delete(&mut wtxn, &serialized_key)
.ok()
.and(Some(IdbResult::Key(key)));
results.push((request.sender, result));
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count(key)) => {
let _key: Vec<u8> = bincode::serialize(&key).unwrap();
let stores = stores
.read()
.expect("Could not acquire read lock on stores");
let _store = stores
.get(&request.store_name)
.expect("Could not get store");
// FIXME:(arihant2math) Return count with sender
},
AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear) => {
let stores = stores
.write()
.expect("Could not acquire write lock on stores");
let store = stores
.get(&request.store_name)
.expect("Could not get store");
// FIXME:(arihant2math) Error handling
let _ = store.inner.clear(&mut wtxn);
},
}
}
if let Err(e) = wtxn.commit() {
warn!("Error committing to database: {:?}", e);
for (sender, _) in results {
let _ = sender.send(Err(()));
}
} else {
for (sender, result) in results {
let _ = sender.send(Ok(result));
}
}
})
}
rx
}
fn has_key_generator(&self, store_name: SanitizedName) -> bool {
let has_generator = self
.open_stores
.read()
.expect("Could not acquire read lock on stores")
.get(&store_name)
.expect("Store not found")
.key_generator
.is_some();
has_generator
}
}


@@ -4,15 +4,14 @@
use std::collections::VecDeque;
use ipc_channel::ipc::IpcSender;
use net_traits::indexeddb_thread::{AsyncOperation, IdbResult, IndexedDBTxnMode};
use net_traits::indexeddb_thread::{AsyncOperation, CreateObjectResult, IndexedDBTxnMode, KeyPath};
use tokio::sync::oneshot;
pub use self::heed::HeedEngine;
pub use self::sqlite::SqliteEngine;
mod heed;
mod sqlite;
#[derive(Eq, Hash, PartialEq)]
#[derive(Clone, Eq, Hash, PartialEq)]
pub struct SanitizedName {
name: String,
}
@@ -46,34 +45,57 @@ impl std::fmt::Display for SanitizedName {
}
pub struct KvsOperation {
pub sender: IpcSender<Result<Option<IdbResult>, ()>>,
pub store_name: SanitizedName,
pub operation: AsyncOperation,
}
pub struct KvsTransaction {
// Mode could be used by a more optimal implementation of transactions
// that has different allocated threadpools for reading and writing
#[allow(unused)]
pub mode: IndexedDBTxnMode,
pub requests: VecDeque<KvsOperation>,
}
pub trait KvsEngine {
type Error;
type Error: std::error::Error;
fn create_store(
&self,
store_name: SanitizedName,
key_path: Option<KeyPath>,
auto_increment: bool,
) -> Result<(), Self::Error>;
) -> Result<CreateObjectResult, Self::Error>;
fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;
#[expect(dead_code)]
fn close_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;
fn delete_database(self) -> Result<(), Self::Error>;
fn process_transaction(
&self,
transaction: KvsTransaction,
) -> oneshot::Receiver<Option<Vec<u8>>>;
fn has_key_generator(&self, store_name: SanitizedName) -> bool;
fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath>;
fn create_index(
&self,
store_name: SanitizedName,
index_name: String,
key_path: KeyPath,
unique: bool,
multi_entry: bool,
) -> Result<CreateObjectResult, Self::Error>;
fn delete_index(
&self,
store_name: SanitizedName,
index_name: String,
) -> Result<(), Self::Error>;
fn version(&self) -> Result<u64, Self::Error>;
fn set_version(&self, version: u64) -> Result<(), Self::Error>;
}
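// Illustrative call sequence for a backend implementing this trait
// (mirroring the tests in sqlite.rs; `desc`, `pool` and `txn` are assumed to
// be set up elsewhere):
//   let engine = SqliteEngine::new(&base_dir, &desc, 1, pool)?;
//   engine.create_store(store_name.clone(), None, true)?; // -> CreateObjectResult::Created
//   let rx = engine.process_transaction(txn); // replies go to each request's own sender
//   rx.blocking_recv(); // resolves once the whole transaction has been processed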


@@ -0,0 +1,713 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::path::{Path, PathBuf};
use std::sync::Arc;
use ipc_channel::ipc::IpcSender;
use log::{error, info};
use net_traits::indexeddb_thread::{
AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, BackendError, BackendResult,
CreateObjectResult, IndexedDBKeyRange, IndexedDBTxnMode, KeyPath, PutItemResult,
};
use rusqlite::{Connection, Error, OptionalExtension, params};
use sea_query::{Condition, Expr, ExprTrait, IntoCondition, SqliteQueryBuilder};
use serde::Serialize;
use tokio::sync::oneshot;
use crate::indexeddb::engines::{KvsEngine, KvsTransaction, SanitizedName};
use crate::indexeddb::idb_thread::IndexedDBDescription;
use crate::resource_thread::CoreResourceThreadPool;
mod create;
mod database_model;
mod object_data_model;
mod object_store_index_model;
mod object_store_model;
// These pragmas need to be set once
const DB_INIT_PRAGMAS: [&str; 2] = ["PRAGMA journal_mode = WAL;", "PRAGMA encoding = 'UTF-16';"];
// These pragmas need to be run once per connection.
const DB_PRAGMAS: [&str; 4] = [
"PRAGMA synchronous = NORMAL;",
"PRAGMA journal_size_limit = 67108864 -- 64 megabytes;",
"PRAGMA mmap_size = 67108864 -- 64 megabytes;",
"PRAGMA cache_size = 2000;",
];
fn range_to_query(range: IndexedDBKeyRange) -> Condition {
// Special case for optimization
if let Some(singleton) = range.as_singleton() {
let encoded = bincode::serialize(singleton).unwrap();
return Expr::column(object_data_model::Column::Key)
.eq(encoded)
.into_condition();
}
let mut parts = vec![];
if let Some(upper) = range.upper.as_ref() {
let upper_bytes = bincode::serialize(upper).unwrap();
let query = if range.upper_open {
Expr::column(object_data_model::Column::Key).lt(upper_bytes)
} else {
Expr::column(object_data_model::Column::Key).lte(upper_bytes)
};
parts.push(query);
}
if let Some(lower) = range.lower.as_ref() {
let lower_bytes = bincode::serialize(lower).unwrap();
let query = if range.lower_open {
Expr::column(object_data_model::Column::Key).gt(lower_bytes)
} else {
Expr::column(object_data_model::Column::Key).gte(lower_bytes)
};
parts.push(query);
}
}
let mut condition = Condition::all();
for part in parts {
condition = condition.add(part);
}
condition
}
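// Illustrative example (not from the diff): for a range with a closed lower
// bound `a` and an open upper bound `b`, the condition above renders roughly as
//   WHERE "key" >= ? AND "key" < ?   -- bound to bincode(a), bincode(b)
// Caveat: SQLite compares these BLOBs byte-wise, which only matches IndexedDB
// key ordering if the bincode encoding of keys preserves that order.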
pub struct SqliteEngine {
db_path: PathBuf,
connection: Connection,
read_pool: Arc<CoreResourceThreadPool>,
write_pool: Arc<CoreResourceThreadPool>,
}
impl SqliteEngine {
// TODO: intake dual pools
pub fn new(
base_dir: &Path,
db_info: &IndexedDBDescription,
version: u64,
pool: Arc<CoreResourceThreadPool>,
) -> Result<Self, Error> {
let mut db_path = PathBuf::new();
db_path.push(base_dir);
db_path.push(db_info.as_path());
let db_parent = db_path.clone();
db_path.push("db.sqlite");
if !db_path.exists() {
std::fs::create_dir_all(db_parent).unwrap();
std::fs::File::create(&db_path).unwrap();
}
let connection = Self::init_db(&db_path, db_info, version)?;
for stmt in DB_PRAGMAS {
// TODO: Handle errors properly
let _ = connection.execute(stmt, ());
}
Ok(Self {
connection,
db_path,
read_pool: pool.clone(),
write_pool: pool,
})
}
fn init_db(
path: &Path,
db_info: &IndexedDBDescription,
version: u64,
) -> Result<Connection, Error> {
let connection = Connection::open(path)?;
if connection.table_exists(None, "database")? {
// Database already exists, no need to initialize
return Ok(connection);
}
info!("Initializing indexeddb database at {:?}", path);
for stmt in DB_INIT_PRAGMAS {
// FIXME(arihant2math): this fails occasionally
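// (rusqlite's `execute` returns `Err(ExecuteReturnedResults)` for statements
// that return rows; pragmas like `journal_mode` report their new value, so
// the error fires even though the pragma has already taken effect.
// `Connection::pragma_update` would avoid the spurious error.)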
let _ = connection.execute(stmt, ());
}
create::create_tables(&connection)?;
connection.execute(
"INSERT INTO database (name, origin, version) VALUES (?, ?, ?)",
params![
db_info.name.to_owned(),
db_info.origin.to_owned().ascii_serialization(),
i64::from_ne_bytes(version.to_ne_bytes())
],
)?;
Ok(connection)
}
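// Note on the version round-trip (illustrative): the schema stores the version
// as a signed BIGINT, so the u64 is bit-cast rather than range-checked:
//   let stored = i64::from_ne_bytes(7u64.to_ne_bytes());
//   assert_eq!(u64::from_ne_bytes(stored.to_ne_bytes()), 7u64);
// Values above i64::MAX display as negative in SQL, but they still round-trip
// losslessly through `version()` / `set_version()`.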
fn get(
connection: &Connection,
store: object_store_model::Model,
key_range: IndexedDBKeyRange,
) -> Result<Option<object_data_model::Model>, Error> {
let query = range_to_query(key_range);
let stmt = sea_query::Query::select()
.column(sea_query::Asterisk)
.from(object_data_model::Column::Table)
.and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
.to_owned();
connection
.prepare(&stmt.build(SqliteQueryBuilder).0)?
.query_one((), |row| object_data_model::Model::try_from(row))
.optional()
}
fn get_key(
connection: &Connection,
store: object_store_model::Model,
key_range: IndexedDBKeyRange,
) -> Result<Option<Vec<u8>>, Error> {
Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.key))
}
fn get_item(
connection: &Connection,
store: object_store_model::Model,
key_range: IndexedDBKeyRange,
) -> Result<Option<Vec<u8>>, Error> {
Self::get(connection, store, key_range).map(|opt| opt.map(|model| model.data))
}
fn put_item(
connection: &Connection,
store: object_store_model::Model,
serialized_key: Vec<u8>,
value: Vec<u8>,
should_overwrite: bool,
) -> Result<PutItemResult, Error> {
let existing_item = connection
.prepare("SELECT * FROM object_data WHERE key = ? AND object_store_id = ?")
.and_then(|mut stmt| {
stmt.query_row(params![serialized_key, store.id], |row| {
object_data_model::Model::try_from(row)
})
.optional()
})?;
if should_overwrite || existing_item.is_none() {
// `INSERT OR REPLACE` so that an overwrite does not violate the
// (object_store_id, key) primary key on object_data.
connection.execute(
"INSERT OR REPLACE INTO object_data (object_store_id, key, data) VALUES (?, ?, ?)",
params![store.id, serialized_key, value],
)?;
Ok(PutItemResult::Success)
} else {
Ok(PutItemResult::CannotOverwrite)
}
}
fn delete_item(
connection: &Connection,
store: object_store_model::Model,
serialized_key: Vec<u8>,
) -> Result<(), Error> {
connection.execute(
"DELETE FROM object_data WHERE key = ? AND object_store_id = ?",
params![serialized_key, store.id],
)?;
Ok(())
}
fn clear(connection: &Connection, store: object_store_model::Model) -> Result<(), Error> {
connection.execute(
"DELETE FROM object_data WHERE object_store_id = ?",
params![store.id],
)?;
Ok(())
}
fn count(
connection: &Connection,
store: object_store_model::Model,
key_range: IndexedDBKeyRange,
) -> Result<usize, Error> {
let query = range_to_query(key_range);
let count = sea_query::Query::select()
.expr(Expr::col(object_data_model::Column::Key).count())
.from(object_data_model::Column::Table)
.and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
.to_owned();
connection
.prepare(&count.build(SqliteQueryBuilder).0)?
.query_row((), |row| row.get(0))
.map(|count: i64| count as usize)
}
}
impl KvsEngine for SqliteEngine {
type Error = Error;
fn create_store(
&self,
store_name: SanitizedName,
key_path: Option<KeyPath>,
auto_increment: bool,
) -> Result<CreateObjectResult, Self::Error> {
let mut stmt = self
.connection
.prepare("SELECT 1 FROM object_store WHERE name = ?")?;
if stmt.exists(params![store_name.to_string()])? {
// Store already exists
return Ok(CreateObjectResult::AlreadyExists);
}
self.connection.execute(
"INSERT INTO object_store (name, key_path, auto_increment) VALUES (?, ?, ?)",
params![
store_name.to_string(),
key_path.map(|v| bincode::serialize(&v).unwrap()),
auto_increment
],
)?;
Ok(CreateObjectResult::Created)
}
fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error> {
let result = self.connection.execute(
"DELETE FROM object_store WHERE name = ?",
params![store_name.to_string()],
)?;
if result == 0 {
Err(Error::QueryReturnedNoRows)
} else if result > 1 {
Err(Error::QueryReturnedMoreThanOneRow)
} else {
Ok(())
}
}
fn close_store(&self, _store_name: SanitizedName) -> Result<(), Self::Error> {
// TODO: do something
Ok(())
}
fn delete_database(self) -> Result<(), Self::Error> {
// attempt to close the connection first
let _ = self.connection.close();
if self.db_path.exists() {
if let Err(e) = std::fs::remove_dir_all(self.db_path.parent().unwrap()) {
error!("Failed to delete database: {:?}", e);
}
}
Ok(())
}
fn process_transaction(
&self,
transaction: KvsTransaction,
) -> oneshot::Receiver<Option<Vec<u8>>> {
let (tx, rx) = oneshot::channel();
let spawning_pool = if transaction.mode == IndexedDBTxnMode::Readonly {
self.read_pool.clone()
} else {
self.write_pool.clone()
};
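// (`read_pool` and `write_pool` are currently clones of the same pool; see
// `SqliteEngine::new`. The split here is preparatory rather than functional.)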
let path = self.db_path.clone();
spawning_pool.spawn(move || {
let connection = Connection::open(path).unwrap();
for request in transaction.requests {
let object_store = connection
.prepare("SELECT 1 FROM object_store WHERE name = ?")
.and_then(|mut stmt| {
stmt.query_row(params![request.store_name.to_string()], |row| {
object_store_model::Model::try_from(row)
})
.optional()
});
fn process_object_store<T>(
object_store: Result<Option<object_store_model::Model>, Error>,
sender: &IpcSender<BackendResult<T>>,
) -> Result<object_store_model::Model, ()>
where
T: Serialize,
{
match object_store {
Ok(Some(store)) => Ok(store),
Ok(None) => {
let _ = sender.send(Err(BackendError::StoreNotFound));
Err(())
},
Err(e) => {
let _ = sender.send(Err(BackendError::DbErr(format!("{:?}", e))));
Err(())
},
}
}
match request.operation {
AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
sender,
key,
value,
should_overwrite,
}) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
let _ = sender.send(
Self::put_item(
&connection,
object_store,
serialized_key,
value,
should_overwrite,
)
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
sender,
key_range,
}) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let _ = sender.send(
Self::get_item(&connection, object_store, key_range)
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
sender,
key,
}) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let serialized_key: Vec<u8> = bincode::serialize(&key).unwrap();
let _ = sender.send(
Self::delete_item(&connection, object_store, serialized_key)
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
sender,
key_range,
}) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let _ = sender.send(
Self::count(&connection, object_store, key_range)
.map(|r| r as u64)
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(sender)) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let _ = sender.send(
Self::clear(&connection, object_store)
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetKey {
sender,
key_range,
}) => {
let Ok(object_store) = process_object_store(object_store, &sender) else {
continue;
};
let _ = sender.send(
Self::get_key(&connection, object_store, key_range)
.map(|key| key.map(|k| bincode::deserialize(&k).unwrap()))
.map_err(|e| BackendError::DbErr(format!("{:?}", e))),
);
},
}
}
let _ = tx.send(None);
});
rx
}
// TODO: we should be able to error out here, maybe change the trait definition?
fn has_key_generator(&self, store_name: SanitizedName) -> bool {
self.connection
.prepare("SELECT * FROM object_store WHERE name = ?")
.and_then(|mut stmt| {
stmt.query_row(params![store_name.to_string()], |r| {
let object_store = object_store_model::Model::try_from(r).unwrap();
Ok(object_store.auto_increment)
})
})
.optional()
.unwrap()
// TODO: Wrong (change trait definition for this function)
.unwrap_or_default()
}
fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
self.connection
.prepare("SELECT * FROM object_store WHERE name = ?")
.and_then(|mut stmt| {
stmt.query_row(params![store_name.to_string()], |r| {
let object_store = object_store_model::Model::try_from(r).unwrap();
Ok(object_store
.key_path
.map(|key_path| bincode::deserialize(&key_path).unwrap()))
})
})
.optional()
.unwrap()
// TODO: Wrong, same issues as has_key_generator
.unwrap_or_default()
}
fn create_index(
&self,
store_name: SanitizedName,
index_name: String,
key_path: KeyPath,
unique: bool,
multi_entry: bool,
) -> Result<CreateObjectResult, Self::Error> {
let object_store = self.connection.query_row(
"SELECT * FROM object_store WHERE name = ?",
params![store_name.to_string()],
|row| object_store_model::Model::try_from(row),
)?;
let index_exists: bool = self.connection.query_row(
"SELECT EXISTS(SELECT 1 FROM object_store_index WHERE name = ? AND object_store_id = ?)",
params![index_name.to_string(), object_store.id],
|row| row.get(0),
)?;
if index_exists {
return Ok(CreateObjectResult::AlreadyExists);
}
self.connection.execute(
"INSERT INTO object_store_index (object_store_id, name, key_path, unique_index, multi_entry_index)\
VALUES (?, ?, ?, ?, ?)",
params![
object_store.id,
index_name.to_string(),
bincode::serialize(&key_path).unwrap(),
unique,
multi_entry,
],
)?;
Ok(CreateObjectResult::Created)
}
fn delete_index(
&self,
store_name: SanitizedName,
index_name: String,
) -> Result<(), Self::Error> {
let object_store = self.connection.query_row(
"SELECT * FROM object_store WHERE name = ?",
params![store_name.to_string()],
|r| Ok(object_store_model::Model::try_from(r).unwrap()),
)?;
// Delete the index if it exists
let _ = self.connection.execute(
"DELETE FROM object_store_index WHERE name = ? AND object_store_id = ?",
params![index_name.to_string(), object_store.id],
)?;
Ok(())
}
fn version(&self) -> Result<u64, Self::Error> {
let version: i64 =
self.connection
.query_row("SELECT version FROM database LIMIT 1", [], |row| row.get(0))?;
Ok(u64::from_ne_bytes(version.to_ne_bytes()))
}
fn set_version(&self, version: u64) -> Result<(), Self::Error> {
let rows_affected = self.connection.execute(
"UPDATE database SET version = ?",
params![i64::from_ne_bytes(version.to_ne_bytes())],
)?;
if rows_affected == 0 {
return Err(Error::QueryReturnedNoRows);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::collections::VecDeque;
use std::sync::Arc;
use net_traits::indexeddb_thread::{
AsyncOperation, AsyncReadWriteOperation, CreateObjectResult, IndexedDBKeyType,
IndexedDBTxnMode, KeyPath,
};
use servo_url::ImmutableOrigin;
use url::Host;
use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SanitizedName};
use crate::indexeddb::idb_thread::IndexedDBDescription;
use crate::resource_thread::CoreResourceThreadPool;
fn test_origin() -> ImmutableOrigin {
ImmutableOrigin::Tuple(
"test_origin".to_string(),
Host::Domain("localhost".to_string()),
80,
)
}
fn get_pool() -> Arc<CoreResourceThreadPool> {
Arc::new(CoreResourceThreadPool::new(1, "test".to_string()))
}
#[test]
fn test_cycle() {
let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
let thread_pool = get_pool();
// Test create
let _ = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool.clone(),
)
.unwrap();
// Test open
let db = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool.clone(),
)
.unwrap();
let version = db.version().expect("Failed to get version");
assert_eq!(version, 1);
db.set_version(5).unwrap();
let new_version = db.version().expect("Failed to get new version");
assert_eq!(new_version, 5);
db.delete_database().expect("Failed to delete database");
}
#[test]
fn test_create_store() {
let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
let thread_pool = get_pool();
let db = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool,
)
.unwrap();
let store_name = SanitizedName::new("test_store".to_string());
let result = db.create_store(store_name.clone(), None, true);
assert!(result.is_ok());
let create_result = result.unwrap();
assert_eq!(create_result, CreateObjectResult::Created);
// Try to create the same store again
let result = db.create_store(store_name.clone(), None, false);
assert!(result.is_ok());
let create_result = result.unwrap();
assert_eq!(create_result, CreateObjectResult::AlreadyExists);
// Ensure store was not overwritten
assert!(db.has_key_generator(store_name));
}
#[test]
fn test_key_path() {
let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
let thread_pool = get_pool();
let db = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool,
)
.unwrap();
let store_name = SanitizedName::new("test_store".to_string());
let result = db.create_store(
store_name.clone(),
Some(KeyPath::String("test".to_string())),
true,
);
assert!(result.is_ok());
assert_eq!(
db.key_path(store_name),
Some(KeyPath::String("test".to_string()))
);
}
#[test]
fn test_delete_store() {
let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
let thread_pool = get_pool();
let db = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool,
)
.unwrap();
db.create_store(SanitizedName::new("test_store".to_string()), None, false)
.expect("Failed to create store");
// Delete the store
db.delete_store(SanitizedName::new("test_store".to_string()))
.expect("Failed to delete store");
// Try to delete the same store again
let result = db.delete_store(SanitizedName::new("test_store".into()));
assert!(result.is_err());
// Try to delete a non-existing store
let result = db.delete_store(SanitizedName::new("test_store".into()));
// Per the spec this should succeed, but the current implementation errors
assert!(result.is_err());
}
#[test]
fn test_async_operations() {
let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
let thread_pool = get_pool();
let db = super::SqliteEngine::new(
base_dir.path(),
&IndexedDBDescription {
name: "test_db".to_string(),
origin: test_origin(),
},
1,
thread_pool,
)
.unwrap();
let store_name = SanitizedName::new("test_store".to_string());
db.create_store(store_name.clone(), None, false)
.expect("Failed to create store");
let rx = db.process_transaction(KvsTransaction {
mode: IndexedDBTxnMode::Readwrite,
requests: VecDeque::from(vec![
// TODO: Test other operations
KvsOperation {
store_name: store_name.clone(),
operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
sender: ipc_channel::ipc::channel().unwrap().0,
key: IndexedDBKeyType::Number(1.0),
value: vec![],
should_overwrite: false,
}),
},
]),
});
let _ = rx.blocking_recv().unwrap();
}
}


@@ -0,0 +1,59 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
pub(crate) fn create_tables(conn: &rusqlite::Connection) -> Result<(), rusqlite::Error> {
conn.execute(
r#"create table database
(
name varchar not null
primary key,
origin varchar not null,
version bigint default 0 not null
);"#,
[],
)?;
conn.execute(
r#"create table object_store
(
id integer not null
primary key autoincrement,
name varchar not null
unique,
key_path varbinary_blob,
auto_increment boolean default FALSE not null
);"#,
[],
)?;
conn.execute(
r#"create table object_data
(
object_store_id integer not null
references object_store,
key blob not null,
data blob not null,
constraint "pk-object_data"
primary key (object_store_id, key)
);"#,
[],
)?;
conn.execute(
r#"create table object_store_index
(
id integer not null
primary key autoincrement,
object_store_id integer not null
references object_store,
name varchar not null
unique,
key_path varbinary_blob not null,
unique_index boolean not null,
multi_entry_index boolean not null
);"#,
[],
)?;
Ok(())
}
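// Illustrative data flow through these tables (hypothetical values):
//   INSERT INTO object_store (name, key_path, auto_increment)
//     VALUES ('books', NULL, TRUE); -- id = 1
//   INSERT INTO object_data (object_store_id, key, data)
//     VALUES (1, X'01', X'FF');
// "pk-object_data" makes (object_store_id, key) unique, which is why
// `put_item` checks for an existing row before inserting rather than relying
// on a bare INSERT.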


@@ -0,0 +1,22 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;
#[derive(Iden)]
#[expect(unused)]
pub enum Column {
Table,
Name,
Origin,
Version,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
pub name: String,
pub origin: String,
pub version: i64,
// TODO: Hold timestamp for vacuuming
// TODO: implement vacuuming
}


@@ -0,0 +1,32 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;
#[derive(Iden)]
pub enum Column {
// `Table` would otherwise render as the enum's name; point it at the real table.
#[iden = "object_data"]
Table,
ObjectStoreId,
Key,
Data,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
pub object_store_id: i32,
pub key: Vec<u8>,
pub data: Vec<u8>,
}
impl TryFrom<&Row<'_>> for Model {
type Error = rusqlite::Error;
fn try_from(value: &Row) -> Result<Self, Self::Error> {
Ok(Self {
object_store_id: value.get(0)?,
key: value.get(1)?,
data: value.get(2)?,
})
}
}


@@ -0,0 +1,25 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use sea_query::Iden;
#[derive(Iden)]
#[expect(unused)]
pub enum Column {
Table,
ObjectStoreId,
Name,
KeyPath,
UniqueIndex,
MultiEntryIndex,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
pub id: i32,
pub object_store_id: i32,
pub name: String,
pub key_path: Vec<u8>,
pub unique_index: bool,
pub multi_entry_index: bool,
}


@@ -0,0 +1,36 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use rusqlite::Row;
use sea_query::Iden;
#[derive(Iden)]
#[expect(unused)]
pub enum Column {
Table,
Id,
Name,
KeyPath,
AutoIncrement,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Model {
pub id: i32,
pub name: String,
pub key_path: Option<Vec<u8>>,
pub auto_increment: bool,
}
impl TryFrom<&Row<'_>> for Model {
type Error = rusqlite::Error;
fn try_from(value: &Row) -> Result<Self, Self::Error> {
Ok(Self {
id: value.get(0)?,
name: value.get(1)?,
key_path: value.get(2)?,
auto_increment: value.get(3)?,
})
}
}


@@ -11,13 +11,14 @@ use std::thread;
use ipc_channel::ipc::{self, IpcError, IpcReceiver, IpcSender};
use log::{debug, warn};
use net_traits::indexeddb_thread::{
AsyncOperation, IdbResult, IndexedDBThreadMsg, IndexedDBTxnMode, SyncOperation,
AsyncOperation, BackendError, BackendResult, CreateObjectResult, DbResult, IndexedDBThreadMsg,
IndexedDBTxnMode, KeyPath, SyncOperation,
};
use servo_config::pref;
use servo_url::origin::ImmutableOrigin;
use crate::indexeddb::engines::{
HeedEngine, KvsEngine, KvsOperation, KvsTransaction, SanitizedName,
KvsEngine, KvsOperation, KvsTransaction, SanitizedName, SqliteEngine,
};
use crate::resource_thread::CoreResourceThreadPool;
@@ -48,14 +49,14 @@ impl IndexedDBThreadFactory for IpcSender<IndexedDBThreadMsg> {
#[derive(Clone, Eq, Hash, PartialEq)]
pub struct IndexedDBDescription {
origin: ImmutableOrigin,
name: String,
pub(super) origin: ImmutableOrigin,
pub(super) name: String,
}
impl IndexedDBDescription {
// Converts the database description to a folder name where all
// data for this database is stored
fn as_path(&self) -> PathBuf {
pub(super) fn as_path(&self) -> PathBuf {
let mut path = PathBuf::new();
let sanitized_origin = SanitizedName::new(self.origin.ascii_serialization());
@@ -69,18 +70,14 @@ impl IndexedDBDescription {
struct IndexedDBEnvironment<E: KvsEngine> {
engine: E,
version: u64,
transactions: HashMap<u64, KvsTransaction>,
serial_number_counter: u64,
}
impl<E: KvsEngine> IndexedDBEnvironment<E> {
fn new(engine: E, version: u64) -> IndexedDBEnvironment<E> {
fn new(engine: E) -> IndexedDBEnvironment<E> {
IndexedDBEnvironment {
engine,
version,
transactions: HashMap::new(),
serial_number_counter: 0,
}
@@ -88,7 +85,6 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
fn queue_operation(
&mut self,
sender: IpcSender<Result<Option<IdbResult>, ()>>,
store_name: SanitizedName,
serial_number: u64,
mode: IndexedDBTxnMode,
@@ -102,16 +98,15 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
})
.requests
.push_back(KvsOperation {
sender,
operation,
store_name,
});
}
// Executes all requests for a transaction (without committing)
fn start_transaction(&mut self, txn: u64, sender: Option<IpcSender<Result<(), ()>>>) {
// FIXME:(arihant2math) find a way to optimizations in this function
// rather than on the engine level code (less repetition)
fn start_transaction(&mut self, txn: u64, sender: Option<IpcSender<BackendResult<()>>>) {
// FIXME:(arihant2math) find optimizations in this function
// rather than on the engine level code (less repetition)
if let Some(txn) = self.transactions.remove(&txn) {
let _ = self.engine.process_transaction(txn).blocking_recv();
}
@@ -129,40 +124,69 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
self.engine.has_key_generator(store_name)
}
fn create_object_store(
&mut self,
sender: IpcSender<Result<(), ()>>,
store_name: SanitizedName,
auto_increment: bool,
) {
let result = self.engine.create_store(store_name, auto_increment);
if result.is_ok() {
let _ = sender.send(Ok(()));
} else {
let _ = sender.send(Err(()));
}
fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
self.engine.key_path(store_name)
}
fn delete_object_store(
&mut self,
sender: IpcSender<Result<(), ()>>,
fn create_index(
&self,
store_name: SanitizedName,
) {
let result = self.engine.delete_store(store_name);
index_name: String,
key_path: KeyPath,
unique: bool,
multi_entry: bool,
) -> DbResult<CreateObjectResult> {
self.engine
.create_index(store_name, index_name, key_path, unique, multi_entry)
.map_err(|err| format!("{err:?}"))
}
if result.is_ok() {
let _ = sender.send(Ok(()));
} else {
let _ = sender.send(Err(()));
}
fn delete_index(&self, store_name: SanitizedName, index_name: String) -> DbResult<()> {
self.engine
.delete_index(store_name, index_name)
.map_err(|err| format!("{err:?}"))
}
fn create_object_store(
&mut self,
store_name: SanitizedName,
key_path: Option<KeyPath>,
auto_increment: bool,
) -> DbResult<CreateObjectResult> {
self.engine
.create_store(store_name, key_path, auto_increment)
.map_err(|err| format!("{err:?}"))
}
fn delete_object_store(&mut self, store_name: SanitizedName) -> DbResult<()> {
let result = self.engine.delete_store(store_name);
result.map_err(|err| format!("{err:?}"))
}
fn delete_database(self, sender: IpcSender<BackendResult<()>>) {
let result = self.engine.delete_database();
let _ = sender.send(
result
.map_err(|err| format!("{err:?}"))
.map_err(BackendError::from),
);
}
fn version(&self) -> DbResult<u64> {
self.engine.version().map_err(|err| format!("{err:?}"))
}
fn set_version(&mut self, version: u64) -> DbResult<()> {
self.engine
.set_version(version)
.map_err(|err| format!("{err:?}"))
}
}
struct IndexedDBManager {
port: IpcReceiver<IndexedDBThreadMsg>,
idb_base_dir: PathBuf,
databases: HashMap<IndexedDBDescription, IndexedDBEnvironment<HeedEngine>>,
databases: HashMap<IndexedDBDescription, IndexedDBEnvironment<SqliteEngine>>,
thread_pool: Arc<CoreResourceThreadPool>,
}
@@ -170,10 +194,14 @@ impl IndexedDBManager {
fn new(port: IpcReceiver<IndexedDBThreadMsg>, idb_base_dir: PathBuf) -> IndexedDBManager {
debug!("New indexedDBManager");
// Uses an estimate of the system cpus to process IndexedDB transactions
// See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html
// If no information can be obtained about the system, uses 4 threads as a default
let thread_count = thread::available_parallelism()
.map(|i| i.get())
.unwrap_or(pref!(threadpools_fallback_worker_num) as usize)
.min(pref!(threadpools_indexeddb_workers_max).max(1) as usize);
IndexedDBManager {
port,
idb_base_dir,
@@ -210,21 +238,15 @@ impl IndexedDBManager {
IndexedDBThreadMsg::Sync(operation) => {
self.handle_sync_operation(operation);
},
IndexedDBThreadMsg::Async(
sender,
origin,
db_name,
store_name,
txn,
mode,
operation,
) => {
IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database_mut(origin, db_name) {
// Queues an operation for a transaction without starting it
db.queue_operation(sender, store_name, txn, mode, operation);
// FIXME:(arihant2math) Schedule transactions properly:
// for now, we start them directly.
db.queue_operation(store_name, txn, mode, operation);
// FIXME:(arihant2math) Schedule transactions properly
// while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) {
// std::hint::spin_loop();
// }
db.start_transaction(txn, None);
}
},
@@ -236,7 +258,7 @@
&self,
origin: ImmutableOrigin,
db_name: String,
) -> Option<&IndexedDBEnvironment<HeedEngine>> {
) -> Option<&IndexedDBEnvironment<SqliteEngine>> {
let idb_description = IndexedDBDescription {
origin,
name: db_name,
@@ -249,7 +271,7 @@
&mut self,
origin: ImmutableOrigin,
db_name: String,
) -> Option<&mut IndexedDBEnvironment<HeedEngine>> {
) -> Option<&mut IndexedDBEnvironment<SqliteEngine>> {
let idb_description = IndexedDBDescription {
origin,
name: db_name,
@@ -266,7 +288,7 @@
name: db_name,
};
if let Some(_db) = self.databases.remove(&idb_description) {
// TODO: maybe close store here?
// TODO: maybe a close database function should be added to the trait and called here?
}
let _ = sender.send(Ok(()));
},
@@ -278,21 +300,24 @@
let idb_base_dir = self.idb_base_dir.as_path();
let version = version.unwrap_or(0);
match self.databases.entry(idb_description.clone()) {
Entry::Vacant(e) => {
let db = IndexedDBEnvironment::new(
HeedEngine::new(
SqliteEngine::new(
idb_base_dir,
&idb_description.as_path(),
&idb_description,
version,
self.thread_pool.clone(),
),
version.unwrap_or(0),
)
.expect("Failed to create sqlite engine"),
);
let _ = sender.send(db.version);
let _ = sender.send(db.version().unwrap_or(version));
e.insert(db);
},
Entry::Occupied(db) => {
let _ = sender.send(db.get().version);
let _ = sender.send(db.get().version().unwrap_or(version));
},
}
},
@@ -304,17 +329,8 @@
origin,
name: db_name,
};
if self.databases.remove(&idb_description).is_none() {
let _ = sender.send(Ok(()));
return;
}
// FIXME:(rasviitanen) Possible security issue?
// FIXME:(arihant2math) using remove_dir_all with arbitrary input ...
let mut db_dir = self.idb_base_dir.clone();
db_dir.push(idb_description.as_path());
if std::fs::remove_dir_all(&db_dir).is_err() {
let _ = sender.send(Err(()));
if let Some(db) = self.databases.remove(&idb_description) {
db.delete_database(sender);
} else {
let _ = sender.send(Ok(()));
}
@@ -323,23 +339,57 @@
let store_name = SanitizedName::new(store_name);
let result = self
.get_database(origin, db_name)
.map(|db| db.has_key_generator(store_name))
.expect("No Database");
sender.send(result).expect("Could not send generator info");
.map(|db| db.has_key_generator(store_name));
let _ = sender.send(result.ok_or(BackendError::DbNotFound));
},
SyncOperation::KeyPath(sender, origin, db_name, store_name) => {
let store_name = SanitizedName::new(store_name);
let result = self
.get_database(origin, db_name)
.map(|db| db.key_path(store_name));
let _ = sender.send(result.ok_or(BackendError::DbNotFound));
},
SyncOperation::CreateIndex(
sender,
origin,
db_name,
store_name,
index_name,
key_path,
unique,
multi_entry,
) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database(origin, db_name) {
let result =
db.create_index(store_name, index_name, key_path, unique, multi_entry);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database(origin, db_name) {
let result = db.delete_index(store_name, index_name);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::Commit(sender, _origin, _db_name, _txn) => {
// FIXME:(arihant2math) This does nothing at the moment
sender.send(Err(())).expect("Could not send commit status");
let _ = sender.send(Ok(()));
},
SyncOperation::UpgradeVersion(sender, origin, db_name, _txn, version) => {
if let Some(db) = self.get_database_mut(origin, db_name) {
if version > db.version {
db.version = version;
if version > db.version().unwrap_or(0) {
let _ = db.set_version(version);
}
// Erroring out if the version is not upgraded can lead to non-replicable behavior
let _ = sender.send(Ok(db.version));
let _ = sender.send(db.version().map_err(BackendError::from));
} else {
let _ = sender.send(Err(()));
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::CreateObjectStore(
@@ -347,28 +397,39 @@
origin,
db_name,
store_name,
key_paths,
auto_increment,
) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database_mut(origin, db_name) {
db.create_object_store(sender, store_name, auto_increment);
let result = db.create_object_store(store_name, key_paths, auto_increment);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => {
let store_name = SanitizedName::new(store_name);
if let Some(db) = self.get_database_mut(origin, db_name) {
db.delete_object_store(sender, store_name);
let result = db.delete_object_store(store_name);
let _ = sender.send(result.map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::StartTransaction(sender, origin, db_name, txn) => {
if let Some(db) = self.get_database_mut(origin, db_name) {
db.start_transaction(txn, Some(sender));
};
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::Version(sender, origin, db_name) => {
if let Some(db) = self.get_database(origin, db_name) {
let _ = sender.send(db.version);
};
let _ = sender.send(db.version().map_err(BackendError::from));
} else {
let _ = sender.send(Err(BackendError::DbNotFound));
}
},
SyncOperation::RegisterNewTxn(sender, origin, db_name) => {
if let Some(db) = self.get_database_mut(origin, db_name) {