indexeddb: Use UUIDs instead of sanitization of object store names (#38944)
Sanitizing object store names caused problems: replacing special characters made it impossible to use certain object store names that the specification allows. With these changes, deterministic UUIDs are used for file paths, and object store names are inserted into SQLite without sanitization.

Testing: Covered by existing tests; new unit tests were added.
Fixes: #37569

Signed-off-by: Rodion Borovyk <rodion.borovyk@gmail.com>
parent 559b05c1b3
commit 0089e652c5
6 changed files with 119 additions and 111 deletions
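The core of the change is deriving on-disk paths from deterministic (version 5) UUIDs instead of filtering characters out of origin and database names. A minimal standalone sketch of that idea follows; the namespace bytes are the ones added in the diff below, while the function name idb_path, the base directory, and the example names are illustrative only, not Servo's actual code.

// Sketch only. Requires the uuid crate with the "v5" feature (see the manifest hunk below).
use std::path::{Path, PathBuf};
use uuid::Uuid;

// Fixed, randomly generated namespace (same bytes as the constant added in this commit).
const NAMESPACE_SERVO_IDB: &Uuid = &Uuid::from_bytes([
    0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3,
    0x5d,
]);

// Maps (origin, database name) to a directory. UUIDv5 hashes the name under the
// namespace, so any string is accepted and equal inputs always produce the same path.
fn idb_path(base: &Path, origin_ascii: &str, db_name: &str) -> PathBuf {
    let origin_uuid = Uuid::new_v5(NAMESPACE_SERVO_IDB, origin_ascii.as_bytes());
    let db_name_uuid = Uuid::new_v5(NAMESPACE_SERVO_IDB, db_name.as_bytes());
    base.join(origin_uuid.to_string()).join(db_name_uuid.to_string())
}

fn main() {
    // A name that previously had to be rewritten (or could look like path traversal)
    // now maps to plain hexadecimal UUID directory components.
    let path = idb_path(Path::new("idb-base"), "https://example.org", "../other_origin/db");
    println!("{}", path.display());
}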
Cargo.lock (generated): 7 changes

@@ -8138,6 +8138,12 @@ dependencies = [
  "digest",
 ]
 
+[[package]]
+name = "sha1_smol"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
+
 [[package]]
 name = "sha2"
 version = "0.10.9"
@@ -9477,6 +9483,7 @@ dependencies = [
  "getrandom 0.3.3",
  "js-sys",
  "serde",
+ "sha1_smol",
  "wasm-bindgen",
 ]
 
@@ -168,7 +168,7 @@ unicode-script = "0.5"
 unicode-segmentation = "1.12.0"
 url = "2.5"
 urlpattern = "0.3"
-uuid = { version = "1.18.0", features = ["v4"] }
+uuid = { version = "1.18.0", features = ["v4", "v5"] }
 vello = { git = "https://github.com/linebender/vello", rev = "472c43ccc80c731d32d167c9e9748c78df1977f4" }
 vello_cpu = { git = "https://github.com/linebender/vello", rev = "472c43ccc80c731d32d167c9e9748c78df1977f4" }
 webdriver = "0.53.0"
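For context on the manifest hunk above: in the uuid crate, the "v4" feature gates the random Uuid::new_v4 constructor, and the newly added "v5" feature gates the name-based Uuid::new_v5 constructor that this commit relies on. A tiny illustrative check, assuming a crate that depends on uuid with both features enabled:

use uuid::Uuid;

fn main() {
    let random = Uuid::new_v4(); // needs the "v4" feature
    let named = Uuid::new_v5(&Uuid::NAMESPACE_URL, b"servo-idb"); // needs the "v5" feature
    println!("{random} {named}");
}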
@@ -11,41 +11,8 @@ pub use self::sqlite::SqliteEngine;
 
 mod sqlite;
 
-#[derive(Clone, Eq, Hash, PartialEq)]
-pub struct SanitizedName {
-    name: String,
-}
-
-impl SanitizedName {
-    pub fn new(name: String) -> SanitizedName {
-        let name = name.replace("https://", "");
-        let name = name.replace("http://", "");
-        // FIXME:(arihant2math) Disallowing special characters might be a big problem,
-        // but better safe than sorry. E.g. the db name '../other_origin/db',
-        // would let us access databases from another origin.
-        let name = name
-            .chars()
-            .map(|c| match c {
-                'A'..='Z' => c,
-                'a'..='z' => c,
-                '0'..='9' => c,
-                '-' => c,
-                '_' => c,
-                _ => '-',
-            })
-            .collect();
-        SanitizedName { name }
-    }
-}
-
-impl std::fmt::Display for SanitizedName {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.name)
-    }
-}
-
 pub struct KvsOperation {
-    pub store_name: SanitizedName,
+    pub store_name: String,
     pub operation: AsyncOperation,
 }
 
@@ -62,14 +29,14 @@ pub trait KvsEngine {
 
     fn create_store(
         &self,
-        store_name: SanitizedName,
+        store_name: &str,
         key_path: Option<KeyPath>,
         auto_increment: bool,
     ) -> Result<CreateObjectResult, Self::Error>;
 
-    fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;
+    fn delete_store(&self, store_name: &str) -> Result<(), Self::Error>;
 
-    fn close_store(&self, store_name: SanitizedName) -> Result<(), Self::Error>;
+    fn close_store(&self, store_name: &str) -> Result<(), Self::Error>;
 
     fn delete_database(self) -> Result<(), Self::Error>;
 
@@ -78,22 +45,18 @@ pub trait KvsEngine {
         transaction: KvsTransaction,
     ) -> oneshot::Receiver<Option<Vec<u8>>>;
 
-    fn has_key_generator(&self, store_name: SanitizedName) -> bool;
-    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath>;
+    fn has_key_generator(&self, store_name: &str) -> bool;
+    fn key_path(&self, store_name: &str) -> Option<KeyPath>;
 
     fn create_index(
         &self,
-        store_name: SanitizedName,
+        store_name: &str,
         index_name: String,
         key_path: KeyPath,
         unique: bool,
         multi_entry: bool,
     ) -> Result<CreateObjectResult, Self::Error>;
-    fn delete_index(
-        &self,
-        store_name: SanitizedName,
-        index_name: String,
-    ) -> Result<(), Self::Error>;
+    fn delete_index(&self, store_name: &str, index_name: String) -> Result<(), Self::Error>;
 
     fn version(&self) -> Result<u64, Self::Error>;
     fn set_version(&self, version: u64) -> Result<(), Self::Error>;
@@ -16,7 +16,7 @@ use sea_query_rusqlite::RusqliteBinder;
 use serde::Serialize;
 use tokio::sync::oneshot;
 
-use crate::indexeddb::engines::{KvsEngine, KvsTransaction, SanitizedName};
+use crate::indexeddb::engines::{KvsEngine, KvsTransaction};
 use crate::indexeddb::idb_thread::IndexedDBDescription;
 use crate::resource_thread::CoreResourceThreadPool;
 
@@ -244,7 +244,7 @@ impl KvsEngine for SqliteEngine {
 
     fn create_store(
         &self,
-        store_name: SanitizedName,
+        store_name: &str,
         key_path: Option<KeyPath>,
         auto_increment: bool,
     ) -> Result<CreateObjectResult, Self::Error> {
@@ -267,7 +267,7 @@ impl KvsEngine for SqliteEngine {
         Ok(CreateObjectResult::Created)
     }
 
-    fn delete_store(&self, store_name: SanitizedName) -> Result<(), Self::Error> {
+    fn delete_store(&self, store_name: &str) -> Result<(), Self::Error> {
         let result = self.connection.execute(
             "DELETE FROM object_store WHERE name = ?",
             params![store_name.to_string()],
@@ -281,7 +281,7 @@ impl KvsEngine for SqliteEngine {
         }
     }
 
-    fn close_store(&self, _store_name: SanitizedName) -> Result<(), Self::Error> {
+    fn close_store(&self, _store_name: &str) -> Result<(), Self::Error> {
         // TODO: do something
         Ok(())
     }
@@ -430,7 +430,7 @@ impl KvsEngine for SqliteEngine {
     }
 
     // TODO: we should be able to error out here, maybe change the trait definition?
-    fn has_key_generator(&self, store_name: SanitizedName) -> bool {
+    fn has_key_generator(&self, store_name: &str) -> bool {
         self.connection
             .prepare("SELECT * FROM object_store WHERE name = ?")
             .and_then(|mut stmt| {
@@ -445,7 +445,7 @@ impl KvsEngine for SqliteEngine {
             .unwrap_or_default()
     }
 
-    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
+    fn key_path(&self, store_name: &str) -> Option<KeyPath> {
         self.connection
             .prepare("SELECT * FROM object_store WHERE name = ?")
             .and_then(|mut stmt| {
@@ -464,7 +464,7 @@ impl KvsEngine for SqliteEngine {
 
     fn create_index(
         &self,
-        store_name: SanitizedName,
+        store_name: &str,
         index_name: String,
         key_path: KeyPath,
         unique: bool,
@@ -499,11 +499,7 @@ impl KvsEngine for SqliteEngine {
         Ok(CreateObjectResult::Created)
     }
 
-    fn delete_index(
-        &self,
-        store_name: SanitizedName,
-        index_name: String,
-    ) -> Result<(), Self::Error> {
+    fn delete_index(&self, store_name: &str, index_name: String) -> Result<(), Self::Error> {
         let object_store = self.connection.query_row(
             "SELECT * FROM object_store WHERE name = ?",
             params![store_name.to_string()],
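Note on the SQLite-facing hunks above: the raw store name keeps flowing into queries through "?" placeholders and params![...], and bound parameters are transmitted as data rather than spliced into the SQL text. That, not character filtering, is what keeps unsanitized names safe. A small self-contained sketch using rusqlite follows; the single-column table is a simplification of the object_store table seen in the hunks, not Servo's actual schema.

// Sketch: a hostile-looking store name is harmless when bound as a parameter.
use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE object_store (name TEXT NOT NULL)", [])?;

    let store_name = "' OR 1=1 -- -"; // same name the new test_injection unit test uses
    conn.execute(
        "INSERT INTO object_store (name) VALUES (?)",
        params![store_name],
    )?;

    // The quote and comment characters are stored verbatim, not interpreted as SQL.
    let count: i64 = conn.query_row(
        "SELECT COUNT(*) FROM object_store WHERE name = ?",
        params![store_name],
        |row| row.get(0),
    )?;
    assert_eq!(count, 1);

    // Deleting by name only touches that one row, mirroring delete_store above.
    let deleted = conn.execute(
        "DELETE FROM object_store WHERE name = ?",
        params![store_name],
    )?;
    assert_eq!(deleted, 1);
    Ok(())
}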
@@ -16,10 +16,9 @@ use net_traits::indexeddb_thread::{
 };
 use servo_config::pref;
 use servo_url::origin::ImmutableOrigin;
+use uuid::Uuid;
 
-use crate::indexeddb::engines::{
-    KvsEngine, KvsOperation, KvsTransaction, SanitizedName, SqliteEngine,
-};
+use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine};
 use crate::resource_thread::CoreResourceThreadPool;
 
 pub trait IndexedDBThreadFactory {
@@ -54,15 +53,24 @@ pub struct IndexedDBDescription {
 }
 
 impl IndexedDBDescription {
+    // randomly generated namespace for our purposes
+    const NAMESPACE_SERVO_IDB: &uuid::Uuid = &Uuid::from_bytes([
+        0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3,
+        0x5d,
+    ]);
     // Converts the database description to a folder name where all
     // data for this database is stored
     pub(super) fn as_path(&self) -> PathBuf {
         let mut path = PathBuf::new();
 
-        let sanitized_origin = SanitizedName::new(self.origin.ascii_serialization());
-        let sanitized_name = SanitizedName::new(self.name.clone());
-        path.push(sanitized_origin.to_string());
-        path.push(sanitized_name.to_string());
+        // uuid v5 is deterministic
+        let origin_uuid = Uuid::new_v5(
+            Self::NAMESPACE_SERVO_IDB,
+            self.origin.ascii_serialization().as_bytes(),
+        );
+        let db_name_uuid = Uuid::new_v5(Self::NAMESPACE_SERVO_IDB, self.name.as_bytes());
+        path.push(origin_uuid.to_string());
+        path.push(db_name_uuid.to_string());
 
         path
     }
@@ -85,7 +93,7 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
 
     fn queue_operation(
         &mut self,
-        store_name: SanitizedName,
+        store_name: &str,
         serial_number: u64,
         mode: IndexedDBTxnMode,
         operation: AsyncOperation,
@@ -99,7 +107,7 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
             .requests
             .push_back(KvsOperation {
                 operation,
-                store_name,
+                store_name: String::from(store_name),
             });
     }
 
@@ -120,17 +128,17 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
         }
     }
 
-    fn has_key_generator(&self, store_name: SanitizedName) -> bool {
+    fn has_key_generator(&self, store_name: &str) -> bool {
         self.engine.has_key_generator(store_name)
     }
 
-    fn key_path(&self, store_name: SanitizedName) -> Option<KeyPath> {
+    fn key_path(&self, store_name: &str) -> Option<KeyPath> {
         self.engine.key_path(store_name)
     }
 
     fn create_index(
         &self,
-        store_name: SanitizedName,
+        store_name: &str,
         index_name: String,
         key_path: KeyPath,
         unique: bool,
@@ -141,7 +149,7 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
             .map_err(|err| format!("{err:?}"))
     }
 
-    fn delete_index(&self, store_name: SanitizedName, index_name: String) -> DbResult<()> {
+    fn delete_index(&self, store_name: &str, index_name: String) -> DbResult<()> {
         self.engine
             .delete_index(store_name, index_name)
             .map_err(|err| format!("{err:?}"))
@@ -149,7 +157,7 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
 
     fn create_object_store(
         &mut self,
-        store_name: SanitizedName,
+        store_name: &str,
         key_path: Option<KeyPath>,
         auto_increment: bool,
     ) -> DbResult<CreateObjectResult> {
@@ -158,7 +166,7 @@ impl<E: KvsEngine> IndexedDBEnvironment<E> {
             .map_err(|err| format!("{err:?}"))
     }
 
-    fn delete_object_store(&mut self, store_name: SanitizedName) -> DbResult<()> {
+    fn delete_object_store(&mut self, store_name: &str) -> DbResult<()> {
         let result = self.engine.delete_store(store_name);
         result.map_err(|err| format!("{err:?}"))
     }
@@ -239,10 +247,9 @@ impl IndexedDBManager {
                 self.handle_sync_operation(operation);
             },
             IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => {
-                let store_name = SanitizedName::new(store_name);
                 if let Some(db) = self.get_database_mut(origin, db_name) {
                     // Queues an operation for a transaction without starting it
-                    db.queue_operation(store_name, txn, mode, operation);
+                    db.queue_operation(&store_name, txn, mode, operation);
                     // FIXME:(arihant2math) Schedule transactions properly
                     // while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) {
                     //     std::hint::spin_loop();
@@ -335,17 +342,15 @@ impl IndexedDBManager {
                 }
             },
             SyncOperation::HasKeyGenerator(sender, origin, db_name, store_name) => {
-                let store_name = SanitizedName::new(store_name);
                 let result = self
                     .get_database(origin, db_name)
-                    .map(|db| db.has_key_generator(store_name));
+                    .map(|db| db.has_key_generator(&store_name));
                 let _ = sender.send(result.ok_or(BackendError::DbNotFound));
             },
             SyncOperation::KeyPath(sender, origin, db_name, store_name) => {
-                let store_name = SanitizedName::new(store_name);
                 let result = self
                     .get_database(origin, db_name)
-                    .map(|db| db.key_path(store_name));
+                    .map(|db| db.key_path(&store_name));
                 let _ = sender.send(result.ok_or(BackendError::DbNotFound));
             },
             SyncOperation::CreateIndex(
@@ -358,19 +363,17 @@ impl IndexedDBManager {
                 unique,
                 multi_entry,
             ) => {
-                let store_name = SanitizedName::new(store_name);
                 if let Some(db) = self.get_database(origin, db_name) {
                     let result =
-                        db.create_index(store_name, index_name, key_path, unique, multi_entry);
+                        db.create_index(&store_name, index_name, key_path, unique, multi_entry);
                     let _ = sender.send(result.map_err(BackendError::from));
                 } else {
                     let _ = sender.send(Err(BackendError::DbNotFound));
                 }
             },
             SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => {
-                let store_name = SanitizedName::new(store_name);
                 if let Some(db) = self.get_database(origin, db_name) {
-                    let result = db.delete_index(store_name, index_name);
+                    let result = db.delete_index(&store_name, index_name);
                     let _ = sender.send(result.map_err(BackendError::from));
                 } else {
                     let _ = sender.send(Err(BackendError::DbNotFound));
@@ -399,18 +402,16 @@ impl IndexedDBManager {
                 key_paths,
                 auto_increment,
             ) => {
-                let store_name = SanitizedName::new(store_name);
                 if let Some(db) = self.get_database_mut(origin, db_name) {
-                    let result = db.create_object_store(store_name, key_paths, auto_increment);
+                    let result = db.create_object_store(&store_name, key_paths, auto_increment);
                     let _ = sender.send(result.map_err(BackendError::from));
                 } else {
                    let _ = sender.send(Err(BackendError::DbNotFound));
                 }
             },
             SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => {
-                let store_name = SanitizedName::new(store_name);
                 if let Some(db) = self.get_database_mut(origin, db_name) {
-                    let result = db.delete_object_store(store_name);
+                    let result = db.delete_object_store(&store_name);
                     let _ = sender.send(result.map_err(BackendError::from));
                 } else {
                     let _ = sender.send(Err(BackendError::DbNotFound));
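A follow-up note on the as_path hunk above: version 5 UUIDs are a pure function of the namespace and the input bytes, so a given origin and database name resolve to the same directory on every run, which is what lets a reopened database find its existing files. A minimal check, with the namespace bytes copied from the hunk and an illustrative database name:

use uuid::Uuid;

const NAMESPACE_SERVO_IDB: &Uuid = &Uuid::from_bytes([
    0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3,
    0x5d,
]);

fn main() {
    // Recomputing the name-based UUID yields the identical value every time.
    let a = Uuid::new_v5(NAMESPACE_SERVO_IDB, "my_database".as_bytes());
    let b = Uuid::new_v5(NAMESPACE_SERVO_IDB, "my_database".as_bytes());
    assert_eq!(a, b);
}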
@@ -4,9 +4,7 @@
 use std::collections::VecDeque;
 use std::sync::Arc;
 
-use net::indexeddb::engines::{
-    KvsEngine, KvsOperation, KvsTransaction, SanitizedName, SqliteEngine,
-};
+use net::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine};
 use net::indexeddb::idb_thread::IndexedDBDescription;
 use net::resource_thread::CoreResourceThreadPool;
 use net_traits::indexeddb_thread::{
@@ -73,13 +71,13 @@ fn test_create_store() {
         thread_pool,
     )
     .unwrap();
-    let store_name = SanitizedName::new("test_store".to_string());
-    let result = db.create_store(store_name.clone(), None, true);
+    let store_name = "test_store";
+    let result = db.create_store(store_name, None, true);
     assert!(result.is_ok());
     let create_result = result.unwrap();
     assert_eq!(create_result, CreateObjectResult::Created);
     // Try to create the same store again
-    let result = db.create_store(store_name.clone(), None, false);
+    let result = db.create_store(store_name, None, false);
     assert!(result.is_ok());
     let create_result = result.unwrap();
     assert_eq!(create_result, CreateObjectResult::AlreadyExists);
@@ -87,6 +85,53 @@ fn test_create_store() {
     assert!(db.has_key_generator(store_name));
 }
 
+#[test]
+fn test_create_store_empty_name() {
+    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
+    let thread_pool = get_pool();
+    let db = SqliteEngine::new(
+        base_dir.path(),
+        &IndexedDBDescription {
+            name: "test_db".to_string(),
+            origin: test_origin(),
+        },
+        thread_pool,
+    )
+    .unwrap();
+    let store_name = "";
+    let result = db.create_store(store_name, None, true);
+    assert!(result.is_ok());
+    let create_result = result.unwrap();
+    assert_eq!(create_result, CreateObjectResult::Created);
+}
+
+#[test]
+fn test_injection() {
+    let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
+    let thread_pool = get_pool();
+    let db = SqliteEngine::new(
+        base_dir.path(),
+        &IndexedDBDescription {
+            name: "test_db".to_string(),
+            origin: test_origin(),
+        },
+        thread_pool,
+    )
+    .unwrap();
+    // Create a normal store
+    let store_name1 = "test_store";
+    let result = db.create_store(store_name1, None, true);
+    assert!(result.is_ok());
+    let create_result = result.unwrap();
+    assert_eq!(create_result, CreateObjectResult::Created);
+    // Injection
+    let store_name2 = "' OR 1=1 -- -";
+    let result = db.create_store(store_name2, None, false);
+    assert!(result.is_ok());
+    let create_result = result.unwrap();
+    assert_eq!(create_result, CreateObjectResult::Created);
+}
+
 #[test]
 fn test_key_path() {
     let base_dir = tempfile::tempdir().expect("Failed to create temp dir");
@@ -100,12 +145,8 @@ fn test_key_path() {
         thread_pool,
     )
     .unwrap();
-    let store_name = SanitizedName::new("test_store".to_string());
-    let result = db.create_store(
-        store_name.clone(),
-        Some(KeyPath::String("test".to_string())),
-        true,
-    );
+    let store_name = "test_store";
+    let result = db.create_store(store_name, Some(KeyPath::String("test".to_string())), true);
     assert!(result.is_ok());
     assert_eq!(
         db.key_path(store_name),
@@ -126,16 +167,16 @@ fn test_delete_store() {
         thread_pool,
     )
     .unwrap();
-    db.create_store(SanitizedName::new("test_store".to_string()), None, false)
+    db.create_store("test_store", None, false)
         .expect("Failed to create store");
     // Delete the store
-    db.delete_store(SanitizedName::new("test_store".to_string()))
+    db.delete_store("test_store")
         .expect("Failed to delete store");
     // Try to delete the same store again
-    let result = db.delete_store(SanitizedName::new("test_store".into()));
+    let result = db.delete_store("test_store");
     assert!(result.is_err());
     // Try to delete a non-existing store
-    let result = db.delete_store(SanitizedName::new("test_store".into()));
+    let result = db.delete_store("test_store");
     // Should work as per spec
     assert!(result.is_err());
 }
@@ -153,8 +194,8 @@ fn test_async_operations() {
         thread_pool,
     )
     .unwrap();
-    let store_name = SanitizedName::new("test_store".to_string());
-    db.create_store(store_name.clone(), None, false)
+    let store_name = "test_store";
+    db.create_store(store_name, None, false)
         .expect("Failed to create store");
     let channel = ipc_channel::ipc::channel().unwrap();
     let channel2 = ipc_channel::ipc::channel().unwrap();
@@ -166,7 +207,7 @@ fn test_async_operations() {
             mode: IndexedDBTxnMode::Readwrite,
             requests: VecDeque::from(vec![
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
                         sender: channel.0,
                         key: IndexedDBKeyType::Number(1.0),
@@ -175,35 +216,35 @@ fn test_async_operations() {
                     }),
                 },
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                         sender: channel2.0,
                         key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
                     }),
                 },
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
                         sender: channel3.0,
                         key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(5.0)),
                     }),
                 },
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
                         sender: channel4.0,
                         key_range: IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
                     }),
                 },
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
                         sender: channel5.0,
                         key: IndexedDBKeyType::Number(1.0),
                     }),
                 },
                 KvsOperation {
-                    store_name: store_name.clone(),
+                    store_name: store_name.to_owned(),
                     operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(channel6.0)),
                 },
             ]),