net: fix indexeddb backend bugs (#38744)
Fix a large number of backend issues that were masking everything else. There are probably more, but it will take more integration/unit testing to find them.

Testing: WPT
Fixes: #38743

Signed-off-by: Ashwin Naren <arihant2math@gmail.com>
parent f2294db95b
commit d0a8f27241
15 changed files with 113 additions and 146 deletions
Cargo.lock (generated, 19 lines changed)
@@ -5440,6 +5440,7 @@ dependencies = [
  "rustls-pemfile",
  "rustls-pki-types",
  "sea-query",
+ "sea-query-rusqlite",
  "serde",
  "serde_json",
  "servo_arc",
@@ -7334,9 +7335,9 @@ dependencies = [

 [[package]]
 name = "sea-query"
-version = "0.32.7"
+version = "1.0.0-rc.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a5d1c518eaf5eda38e5773f902b26ab6d5e9e9e2bb2349ca6c64cf96f80448c"
+checksum = "fa6a49d47916bc2f384c2aa3ff07ef8a9eac12a367f63274e6db5c9e96f508c8"
 dependencies = [
  "inherent",
  "sea-query-derive",
@@ -7344,9 +7345,9 @@ dependencies = [

 [[package]]
 name = "sea-query-derive"
-version = "0.4.3"
+version = "1.0.0-rc.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bae0cbad6ab996955664982739354128c58d16e126114fe88c2a493642502aab"
+checksum = "217e9422de35f26c16c5f671fce3c075a65e10322068dbc66078428634af6195"
 dependencies = [
  "darling",
  "heck 0.4.1",
@@ -7356,6 +7357,16 @@ dependencies = [
  "thiserror 2.0.12",
 ]

+[[package]]
+name = "sea-query-rusqlite"
+version = "0.8.0-rc.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "480e20a53f87a48779a24bb041666d676e576ee003b8689ce28e4c48dccd1284"
+dependencies = [
+ "rusqlite",
+ "sea-query",
+]
+
 [[package]]
 name = "selectors"
 version = "0.31.0"
@@ -60,7 +60,8 @@ rustls-pemfile = { workspace = true }
 rustls-pki-types = { workspace = true }
 resvg = { workspace = true }
 rusqlite = { version = "0.37", features = ["bundled"] }
-sea-query = { version = "0.32", default-features = false, features = ["derive", "backend-sqlite"] }
+sea-query = { version = "1.0.0-rc.9", default-features = false, features = ["derive", "backend-sqlite"] }
+sea-query-rusqlite = { version = "0.8.0-rc.8" }
 serde = { workspace = true }
 serde_json = { workspace = true }
 servo_arc = { workspace = true }
@@ -12,6 +12,7 @@ use net_traits::indexeddb_thread::{
 };
 use rusqlite::{Connection, Error, OptionalExtension, params};
 use sea_query::{Condition, Expr, ExprTrait, IntoCondition, SqliteQueryBuilder};
+use sea_query_rusqlite::RusqliteBinder;
 use serde::Serialize;
 use tokio::sync::oneshot;
@@ -40,7 +41,7 @@ fn range_to_query(range: IndexedDBKeyRange) -> Condition {
     // Special case for optimization
     if let Some(singleton) = range.as_singleton() {
         let encoded = bincode::serialize(singleton).unwrap();
-        return Expr::column(object_data_model::Column::Data)
+        return Expr::column(object_data_model::Column::Key)
             .eq(encoded)
             .into_condition();
     }
@@ -48,18 +49,18 @@ fn range_to_query(range: IndexedDBKeyRange) -> Condition {
     if let Some(upper) = range.upper.as_ref() {
         let upper_bytes = bincode::serialize(upper).unwrap();
         let query = if range.upper_open {
-            Expr::column(object_data_model::Column::Data).lt(upper_bytes)
+            Expr::column(object_data_model::Column::Key).lt(upper_bytes)
         } else {
-            Expr::column(object_data_model::Column::Data).lte(upper_bytes)
+            Expr::column(object_data_model::Column::Key).lte(upper_bytes)
         };
         parts.push(query);
     }
     if let Some(lower) = range.lower.as_ref() {
         let lower_bytes = bincode::serialize(lower).unwrap();
         let query = if range.upper_open {
-            Expr::column(object_data_model::Column::Data).gt(lower_bytes)
+            Expr::column(object_data_model::Column::Key).gt(lower_bytes)
         } else {
-            Expr::column(object_data_model::Column::Data).gte(lower_bytes)
+            Expr::column(object_data_model::Column::Key).gte(lower_bytes)
         };
         parts.push(query);
     }
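The two hunks above are the core query fix: range comparisons are now made against the key column rather than the value (data) column. A minimal standalone sketch of the same idea, using sea-query `Alias` identifiers and hypothetical byte strings in place of Servo's model enums and bincode-encoded keys, shows the kind of WHERE clause such a range produces:

```rust
// Sketch only: a bounded key range turned into a SQL condition on the `key` column.
use sea_query::{Alias, Cond, Expr, ExprTrait, Query, SqliteQueryBuilder};

fn main() {
    // Hypothetical serialized bounds (stand-ins for bincode-encoded IndexedDB keys).
    let lower: Vec<u8> = vec![0x01];
    let upper: Vec<u8> = vec![0x7f];

    let sql = Query::select()
        .column(Alias::new("data"))
        .from(Alias::new("object_data"))
        .cond_where(
            Cond::all()
                .add(Expr::col(Alias::new("key")).gte(lower)) // closed lower bound
                .add(Expr::col(Alias::new("key")).lt(upper)) // open upper bound
                .add(Expr::col(Alias::new("object_store_id")).eq(1)),
        )
        .to_string(SqliteQueryBuilder);

    // Roughly: SELECT "data" FROM "object_data"
    //          WHERE "key" >= <lower> AND "key" < <upper> AND "object_store_id" = 1
    println!("{sql}");
}
```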
@@ -143,13 +144,21 @@ impl SqliteEngine {
         key_range: IndexedDBKeyRange,
     ) -> Result<Option<object_data_model::Model>, Error> {
         let query = range_to_query(key_range);
-        let stmt = sea_query::Query::select()
+        let (sql, values) = sea_query::Query::select()
             .from(object_data_model::Column::Table)
             .columns(vec![
                 object_data_model::Column::ObjectStoreId,
                 object_data_model::Column::Key,
                 object_data_model::Column::Data,
             ])
             .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
-            .to_owned();
+            .limit(1)
+            .build_rusqlite(SqliteQueryBuilder);
         connection
-            .prepare(&stmt.build(SqliteQueryBuilder).0)?
-            .query_one((), |row| object_data_model::Model::try_from(row))
+            .prepare(&sql)?
+            .query_one(&*values.as_params(), |row| {
+                object_data_model::Model::try_from(row)
+            })
             .optional()
     }

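The change above switches from building bare SQL to sea-query-rusqlite's `build_rusqlite`, which returns the SQL string together with the collected placeholder values so they can actually be bound when the statement runs; the previous `query_one((), ...)` call passed no parameters at all. A self-contained sketch of that pattern, with an illustrative table rather than Servo's real schema:

```rust
// Sketch only: build_rusqlite returns (sql, values); values.as_params() hands the
// placeholder values to rusqlite so the WHERE clause receives its arguments.
use rusqlite::{Connection, OptionalExtension};
use sea_query::{Alias, Expr, ExprTrait, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE object_data (object_store_id INTEGER, key BLOB, data BLOB);")?;

    let (sql, values) = Query::select()
        .column(Alias::new("data"))
        .from(Alias::new("object_data"))
        .and_where(Expr::col(Alias::new("object_store_id")).eq(1))
        .limit(1)
        .build_rusqlite(SqliteQueryBuilder);

    // The `?` placeholders in `sql` are filled from `values`; passing `()` instead
    // would leave them unbound and the statement would fail.
    let row: Option<Vec<u8>> = conn
        .prepare(&sql)?
        .query_row(&*values.as_params(), |row| row.get(0))
        .optional()?;
    println!("{row:?}");
    Ok(())
}
```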
@@ -221,14 +230,14 @@ impl SqliteEngine {
         key_range: IndexedDBKeyRange,
     ) -> Result<usize, Error> {
         let query = range_to_query(key_range);
-        let count = sea_query::Query::select()
+        let (sql, values) = sea_query::Query::select()
             .expr(Expr::col(object_data_model::Column::Key).count())
             .from(object_data_model::Column::Table)
             .and_where(query.and(Expr::col(object_data_model::Column::ObjectStoreId).is(store.id)))
-            .to_owned();
+            .build_rusqlite(SqliteQueryBuilder);
         connection
-            .prepare(&count.build(SqliteQueryBuilder).0)?
-            .query_row((), |row| row.get(0))
+            .prepare(&sql)?
+            .query_row(&*values.as_params(), |row| row.get(0))
             .map(|count: i64| count as usize)
     }
 }
@@ -244,7 +253,7 @@ impl KvsEngine for SqliteEngine {
     ) -> Result<CreateObjectResult, Self::Error> {
         let mut stmt = self
             .connection
-            .prepare("SELECT 1 FROM object_store WHERE name = ?")?;
+            .prepare("SELECT * FROM object_store WHERE name = ?")?;
         if stmt.exists(params![store_name.to_string()])? {
             // Store already exists
             return Ok(CreateObjectResult::AlreadyExists);
@@ -307,7 +316,7 @@ impl KvsEngine for SqliteEngine {
         let connection = Connection::open(path).unwrap();
         for request in transaction.requests {
             let object_store = connection
-                .prepare("SELECT 1 FROM object_store WHERE name = ?")
+                .prepare("SELECT * FROM object_store WHERE name = ?")
                 .and_then(|mut stmt| {
                     stmt.query_row(params![request.store_name.to_string()], |row| {
                         object_store_model::Model::try_from(row)
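The `SELECT 1` to `SELECT *` change here matters because the resulting row is converted into a model struct whose `try_from` reads every column; a `SELECT 1` result set contains none of them. A small sketch with a stand-in struct (not Servo's generated model) illustrates the requirement:

```rust
// Sketch only: mapping a row into a struct needs the real columns in the SELECT list.
use rusqlite::{Connection, Row, params};

struct ObjectStore {
    id: i64,
    name: String,
}

impl TryFrom<&Row<'_>> for ObjectStore {
    type Error = rusqlite::Error;

    fn try_from(row: &Row<'_>) -> Result<Self, Self::Error> {
        Ok(ObjectStore {
            id: row.get("id")?,
            name: row.get("name")?,
        })
    }
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE object_store (id INTEGER PRIMARY KEY, name TEXT);
         INSERT INTO object_store (name) VALUES ('test_store');",
    )?;

    // `SELECT *` carries the columns try_from reads; `SELECT 1` would not.
    let store = conn
        .prepare("SELECT * FROM object_store WHERE name = ?")?
        .query_row(params!["test_store"], |row| ObjectStore::try_from(row))?;
    println!("{} -> {}", store.id, store.name);
    Ok(())
}
```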
@@ -471,7 +480,7 @@ impl KvsEngine for SqliteEngine {
         )?;

         let index_exists: bool = self.connection.query_row(
-            "SELECT EXISTS(SELECT 1 FROM object_store_index WHERE name = ? AND object_store_id = ?)",
+            "SELECT EXISTS(SELECT * FROM object_store_index WHERE name = ? AND object_store_id = ?)",
             params![index_name.to_string(), object_store.id],
             |row| row.get(0),
         )?;
@@ -537,8 +546,8 @@ mod tests {
     use std::sync::Arc;

     use net_traits::indexeddb_thread::{
-        AsyncOperation, AsyncReadWriteOperation, CreateObjectResult, IndexedDBKeyType,
-        IndexedDBTxnMode, KeyPath,
+        AsyncOperation, AsyncReadOnlyOperation, AsyncReadWriteOperation, CreateObjectResult,
+        IndexedDBKeyType, IndexedDBTxnMode, KeyPath,
     };
     use servo_url::ImmutableOrigin;
     use url::Host;
@@ -693,21 +702,71 @@ mod tests {
         let store_name = SanitizedName::new("test_store".to_string());
         db.create_store(store_name.clone(), None, false)
             .expect("Failed to create store");
+        let channel = ipc_channel::ipc::channel().unwrap();
+        let channel2 = ipc_channel::ipc::channel().unwrap();
+        let channel3 = ipc_channel::ipc::channel().unwrap();
+        let channel4 = ipc_channel::ipc::channel().unwrap();
+        let channel5 = ipc_channel::ipc::channel().unwrap();
+        let channel6 = ipc_channel::ipc::channel().unwrap();
         let rx = db.process_transaction(KvsTransaction {
             mode: IndexedDBTxnMode::Readwrite,
             requests: VecDeque::from(vec![
-                // TODO: Test other operations
                 KvsOperation {
                     store_name: store_name.clone(),
                     operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::PutItem {
-                        sender: ipc_channel::ipc::channel().unwrap().0,
+                        sender: channel.0,
                         key: IndexedDBKeyType::Number(1.0),
-                        value: vec![],
+                        value: vec![1, 2, 3],
                         should_overwrite: false,
                     }),
                 },
+                KvsOperation {
+                    store_name: store_name.clone(),
+                    operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
+                        sender: channel2.0,
+                        key_range: super::IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
+                    }),
+                },
+                KvsOperation {
+                    store_name: store_name.clone(),
+                    operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::GetItem {
+                        sender: channel3.0,
+                        key_range: super::IndexedDBKeyRange::only(IndexedDBKeyType::Number(5.0)),
+                    }),
+                },
+                KvsOperation {
+                    store_name: store_name.clone(),
+                    operation: AsyncOperation::ReadOnly(AsyncReadOnlyOperation::Count {
+                        sender: channel4.0,
+                        key_range: super::IndexedDBKeyRange::only(IndexedDBKeyType::Number(1.0)),
+                    }),
+                },
+                KvsOperation {
+                    store_name: store_name.clone(),
+                    operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::RemoveItem {
+                        sender: channel5.0,
+                        key: IndexedDBKeyType::Number(1.0),
+                    }),
+                },
+                KvsOperation {
+                    store_name: store_name.clone(),
+                    operation: AsyncOperation::ReadWrite(AsyncReadWriteOperation::Clear(
+                        channel6.0,
+                    )),
+                },
             ]),
         });
         let _ = rx.blocking_recv().unwrap();
+        channel.1.recv().unwrap().unwrap();
+        let get_result = channel2.1.recv().unwrap();
+        let value = get_result.unwrap();
+        assert_eq!(value, Some(vec![1, 2, 3]));
+        let get_result = channel3.1.recv().unwrap();
+        let value = get_result.unwrap();
+        assert_eq!(value, None);
+        let amount = channel4.1.recv().unwrap().unwrap();
+        assert_eq!(amount, 1);
+        channel5.1.recv().unwrap().unwrap();
+        channel6.1.recv().unwrap().unwrap();
     }
 }
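The expanded test drives PutItem, GetItem, Count, RemoveItem, and Clear through a single transaction and checks each reply on its own IPC channel. The request/reply shape it relies on looks roughly like this (illustrative message type, not Servo's actual BackendResult):

```rust
// Sketch only: the ipc-channel request/reply pattern the test uses. Each operation
// carries its own sender; the test keeps the matching receiver and asserts on the reply.
use ipc_channel::ipc;

fn main() {
    let (sender, receiver) = ipc::channel::<Result<Option<Vec<u8>>, String>>().unwrap();

    // Stand-in for the backend answering a GetItem request.
    sender.send(Ok(Some(vec![1, 2, 3]))).unwrap();

    assert_eq!(receiver.recv().unwrap().unwrap(), Some(vec![1, 2, 3]));
}
```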
@@ -6,6 +6,7 @@ use sea_query::Iden;
 #[derive(Iden)]
+#[expect(unused)]
 pub enum Column {
     #[iden = "database"]
     Table,
     Name,
     Origin,
@@ -6,6 +6,7 @@ use sea_query::Iden;

 #[derive(Iden)]
 pub enum Column {
     #[iden = "object_data"]
     Table,
     ObjectStoreId,
+    Key,
@@ -6,6 +6,7 @@ use sea_query::Iden;
 #[derive(Iden)]
+#[expect(unused)]
 pub enum Column {
     #[iden = "object_store_index"]
     Table,
     ObjectStoreId,
     Name,
@@ -7,6 +7,7 @@ use sea_query::Iden;
 #[derive(Iden)]
+#[expect(unused)]
 pub enum Column {
     #[iden = "object_store"]
     Table,
     Id,
     Name,
@@ -12,8 +12,8 @@ use js::jsval::{DoubleValue, JSVal, UndefinedValue};
 use js::rust::HandleValue;
 use net_traits::IpcSend;
 use net_traits::indexeddb_thread::{
-    AsyncOperation, BackendResult, IndexedDBKeyType, IndexedDBThreadMsg, IndexedDBTxnMode,
-    PutItemResult,
+    AsyncOperation, BackendError, BackendResult, IndexedDBKeyType, IndexedDBThreadMsg,
+    IndexedDBTxnMode, PutItemResult,
 };
 use profile_traits::ipc::IpcReceiver;
 use serde::{Deserialize, Serialize};
@@ -296,7 +296,11 @@ impl IDBRequest {
         let response_listener = response_listener.clone();
         task_source.queue(task!(request_callback: move || {
             response_listener.handle_async_request_finished(
-                message.expect("Could not unwrap message").map(|t| t.into()));
+                message.expect("Could not unwrap message").inspect_err(|e| {
+                    if let BackendError::DbErr(e) = e {
+                        error!("Error in IndexedDB operation: {}", e);
+                    }
+                }).map(|t| t.into()));
         }));
     }),
 );
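The new handling logs backend database errors with `inspect_err` before forwarding the result, so the listener still receives the original error. In plain std Rust the combinator behaves like this:

```rust
// inspect_err runs a closure on the Err variant for its side effect (logging here)
// and returns the Result unchanged, so the value keeps flowing afterwards.
fn main() {
    let result: Result<u32, String> = Err("database error".to_string());
    let forwarded = result
        .inspect_err(|e| eprintln!("Error in IndexedDB operation: {e}"))
        .map(|n| n + 1);
    assert_eq!(forwarded, Err("database error".to_string()));
}
```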
@@ -4,16 +4,13 @@
     expected: TIMEOUT

   [Cross-realm IDBObjectStore::add() method from detached <iframe> works as expected]
-    expected: TIMEOUT
+    expected: FAIL

   [Cross-realm IDBObjectStore::delete() method from detached <iframe> works as expected]
-    expected: TIMEOUT
+    expected: FAIL

-  [Cross-realm IDBObjectStore::clear() method from detached <iframe> works as expected]
-    expected: TIMEOUT
-
   [Cross-realm IDBObjectStore::getKey() method from detached <iframe> works as expected]
-    expected: TIMEOUT
+    expected: FAIL

   [Cross-realm IDBObjectStore::getAll() method from detached <iframe> works as expected]
     expected: FAIL
@@ -29,6 +26,3 @@

   [Cross-realm IDBObjectStore::openKeyCursor() method from detached <iframe> works as expected]
     expected: FAIL
-
-  [Cross-realm IDBObjectStore::get() method from detached <iframe> works as expected]
-    expected: TIMEOUT
@@ -1,11 +1,7 @@
 [idbobjectstore_count.any.worker.html]
-  expected: TIMEOUT
   [Returns the number of records in the object store ]
     expected: FAIL

-  [Returns the number of records that have keys within the range ]
-    expected: TIMEOUT
-
   [Returns the number of records that have keys with the key]
     expected: FAIL

@@ -17,13 +13,9 @@
   expected: ERROR

 [idbobjectstore_count.any.html]
-  expected: TIMEOUT
   [Returns the number of records in the object store ]
     expected: FAIL

-  [Returns the number of records that have keys within the range ]
-    expected: TIMEOUT
-
   [Returns the number of records that have keys with the key]
     expected: FAIL
@@ -15,9 +15,6 @@
   [If the object store has been deleted, the implementation must throw a DOMException of type InvalidStateError]
     expected: FAIL

-  [delete() key doesn't match any records]
-    expected: FAIL
-
 [idbobjectstore_delete.any.serviceworker.html]
   expected: ERROR
@@ -39,9 +36,6 @@
   [If the object store has been deleted, the implementation must throw a DOMException of type InvalidStateError]
     expected: FAIL

-  [delete() key doesn't match any records]
-    expected: FAIL
-
 [idbobjectstore_delete.any.sharedworker.html]
   expected: ERROR
@@ -2,45 +2,13 @@
   expected: ERROR

 [idbobjectstore_get.any.html]
-  expected: TIMEOUT
-  [Returns the record with the first key in the range]
-    expected: TIMEOUT
-
   [When a transaction is aborted, throw TransactionInactiveError]
     expected: FAIL

-  [Key is a number]
-    expected: TIMEOUT
-
-  [Key is a string]
-    expected: TIMEOUT
-
-  [Key is a date]
-    expected: TIMEOUT
-
-  [Attempts to retrieve a record that doesn't exist]
-    expected: FAIL

 [idbobjectstore_get.any.serviceworker.html]
   expected: ERROR

 [idbobjectstore_get.any.worker.html]
-  expected: TIMEOUT
-  [Returns the record with the first key in the range]
-    expected: TIMEOUT
-
   [When a transaction is aborted, throw TransactionInactiveError]
     expected: FAIL

-  [Key is a number]
-    expected: TIMEOUT
-
-  [Key is a string]
-    expected: TIMEOUT
-
-  [Key is a date]
-    expected: TIMEOUT
-
-  [Attempts to retrieve a record that doesn't exist]
-    expected: FAIL
tests/wpt/meta/IndexedDB/value.any.js.ini (vendored, 38 lines changed)
@@ -1,47 +1,9 @@
 [value.any.html]
-  expected: TIMEOUT
-  [Values - Array]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - primitive BigInt]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - BigInt object]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - primitive BigInt inside object]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - BigInt object inside object]
-    expected: TIMEOUT
-
-  [Values - Date]
-    expected: TIMEOUT

 [value.any.sharedworker.html]
   expected: ERROR

 [value.any.worker.html]
-  expected: TIMEOUT
-  [Values - Array]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - primitive BigInt]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - BigInt object]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - primitive BigInt inside object]
-    expected: TIMEOUT
-
-  [BigInts as values in IndexedDB - BigInt object inside object]
-    expected: TIMEOUT
-
-  [Values - Date]
-    expected: TIMEOUT

 [value.any.serviceworker.html]
   expected: ERROR
@@ -2,28 +2,8 @@
   expected: ERROR

 [value_recursive.any.html]
-  expected: TIMEOUT
-  [Recursive value - array directly contains self]
-    expected: TIMEOUT
-
-  [Recursive value - array indirectly contains self]
-    expected: TIMEOUT
-
-  [Recursive value - array member contains self]
-    expected: TIMEOUT

 [value_recursive.any.worker.html]
-  expected: TIMEOUT
-  [Recursive value - array directly contains self]
-    expected: TIMEOUT
-
-  [Recursive value - array indirectly contains self]
-    expected: TIMEOUT
-
-  [Recursive value - array member contains self]
-    expected: TIMEOUT

 [value_recursive.any.sharedworker.html]
   expected: ERROR
@@ -5,12 +5,10 @@
   expected: ERROR

 [writer-starvation.any.worker.html]
-  expected: TIMEOUT
   [IDB read requests should not starve write requests]
-    expected: TIMEOUT
+    expected: FAIL


 [writer-starvation.any.html]
-  expected: TIMEOUT
   [IDB read requests should not starve write requests]
-    expected: TIMEOUT
+    expected: FAIL