diff --git a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index 40e86f688bb..6be53714535 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -107,6 +107,7 @@ use std::{ pub use as_vector::*; pub use order_tracker::OrderTracker; use ruma::{EventId, OwnedEventId, OwnedRoomId, RoomId}; +use serde::{Deserialize, Serialize}; pub use updates::*; /// An identifier for a linked chunk; borrowed variant. @@ -116,6 +117,17 @@ pub enum LinkedChunkId<'a> { Thread(&'a RoomId, &'a EventId), } +impl Display for LinkedChunkId<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Room(room_id) => write!(f, "{room_id}"), + Self::Thread(room_id, thread_root) => { + write!(f, "{room_id}:thread:{thread_root}") + } + } + } +} + impl LinkedChunkId<'_> { pub fn storage_key(&self) -> impl '_ + AsRef<[u8]> { match self { @@ -134,6 +146,12 @@ impl LinkedChunkId<'_> { } } +impl<'a> From<&'a OwnedLinkedChunkId> for LinkedChunkId<'a> { + fn from(value: &'a OwnedLinkedChunkId) -> Self { + value.as_ref() + } +} + impl PartialEq<&OwnedLinkedChunkId> for LinkedChunkId<'_> { fn eq(&self, other: &&OwnedLinkedChunkId) -> bool { match (self, other) { @@ -154,7 +172,7 @@ impl PartialEq> for OwnedLinkedChunkId { } /// An identifier for a linked chunk; owned variant. 
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum OwnedLinkedChunkId { Room(OwnedRoomId), Thread(OwnedRoomId, OwnedEventId), @@ -162,18 +180,12 @@ pub enum OwnedLinkedChunkId { impl Display for OwnedLinkedChunkId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - OwnedLinkedChunkId::Room(room_id) => write!(f, "{room_id}"), - OwnedLinkedChunkId::Thread(room_id, thread_root) => { - write!(f, "{room_id}:thread:{thread_root}") - } - } + self.as_ref().fmt(f) } } impl OwnedLinkedChunkId { - #[cfg(test)] - fn as_ref(&self) -> LinkedChunkId<'_> { + pub fn as_ref(&self) -> LinkedChunkId<'_> { match self { OwnedLinkedChunkId::Room(room_id) => LinkedChunkId::Room(room_id.as_ref()), OwnedLinkedChunkId::Thread(room_id, event_id) => { @@ -190,6 +202,12 @@ impl OwnedLinkedChunkId { } } +impl From> for OwnedLinkedChunkId { + fn from(value: LinkedChunkId<'_>) -> Self { + value.to_owned() + } +} + /// Errors of [`LinkedChunk`]. #[derive(thiserror::Error, Debug)] pub enum Error { diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs index 1acea02313a..ffe6a12406a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs @@ -684,98 +684,3 @@ macro_rules! indexeddb_event_cache_store_integration_tests { } }; } - -// This is copied from `matrix_sdk_base::event_cache::store::integration_tests` -// for the time being, because the IndexedDB implementation of `EventCacheStore` -// is being completed iteratively. So, we are only bringing over the tests -// relevant to the implemented functions. At the moment, this includes the -// following. 
-// -// - EventCacheStore::handle_linked_chunk_updates -// - EventCacheStore::load_all_chunks -// -// When all functions are implemented, we can get rid of this macro and use the -// one from `matrix_sdk_base`. -#[macro_export] -macro_rules! event_cache_store_integration_tests { - () => { - mod event_cache_store_integration_tests { - use matrix_sdk_base::event_cache::store::{ - EventCacheStoreIntegrationTests, IntoEventCacheStore, - }; - use matrix_sdk_test::async_test; - - use super::get_event_cache_store; - - #[async_test] - async fn test_handle_updates_and_rebuild_linked_chunk() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_handle_updates_and_rebuild_linked_chunk().await; - } - - #[async_test] - async fn test_linked_chunk_incremental_loading() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_linked_chunk_incremental_loading().await; - } - - #[async_test] - async fn test_rebuild_empty_linked_chunk() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_rebuild_empty_linked_chunk().await; - } - - #[async_test] - async fn test_load_all_chunks_metadata() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_load_all_chunks_metadata().await; - } - - #[async_test] - async fn test_clear_all_linked_chunks() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_clear_all_linked_chunks().await; - } - - #[async_test] - async fn test_remove_room() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_remove_room().await; - } - - #[async_test] - async fn test_filter_duplicated_events() { - let event_cache_store = - 
get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_filter_duplicated_events().await; - } - - #[async_test] - async fn test_find_event() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_find_event().await; - } - - #[async_test] - async fn test_find_event_relations() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_find_event_relations().await; - } - - #[async_test] - async fn test_save_event() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_save_event().await; - } - } - }; -} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 6745feca380..ef5db3eb5bf 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -112,12 +112,15 @@ pub mod v1 { pub const LEASES: &str = "leases"; pub const LEASES_KEY_PATH: &str = "id"; pub const ROOMS: &str = "rooms"; + pub const LINKED_CHUNK_IDS: &str = "linked_chunk_ids"; pub const LINKED_CHUNKS: &str = "linked_chunks"; pub const LINKED_CHUNKS_KEY_PATH: &str = "id"; pub const LINKED_CHUNKS_NEXT: &str = "linked_chunks_next"; pub const LINKED_CHUNKS_NEXT_KEY_PATH: &str = "next"; pub const EVENTS: &str = "events"; pub const EVENTS_KEY_PATH: &str = "id"; + pub const EVENTS_ROOM: &str = "events_room"; + pub const EVENTS_ROOM_KEY_PATH: &str = "room"; pub const EVENTS_POSITION: &str = "events_position"; pub const EVENTS_POSITION_KEY_PATH: &str = "position"; pub const EVENTS_RELATION: &str = "events_relation"; @@ -169,6 +172,7 @@ pub mod v1 { /// Create an object store for tracking information about events. 
/// /// * Primary Key - `id` + /// * Index (unique) - `room` - tracks whether an event is in a given room /// * Index (unique) - `position` - tracks position of an event in linked /// chunks /// * Index - `relation` - tracks any event to which the given event is @@ -178,6 +182,14 @@ pub mod v1 { object_store_params.key_path(Some(&keys::EVENTS_KEY_PATH.into())); let events = db.create_object_store_with_params(keys::EVENTS, &object_store_params)?; + let events_room_params = IdbIndexParameters::new(); + events_room_params.set_unique(true); + events.create_index_with_params( + keys::EVENTS_ROOM, + &keys::EVENTS_ROOM_KEY_PATH.into(), + &events_room_params, + ); + let events_position_params = IdbIndexParameters::new(); events_position_params.set_unique(true); events.create_index_with_params( diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 2d7ec100fbf..72376cbcc82 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -168,9 +168,6 @@ impl_event_cache_store! { ) -> Result<(), IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::GAPS, keys::EVENTS], IdbTransactionMode::Readwrite, @@ -179,63 +176,55 @@ impl_event_cache_store! 
{ for update in updates { match update { Update::NewItemsChunk { previous, new, next } => { - trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={new:?}, next={next:?})"); + trace!(%linked_chunk_id, "Inserting new chunk (prev={previous:?}, new={new:?}, next={next:?})"); transaction - .add_chunk( - &types::Chunk { - room_id: room_id.to_owned(), - identifier: new.index(), - previous: previous.map(|i| i.index()), - next: next.map(|i| i.index()), - chunk_type: ChunkType::Event, - }, - ) + .add_chunk(&types::Chunk { + linked_chunk_id: linked_chunk_id.to_owned(), + identifier: new.index(), + previous: previous.map(|i| i.index()), + next: next.map(|i| i.index()), + chunk_type: ChunkType::Event, + }) .await?; } Update::NewGapChunk { previous, new, next, gap } => { - trace!(%room_id, "Inserting new gap (prev={previous:?}, new={new:?}, next={next:?})"); + trace!(%linked_chunk_id, "Inserting new gap (prev={previous:?}, new={new:?}, next={next:?})"); transaction - .add_item( - &types::Gap { - room_id: room_id.to_owned(), - chunk_identifier: new.index(), - prev_token: gap.prev_token, - }, - ) + .add_item(&types::Gap { + linked_chunk_id: linked_chunk_id.to_owned(), + chunk_identifier: new.index(), + prev_token: gap.prev_token, + }) .await?; transaction - .add_chunk( - &types::Chunk { - room_id: room_id.to_owned(), - identifier: new.index(), - previous: previous.map(|i| i.index()), - next: next.map(|i| i.index()), - chunk_type: ChunkType::Gap, - }, - ) + .add_chunk(&types::Chunk { + linked_chunk_id: linked_chunk_id.to_owned(), + identifier: new.index(), + previous: previous.map(|i| i.index()), + next: next.map(|i| i.index()), + chunk_type: ChunkType::Gap, + }) .await?; } Update::RemoveChunk(chunk_id) => { - trace!("Removing chunk {chunk_id:?}"); - transaction.delete_chunk_by_id(room_id, chunk_id).await?; + trace!(%linked_chunk_id, "Removing chunk {chunk_id:?}"); + transaction.delete_chunk_by_id(linked_chunk_id, chunk_id).await?; } Update::PushItems { at, items } => { 
let chunk_identifier = at.chunk_identifier().index(); - trace!(%room_id, "pushing {} items @ {chunk_identifier}", items.len()); + trace!(%linked_chunk_id, "pushing {} items @ {chunk_identifier}", items.len()); for (i, item) in items.into_iter().enumerate() { transaction - .put_event( - &types::Event::InBand(InBandEvent { - room_id: room_id.to_owned(), - content: item, - position: types::Position { - chunk_identifier, - index: at.index() + i, - }, - }), - ) + .put_event(&types::Event::InBand(InBandEvent { + linked_chunk_id: linked_chunk_id.to_owned(), + content: item, + position: types::Position { + chunk_identifier, + index: at.index() + i, + }, + })) .await?; } } @@ -243,42 +232,42 @@ impl_event_cache_store! { let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "replacing item @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "replacing item @ {chunk_id}:{index}"); transaction - .put_event( - &types::Event::InBand(InBandEvent { - room_id: room_id.to_owned(), - content: item, - position: at.into(), - }), - ) + .put_event(&types::Event::InBand(InBandEvent { + linked_chunk_id: linked_chunk_id.to_owned(), + content: item, + position: at.into(), + })) .await?; } Update::RemoveItem { at } => { let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "removing item @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "removing item @ {chunk_id}:{index}"); - transaction.delete_event_by_position(room_id, at.into()).await?; + transaction.delete_event_by_position(linked_chunk_id, at.into()).await?; } Update::DetachLastItems { at } => { let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "detaching last items @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "detaching last items @ {chunk_id}:{index}"); - transaction.delete_events_by_chunk_from_index(room_id, at.into()).await?; + transaction + .delete_events_by_chunk_from_index(linked_chunk_id, at.into()) + .await?; } 
Update::StartReattachItems | Update::EndReattachItems => { // Nothing? See sqlite implementation } Update::Clear => { - trace!(%room_id, "clearing room"); - transaction.delete_chunks_in_room(room_id).await?; - transaction.delete_events_in_room(room_id).await?; - transaction.delete_gaps_in_room(room_id).await?; + trace!(%linked_chunk_id, "clearing room"); + transaction.delete_chunks_by_linked_chunk_id(linked_chunk_id).await?; + transaction.delete_events_by_linked_chunk_id(linked_chunk_id).await?; + transaction.delete_gaps_by_linked_chunk_id(linked_chunk_id).await?; } } } @@ -293,19 +282,16 @@ impl_event_cache_store! { ) -> Result>, IndexeddbEventCacheStoreError> { let _ = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::GAPS, keys::EVENTS], IdbTransactionMode::Readwrite, )?; let mut raw_chunks = Vec::new(); - let chunks = transaction.get_chunks_in_room(room_id).await?; + let chunks = transaction.get_chunks_by_linked_chunk_id(linked_chunk_id).await?; for chunk in chunks { if let Some(raw_chunk) = transaction - .load_chunk_by_id(room_id, ChunkIdentifier::new(chunk.identifier)) + .load_chunk_by_id(linked_chunk_id, ChunkIdentifier::new(chunk.identifier)) .await? { raw_chunks.push(raw_chunk); @@ -331,19 +317,17 @@ impl_event_cache_store! { // https://github.com/matrix-org/matrix-rust-sdk/pull/5382. 
let _ = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readwrite, )?; let mut raw_chunks = Vec::new(); - let chunks = transaction.get_chunks_in_room(room_id).await?; + let chunks = transaction.get_chunks_by_linked_chunk_id(linked_chunk_id).await?; for chunk in chunks { let chunk_id = ChunkIdentifier::new(chunk.identifier); - let num_items = transaction.get_events_count_by_chunk(room_id, chunk_id).await?; + let num_items = + transaction.get_events_count_by_chunk(linked_chunk_id, chunk_id).await?; raw_chunks.push(ChunkMetadata { num_items, previous: chunk.previous.map(ChunkIdentifier::new), @@ -364,20 +348,20 @@ impl_event_cache_store! { > { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); + let owned_linked_chunk_id = linked_chunk_id.to_owned(); + let room_id = owned_linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readonly, )?; - if transaction.get_chunks_count_in_room(room_id).await? == 0 { + if transaction.get_chunks_count_by_linked_chunk_id(linked_chunk_id).await? == 0 { return Ok((None, ChunkIdentifierGenerator::new_from_scratch())); } // Now that we know we have some chunks in the room, we query IndexedDB // for the last chunk in the room by getting the chunk which does not // have a next chunk. - match transaction.get_chunk_by_next_chunk_id(room_id, None).await { + match transaction.get_chunk_by_next_chunk_id(linked_chunk_id, None).await { Err(IndexeddbEventCacheStoreTransactionError::ItemIsNotUnique) => { // If there are multiple chunks that do not have a next chunk, that // means we have more than one last chunk, which means that we have @@ -388,20 +372,20 @@ impl_event_cache_store! 
{ // There was some error querying IndexedDB, but it is not necessarily // a violation of our data constraints. Err(e.into()) - }, + } Ok(None) => { // If there is no chunk without a next chunk, that means every chunk // points to another chunk, which means that we have a cycle in our list. Err(IndexeddbEventCacheStoreError::ChunksContainCycle) - }, + } Ok(Some(last_chunk)) => { let last_chunk_identifier = ChunkIdentifier::new(last_chunk.identifier); let last_raw_chunk = transaction - .load_chunk_by_id(room_id, last_chunk_identifier) + .load_chunk_by_id(linked_chunk_id, last_chunk_identifier) .await? .ok_or(IndexeddbEventCacheStoreError::UnableToLoadChunk)?; let max_chunk_id = transaction - .get_max_chunk_by_id(room_id) + .get_max_chunk_by_id(linked_chunk_id) .await? .map(|chunk| ChunkIdentifier::new(chunk.identifier)) .ok_or(IndexeddbEventCacheStoreError::NoMaxChunkId)?; @@ -420,16 +404,18 @@ impl_event_cache_store! { ) -> Result>, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readonly, )?; - if let Some(chunk) = transaction.get_chunk_by_id(room_id, before_chunk_identifier).await? { + if let Some(chunk) = + transaction.get_chunk_by_id(linked_chunk_id, before_chunk_identifier).await? + { if let Some(previous_identifier) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous_identifier); - return Ok(transaction.load_chunk_by_id(room_id, previous_identifier).await?); + return Ok(transaction + .load_chunk_by_id(linked_chunk_id, previous_identifier) + .await?); } } Ok(None) @@ -462,14 +448,11 @@ impl_event_cache_store! 
{ return Ok(Vec::new()); } - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; let mut duplicated = Vec::new(); for event_id in events { if let Some(types::Event::InBand(event)) = - transaction.get_event_by_id(room_id, &event_id).await? + transaction.get_event_by_id(linked_chunk_id, &event_id).await? { duplicated.push((event_id, event.position.into())); } @@ -485,10 +468,9 @@ impl_event_cache_store! { ) -> Result, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; transaction - .get_event_by_id(room_id, event_id) + .get_event_by_room(room_id, event_id) .await .map(|ok| ok.map(Into::into)) .map_err(Into::into) @@ -503,8 +485,7 @@ impl_event_cache_store! { ) -> Result)>, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; let mut related_events = Vec::new(); match filters { @@ -519,9 +500,7 @@ impl_event_cache_store! { } } _ => { - for event in - transaction.get_events_by_related_event(room_id, event_id).await? - { + for event in transaction.get_events_by_related_event(room_id, event_id).await? { let position = event.position().map(Into::into); related_events.push((event.into(), position)); } @@ -542,11 +521,14 @@ impl_event_cache_store! { error!(%room_id, "Trying to save an event with no ID"); return Ok(()); }; - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; - let event = match transaction.get_event_by_id(room_id, &event_id).await? 
{ + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; + let event = match transaction.get_event_by_room(room_id, &event_id).await? { Some(mut inner) => inner.with_content(event), - None => types::Event::OutOfBand(OutOfBandEvent { room_id: room_id.to_owned(), content: event, position: () }), + None => types::Event::OutOfBand(OutOfBandEvent { + linked_chunk_id: LinkedChunkId::Room(room_id).to_owned(), + content: event, + position: (), + }), }; transaction.put_event(&event).await?; transaction.commit().await?; @@ -673,14 +655,13 @@ impl_event_cache_store! { mod tests { use matrix_sdk_base::{ event_cache::store::{EventCacheStore, EventCacheStoreError}, - event_cache_store_integration_tests_time, + event_cache_store_integration_tests, event_cache_store_integration_tests_time, }; use matrix_sdk_test::async_test; use uuid::Uuid; use crate::{ - event_cache_store::IndexeddbEventCacheStore, event_cache_store_integration_tests, - indexeddb_event_cache_store_integration_tests, + event_cache_store::IndexeddbEventCacheStore, indexeddb_event_cache_store_integration_tests, }; mod unencrypted { diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index 2bbd80ab667..fcb67a648a9 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -29,7 +29,7 @@ use std::sync::LazyLock; -use matrix_sdk_base::linked_chunk::ChunkIdentifier; +use matrix_sdk_base::linked_chunk::{ChunkIdentifier, LinkedChunkId}; use matrix_sdk_crypto::CryptoStoreError; use ruma::{events::relation::RelationType, EventId, OwnedEventId, RoomId}; use serde::{Deserialize, Serialize}; @@ -231,6 +231,43 @@ impl From for IndexedKeyRange { } } +/// A (possibly) encrypted representation of a [`Lease`] +pub type IndexedLeaseContent = MaybeEncrypted; + +/// A (possibly) hashed representation 
of a [`LinkedChunkId`] which is suitable +/// for use in an IndexedDB key +pub type IndexedLinkedChunkId = Vec; + +/// A (possibly) hashed representation of a [`RoomId`] which is suitable for +/// use in an IndexedDB key +pub type IndexedRoomId = String; + +/// A representation of a [`ChunkIdentifier`] which is suitable for use in an +/// IndexedDB key +pub type IndexedChunkId = u64; + +/// A (possibly) encrypted representation of a [`Chunk`] +pub type IndexedChunkContent = MaybeEncrypted; + +/// A (possibly) hashed representation of an [`EventId`] which is suitable for +/// use in an IndexedDB key +pub type IndexedEventId = String; + +/// A representation of the position of an [`Event`] in a [`Chunk`] which is +/// suitable for use in an IndexedDB key +pub type IndexedEventPositionIndex = usize; + +/// A (possibly) hashed representation of the relationship between two events +/// (see [`RelationType`](ruma::events::relation::RelationType)) which is +/// suitable for use in an IndexedDB key +pub type IndexedRelationType = String; + +/// A (possibly) encrypted representation of an [`Event`] +pub type IndexedEventContent = MaybeEncrypted; + +/// A (possibly) encrypted representation of a [`Gap`] +pub type IndexedGapContent = MaybeEncrypted; + /// Represents the [`LEASES`][1] object store. /// /// [1]: crate::event_cache_store::migrations::v1::create_lease_object_store @@ -293,8 +330,6 @@ impl IndexedKeyComponentBounds for IndexedLeaseIdKey { } } -pub type IndexedLeaseContent = MaybeEncrypted; - /// Represents the [`LINKED_CHUNKS`][1] object store. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_linked_chunks_object_store @@ -322,11 +357,11 @@ impl Indexed for Chunk { ) -> Result { Ok(IndexedChunk { id: >::encode( - (&self.room_id, ChunkIdentifier::new(self.identifier)), + (self.linked_chunk_id.as_ref(), ChunkIdentifier::new(self.identifier)), serializer, ), next: IndexedNextChunkIdKey::encode( - (&self.room_id, self.next.map(ChunkIdentifier::new)), + (self.linked_chunk_id.as_ref(), self.next.map(ChunkIdentifier::new)), serializer, ), content: serializer.maybe_encrypt_value(self)?, @@ -344,44 +379,45 @@ impl Indexed for Chunk { /// The value associated with the [primary key](IndexedChunk::id) of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID. /// /// [1]: crate::event_cache_store::migrations::v1::create_linked_chunks_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedChunkIdKey(IndexedRoomId, IndexedChunkId); +pub struct IndexedChunkIdKey(IndexedLinkedChunkId, IndexedChunkId); impl IndexedKey for IndexedChunkIdKey { - type KeyComponents<'a> = (&'a RoomId, ChunkIdentifier); + type KeyComponents<'a> = (LinkedChunkId<'a>, ChunkIdentifier); fn encode( - (room_id, chunk_id): Self::KeyComponents<'_>, + (linked_chunk_id, chunk_id): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); let chunk_id = chunk_id.index(); - Self(room_id, chunk_id) + Self(linked_chunk_id, chunk_id) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedChunkIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_LOWER_CHUNK_IDENTIFIER) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, 
LinkedChunkId<'a>> for IndexedChunkIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_LOWER_CHUNK_IDENTIFIER) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_UPPER_CHUNK_IDENTIFIER) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_UPPER_CHUNK_IDENTIFIER) } } -pub type IndexedRoomId = String; -pub type IndexedChunkId = u64; -pub type IndexedChunkContent = MaybeEncrypted; - /// The value associated with the [`next`](IndexedChunk::next) index of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID, if there is a next chunk in the list. /// /// Note: it would be more convenient to represent this type with an optional @@ -397,47 +433,52 @@ pub type IndexedChunkContent = MaybeEncrypted; #[serde(untagged)] pub enum IndexedNextChunkIdKey { /// There is no next chunk. - None((IndexedRoomId,)), + None((IndexedLinkedChunkId,)), /// The identifier of the next chunk in the list. 
Some(IndexedChunkIdKey), } impl IndexedNextChunkIdKey { - pub fn none(room_id: IndexedRoomId) -> Self { - Self::None((room_id,)) + pub fn none(linked_chunk_id: IndexedLinkedChunkId) -> Self { + Self::None((linked_chunk_id,)) } } impl IndexedKey for IndexedNextChunkIdKey { const INDEX: Option<&'static str> = Some(keys::LINKED_CHUNKS_NEXT); - type KeyComponents<'a> = (&'a RoomId, Option); + type KeyComponents<'a> = (LinkedChunkId<'a>, Option); fn encode( - (room_id, next_chunk_id): Self::KeyComponents<'_>, + (linked_chunk_id, next_chunk_id): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { next_chunk_id .map(|id| { Self::Some(>::encode( - (room_id, id), + (linked_chunk_id, id), serializer, )) }) .unwrap_or_else(|| { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + let room_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); Self::none(room_id) }) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedNextChunkIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, None) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, LinkedChunkId<'a>> for IndexedNextChunkIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, None) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, Some(*INDEXED_KEY_UPPER_CHUNK_IDENTIFIER)) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, Some(*INDEXED_KEY_UPPER_CHUNK_IDENTIFIER)) } } @@ -448,6 +489,9 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedNextC pub struct IndexedEvent { /// The primary key of the object store. 
pub id: IndexedEventIdKey, + /// An indexed key on the object store, which represents the room in which + /// the event exists + pub room: IndexedEventRoomKey, /// An indexed key on the object store, which represents the position of the /// event, if it is in a chunk. pub position: Option, @@ -477,9 +521,10 @@ impl Indexed for Event { serializer: &IndexeddbSerializer, ) -> Result { let event_id = self.event_id().ok_or(Self::Error::NoEventId)?; - let id = IndexedEventIdKey::encode((self.room_id(), &event_id), serializer); + let id = IndexedEventIdKey::encode((self.linked_chunk_id(), &event_id), serializer); + let room = IndexedEventRoomKey::encode((self.room_id(), &event_id), serializer); let position = self.position().map(|position| { - IndexedEventPositionKey::encode((self.room_id(), position), serializer) + IndexedEventPositionKey::encode((self.linked_chunk_id(), position), serializer) }); let relation = self.relation().map(|(related_event, relation_type)| { IndexedEventRelationKey::encode( @@ -487,7 +532,13 @@ impl Indexed for Event { serializer, ) }); - Ok(IndexedEvent { id, position, relation, content: serializer.maybe_encrypt_value(self)? }) + Ok(IndexedEvent { + id, + room, + position, + relation, + content: serializer.maybe_encrypt_value(self)?, + }) } fn from_indexed( @@ -501,24 +552,69 @@ impl Indexed for Event { /// The value associated with the [primary key](IndexedEvent::id) of the /// [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID -/// - The (possibly) encrypted Event ID. +/// - The (possibly) hashed Linked Chunk ID +/// - The (possibly) hashed Event ID. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedEventIdKey(IndexedRoomId, IndexedEventId); +pub struct IndexedEventIdKey(IndexedLinkedChunkId, IndexedEventId); impl IndexedKey for IndexedEventIdKey { + type KeyComponents<'a> = (LinkedChunkId<'a>, &'a EventId); + + fn encode( + (linked_chunk_id, event_id): Self::KeyComponents<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); + let event_id = serializer.encode_key_as_string(keys::EVENTS, event_id); + Self(linked_chunk_id, event_id) + } +} + +impl IndexedPrefixKeyBounds> for IndexedEventIdKey { + fn lower_key_with_prefix( + linked_chunk_id: LinkedChunkId<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + Self::encode((linked_chunk_id, &*INDEXED_KEY_LOWER_EVENT_ID), serializer) + } + + fn upper_key_with_prefix( + linked_chunk_id: LinkedChunkId<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + Self::encode((linked_chunk_id, &*INDEXED_KEY_UPPER_EVENT_ID), serializer) + } +} + +/// The value associated with the [`room`](IndexedEvent::room) index of the +/// [`EVENTS`][1] object store, which is constructed from: +/// +/// - The (possibly) hashed Room ID +/// - The (possibly) hashed Event ID. 
+/// +/// [1]: crate::event_cache_store::migrations::v1::create_events_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedEventRoomKey(IndexedRoomId, IndexedEventId); + +impl IndexedKey for IndexedEventRoomKey { + const INDEX: Option<&'static str> = Some(keys::EVENTS_ROOM); + type KeyComponents<'a> = (&'a RoomId, &'a EventId); - fn encode((room_id, event_id): (&RoomId, &EventId), serializer: &IndexeddbSerializer) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + fn encode( + (room_id, event_id): Self::KeyComponents<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id.as_str()); let event_id = serializer.encode_key_as_string(keys::EVENTS, event_id); Self(room_id, event_id) } } -impl IndexedPrefixKeyBounds for IndexedEventIdKey { +impl IndexedPrefixKeyBounds for IndexedEventRoomKey { fn lower_key_with_prefix(room_id: &RoomId, serializer: &IndexeddbSerializer) -> Self { Self::encode((room_id, &*INDEXED_KEY_LOWER_EVENT_ID), serializer) } @@ -528,72 +624,73 @@ impl IndexedPrefixKeyBounds for IndexedEventIdKey { } } -pub type IndexedEventId = String; - /// The value associated with the [`position`](IndexedEvent::position) index of /// the [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID /// - The index of the event in the chunk. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedEventPositionKey(IndexedRoomId, IndexedChunkId, IndexedEventPositionIndex); +pub struct IndexedEventPositionKey(IndexedLinkedChunkId, IndexedChunkId, IndexedEventPositionIndex); impl IndexedKey for IndexedEventPositionKey { const INDEX: Option<&'static str> = Some(keys::EVENTS_POSITION); - type KeyComponents<'a> = (&'a RoomId, Position); + type KeyComponents<'a> = (LinkedChunkId<'a>, Position); fn encode( - (room_id, position): Self::KeyComponents<'_>, + (linked_chunk_id, position): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); - Self(room_id, position.chunk_identifier, position.index) + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); + Self(linked_chunk_id, position.chunk_identifier, position.index) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, &'a RoomId> for IndexedEventPositionKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_LOWER_EVENT_POSITION) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, LinkedChunkId<'a>> for IndexedEventPositionKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_LOWER_EVENT_POSITION) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_UPPER_EVENT_POSITION) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_UPPER_EVENT_POSITION) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (&'a RoomId, ChunkIdentifier)> +impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (LinkedChunkId<'a>, ChunkIdentifier)> for 
IndexedEventPositionKey { fn lower_key_components_with_prefix( - (room_id, chunk_id): (&'a RoomId, ChunkIdentifier), + (linked_chunk_id, chunk_id): (LinkedChunkId<'a>, ChunkIdentifier), ) -> Self::KeyComponents<'a> { ( - room_id, + linked_chunk_id, Position { chunk_identifier: chunk_id.index(), index: INDEXED_KEY_LOWER_EVENT_INDEX }, ) } fn upper_key_components_with_prefix( - (room_id, chunk_id): (&'a RoomId, ChunkIdentifier), + (linked_chunk_id, chunk_id): (LinkedChunkId<'a>, ChunkIdentifier), ) -> Self::KeyComponents<'a> { ( - room_id, + linked_chunk_id, Position { chunk_identifier: chunk_id.index(), index: INDEXED_KEY_UPPER_EVENT_INDEX }, ) } } -pub type IndexedEventPositionIndex = usize; - /// The value associated with the [`relation`](IndexedEvent::relation) index of /// the [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID -/// - The (possibly) encrypted Event ID of the related event +/// - The (possibly) hashed Room ID +/// - The (possibly) hashed Event ID of the related event /// - The type of relationship between the events /// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store @@ -658,12 +755,6 @@ impl IndexedPrefixKeyBounds for IndexedEventRelation } } -/// A representation of the relationship between two events (see -/// [`RelationType`](ruma::events::relation::RelationType)) -pub type IndexedRelationType = String; - -pub type IndexedEventContent = MaybeEncrypted; - /// Represents the [`GAPS`][1] object store. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_gaps_object_store @@ -687,7 +778,7 @@ impl Indexed for Gap { ) -> Result { Ok(IndexedGap { id: >::encode( - (&self.room_id, ChunkIdentifier::new(self.chunk_identifier)), + (self.linked_chunk_id.as_ref(), ChunkIdentifier::new(self.chunk_identifier)), serializer, ), content: serializer.maybe_encrypt_value(self)?, @@ -704,7 +795,7 @@ impl Indexed for Gap { /// The primary key of the [`GAPS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID /// /// [1]: crate::event_cache_store::migrations::v1::create_gaps_object_store @@ -718,18 +809,20 @@ impl IndexedKey for IndexedGapIdKey { } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, &'a RoomId> for IndexedGapIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { +impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, LinkedChunkId<'a>> for IndexedGapIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { >::lower_key_components_with_prefix( - room_id, + linked_chunk_id, ) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { >::upper_key_components_with_prefix( - room_id, + linked_chunk_id, ) } } - -pub type IndexedGapContent = MaybeEncrypted; diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 8d26fd6cced..760fb95fe83 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -15,7 +15,7 @@ use indexed_db_futures::{prelude::IdbTransaction, IdbQuerySource}; use matrix_sdk_base::{ event_cache::{store::EventCacheStoreError, Event as 
RawEvent, Gap as RawGap}, - linked_chunk::{ChunkContent, ChunkIdentifier, RawChunk}, + linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, RawChunk}, }; use ruma::{events::relation::RelationType, EventId, OwnedEventId, RoomId}; use serde::{ @@ -34,7 +34,8 @@ use crate::event_cache_store::{ }, types::{ IndexedChunkIdKey, IndexedEventIdKey, IndexedEventPositionKey, IndexedEventRelationKey, - IndexedGapIdKey, IndexedKeyRange, IndexedLeaseIdKey, IndexedNextChunkIdKey, + IndexedEventRoomKey, IndexedGapIdKey, IndexedKeyRange, IndexedLeaseIdKey, + IndexedNextChunkIdKey, }, IndexeddbEventCacheStoreSerializer, }, @@ -103,8 +104,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.transaction.await.into_result().map_err(Into::into) } - /// Query IndexedDB for items that match the given key range in the given - /// room. + /// Query IndexedDB for items that match the given key range pub async fn get_items_by_key( &self, range: impl Into>, @@ -132,8 +132,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items) } - /// Query IndexedDB for items that match the given key component range in - /// the given room. 
+ /// Query IndexedDB for items that match the given key component range pub async fn get_items_by_key_components<'b, T, K>( &self, range: impl Into>>, @@ -148,7 +147,26 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_by_key::(range).await } - /// Query IndexedDB for all items in the given room by key `K` + /// Query IndexedDB for all items matching the given linked chunk id by key + /// `K` + pub async fn get_items_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result, IndexeddbEventCacheStoreTransactionError> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.get_items_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + + /// Query IndexedDB for all items of type `T` by key `K` in the given room pub async fn get_items_in_room<'b, T, K>( &self, room_id: &'b RoomId, @@ -166,7 +184,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Query IndexedDB for items that match the given key in the given room. If + /// Query IndexedDB for items that match the given key. If /// more than one item is found, an error is returned. pub async fn get_item_by_key( &self, @@ -185,8 +203,8 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items.pop()) } - /// Query IndexedDB for items that match the given key components in the - /// given room. If more than one item is found, an error is returned. + /// Query IndexedDB for items that match the given key components. If more + /// than one item is found, an error is returned. pub async fn get_item_by_key_components<'b, T, K>( &self, components: K::KeyComponents<'b>, @@ -204,8 +222,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items.pop()) } - /// Query IndexedDB for the number of items that match the given key range - /// in the given room. 
+ /// Query IndexedDB for the number of items that match the given key range. pub async fn get_items_count_by_key( &self, range: impl Into>, @@ -227,7 +244,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } /// Query IndexedDB for the number of items that match the given key - /// components range in the given room. + /// components range. pub async fn get_items_count_by_key_components<'b, T, K>( &self, range: impl Into>>, @@ -242,7 +259,27 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_count_by_key::(range).await } - /// Query IndexedDB for the number of items in the given room. + /// Query IndexedDB for the number of items matching the given linked chunk + /// id. + pub async fn get_items_count_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.get_items_count_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + + /// Query IndexedDB for the number of items of type `T` by `K` in the given + /// room. pub async fn get_items_count_in_room<'b, T, K>( &self, room_id: &'b RoomId, @@ -292,7 +329,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } } - /// Adds an item to the given room in the corresponding IndexedDB object + /// Adds an item to the corresponding IndexedDB object /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already /// exists, it will be rejected. pub async fn add_item( @@ -313,7 +350,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .map_err(Into::into) } - /// Puts an item in the given room in the corresponding IndexedDB object + /// Puts an item in the corresponding IndexedDB object /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already /// exists, it will be overwritten. 
pub async fn put_item( @@ -334,7 +371,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .map_err(Into::into) } - /// Delete items in given key range in the given room from IndexedDB + /// Delete items in given key range from IndexedDB pub async fn delete_items_by_key( &self, range: impl Into>, @@ -359,7 +396,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(()) } - /// Delete items in the given key component range in the given room from + /// Delete items in the given key component range from /// IndexedDB pub async fn delete_items_by_key_components<'b, T, K>( &self, @@ -373,6 +410,23 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_by_key::(range).await } + /// Delete all items of type `T` by key `K` associated with the given linked + /// chunk id from IndexedDB + pub async fn delete_items_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result<(), IndexeddbEventCacheStoreTransactionError> + where + T: Indexed, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.delete_items_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + /// Delete all items of type `T` by key `K` in the given room from IndexedDB pub async fn delete_items_in_room<'b, T, K>( &self, @@ -389,7 +443,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Delete item that matches the given key components in the given room from + /// Delete item that matches the given key components from /// IndexedDB pub async fn delete_item_by_key<'b, T, K>( &self, @@ -402,7 +456,8 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_by_key_components::(key).await } - /// Clear all items of type `T` in all rooms from IndexedDB + /// Clear all items of type `T` from the associated object store + /// `T::OBJECT_STORE` from IndexedDB pub async fn clear(&self) -> Result<(), IndexeddbEventCacheStoreTransactionError> where T: Indexed, @@ -428,66 +483,77 @@ 
impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.put_item(lease).await } - /// Query IndexedDB for chunks that match the given chunk identifier in the - /// given room. If more than one item is found, an error is returned. + /// Query IndexedDB for chunks that match the given chunk identifier and the + /// given linked chunk id. If more than one item is found, an error is + /// returned. pub async fn get_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, chunk_id)).await + self.get_item_by_key_components::((linked_chunk_id, chunk_id)) + .await } /// Query IndexedDB for chunks such that the next chunk matches the given - /// chunk identifier in the given room. If more than one item is found, - /// an error is returned. + /// chunk identifier and the given linked chunk id. If more than one item is + /// found, an error is returned. pub async fn get_chunk_by_next_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, next_chunk_id: Option, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, next_chunk_id)) - .await + self.get_item_by_key_components::(( + linked_chunk_id, + next_chunk_id, + )) + .await } - /// Query IndexedDB for all chunks in the given room - pub async fn get_chunks_in_room( + /// Query IndexedDB for all chunks matching the given linked chunk id + pub async fn get_chunks_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_items_in_room::(room_id).await + self.get_items_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the number of chunks in the given room. - pub async fn get_chunks_count_in_room( + /// Query IndexedDB for the number of chunks matching the given linked chunk + /// id. 
+ pub async fn get_chunks_count_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result { - self.get_items_count_in_room::(room_id).await + self.get_items_count_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the chunk with the maximum key in the given room. + /// Query IndexedDB for the chunk with the maximum key matching the given + /// linked chunk id. pub async fn get_max_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix::(room_id, self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix::(linked_chunk_id, self.serializer.inner()); self.get_max_item_by_key::(range).await } - /// Query IndexedDB for given chunk in given room and additionally query - /// for events or gap, depending on chunk type, in order to construct the - /// full chunk. + /// Query IndexedDB for given chunk matching the given linked chunk id and + /// additionally query for events or gap, depending on chunk type, in + /// order to construct the full chunk. pub async fn load_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result>, IndexeddbEventCacheStoreTransactionError> { - if let Some(chunk) = self.get_chunk_by_id(room_id, chunk_id).await? { + if let Some(chunk) = self.get_chunk_by_id(linked_chunk_id, chunk_id).await? { let content = match chunk.chunk_type { ChunkType::Event => { let events = self - .get_events_by_chunk(room_id, ChunkIdentifier::new(chunk.identifier)) + .get_events_by_chunk( + linked_chunk_id, + ChunkIdentifier::new(chunk.identifier), + ) .await? 
.into_iter() .map(RawEvent::from) @@ -496,7 +562,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } ChunkType::Gap => { let gap = self - .get_gap_by_id(room_id, ChunkIdentifier::new(chunk.identifier)) + .get_gap_by_id(linked_chunk_id, ChunkIdentifier::new(chunk.identifier)) .await? .ok_or(IndexeddbEventCacheStoreTransactionError::ItemNotFound)?; ChunkContent::Gap(RawGap { prev_token: gap.prev_token }) @@ -512,7 +578,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(None) } - /// Add a chunk to the given room and ensure that the next and previous + /// Add a chunk and ensure that the next and previous /// chunks are properly linked to the chunk being added. If a chunk with /// the same identifier already exists, the given chunk will be /// rejected. @@ -524,7 +590,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { if let Some(previous) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous); if let Some(mut previous_chunk) = - self.get_chunk_by_id(&chunk.room_id, previous_identifier).await? + self.get_chunk_by_id(chunk.linked_chunk_id.as_ref(), previous_identifier).await? { previous_chunk.next = Some(chunk.identifier); self.put_item(&previous_chunk).await?; @@ -533,7 +599,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { if let Some(next) = chunk.next { let next_identifier = ChunkIdentifier::new(next); if let Some(mut next_chunk) = - self.get_chunk_by_id(&chunk.room_id, next_identifier).await? + self.get_chunk_by_id(chunk.linked_chunk_id.as_ref(), next_identifier).await? { next_chunk.previous = Some(chunk.identifier); self.put_item(&next_chunk).await?; @@ -542,20 +608,20 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(()) } - /// Delete chunk that matches the given id in the given room and ensure that - /// the next and previous chunk are updated to link to one another. - /// Additionally, ensure that events and gaps in the given chunk are - /// also deleted. 
+ /// Delete chunk that matches the given id and the given linked chunk id and + /// ensure that the next and previous chunk are updated to link to one + /// another. Additionally, ensure that events and gaps in the given + /// chunk are also deleted. pub async fn delete_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - if let Some(chunk) = self.get_chunk_by_id(room_id, chunk_id).await? { + if let Some(chunk) = self.get_chunk_by_id(linked_chunk_id, chunk_id).await? { if let Some(previous) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous); if let Some(mut previous_chunk) = - self.get_chunk_by_id(room_id, previous_identifier).await? + self.get_chunk_by_id(linked_chunk_id, previous_identifier).await? { previous_chunk.next = chunk.next; self.put_item(&previous_chunk).await?; @@ -563,88 +629,104 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } if let Some(next) = chunk.next { let next_identifier = ChunkIdentifier::new(next); - if let Some(mut next_chunk) = self.get_chunk_by_id(room_id, next_identifier).await? + if let Some(mut next_chunk) = + self.get_chunk_by_id(linked_chunk_id, next_identifier).await? 
{ next_chunk.previous = chunk.previous; self.put_item(&next_chunk).await?; } } - self.delete_item_by_key::((room_id, chunk_id)).await?; + self.delete_item_by_key::((linked_chunk_id, chunk_id)) + .await?; match chunk.chunk_type { ChunkType::Event => { - self.delete_events_by_chunk(room_id, chunk_id).await?; + self.delete_events_by_chunk(linked_chunk_id, chunk_id).await?; } ChunkType::Gap => { - self.delete_gap_by_id(room_id, chunk_id).await?; + self.delete_gap_by_id(linked_chunk_id, chunk_id).await?; } } } Ok(()) } - /// Delete all chunks in the given room - pub async fn delete_chunks_in_room( + /// Delete all chunks associated with the given linked chunk id + pub async fn delete_chunks_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await + } + + /// Query IndexedDB for events that match the given event id and the given + /// linked chunk id. If more than one item is found, an error is returned. + pub async fn get_event_by_id( + &self, + linked_chunk_id: LinkedChunkId<'_>, + event_id: &EventId, + ) -> Result, IndexeddbEventCacheStoreTransactionError> { + let key = self.serializer.encode_key((linked_chunk_id, event_id)); + self.get_item_by_key::(key).await } /// Query IndexedDB for events that match the given event id in the given /// room. If more than one item is found, an error is returned. - pub async fn get_event_by_id( + pub async fn get_event_by_room( &self, room_id: &RoomId, event_id: &EventId, ) -> Result, IndexeddbEventCacheStoreTransactionError> { let key = self.serializer.encode_key((room_id, event_id)); - self.get_item_by_key::(key).await + self.get_item_by_key::(key).await } - /// Query IndexedDB for events in the given position range in the given - /// room. 
+ /// Query IndexedDB for events in the given position range matching the + /// given linked chunk id. pub async fn get_events_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { self.get_items_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Query IndexedDB for number of events in the given position range in the - /// given room. + /// Query IndexedDB for number of events in the given position range + /// matching the given linked_chunk_id. pub async fn get_events_count_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result { self.get_items_count_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Query IndexedDB for events in the given chunk in the given room. + /// Query IndexedDB for events in the given chunk matching the given linked + /// chunk id. pub async fn get_events_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.get_items_by_key::(range).await } - /// Query IndexedDB for number of events in the given chunk in the given - /// room. + /// Query IndexedDB for number of events in the given chunk matching the + /// given linked chunk id. 
pub async fn get_events_count_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.get_items_count_by_key::(range).await } @@ -674,7 +756,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_by_key::(range).await } - /// Puts an event in the given room. If an event with the same key already + /// Puts an event in IndexedDB. If an event with the same key already /// exists, it will be overwritten. pub async fn put_event( &self, @@ -690,89 +772,93 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { // As a workaround, if the event has a position, we delete it first and // then call `put_item`. This should be fine as it all happens within the // context of a single transaction. - self.delete_event_by_position(event.room_id(), position).await?; + self.delete_event_by_position(event.linked_chunk_id(), position).await?; } self.put_item(event).await } - /// Delete events in the given position range in the given room + /// Delete events in the given position range matching the given linked + /// chunk id pub async fn delete_events_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { self.delete_items_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Delete event in the given position in the given room + /// Delete event in the given position matching the given linked chunk id pub async fn delete_event_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, position: Position, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - 
self.delete_item_by_key::((room_id, position)).await + self.delete_item_by_key::((linked_chunk_id, position)).await } - /// Delete events in the given chunk in the given room + /// Delete events in the given chunk matching the given linked chunk id pub async fn delete_events_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.delete_items_by_key::(range).await } - /// Delete events starting from the given position in the given room - /// until the end of the chunk + /// Delete events matching the given linked chunk id starting from the given + /// position until the end of the chunk pub async fn delete_events_by_chunk_from_index( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, position: Position, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - let lower = (room_id, position); + let lower = (linked_chunk_id, position); let upper = IndexedEventPositionKey::upper_key_components_with_prefix(( - room_id, + linked_chunk_id, ChunkIdentifier::new(position.chunk_identifier), )); let range = IndexedKeyRange::Bound(lower, upper).map(|(_, position)| position); - self.delete_events_by_position(room_id, range).await + self.delete_events_by_position(linked_chunk_id, range).await } - /// Delete all events in the given room - pub async fn delete_events_in_room( + /// Delete all events matching the given linked chunk id + pub async fn delete_events_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the gap in the given 
chunk in the given room. + /// Query IndexedDB for the gap in the given chunk matching the given linked + /// chunk id. pub async fn get_gap_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, chunk_id)).await + self.get_item_by_key_components::((linked_chunk_id, chunk_id)).await } - /// Delete gap that matches the given chunk identifier in the given room + /// Delete gap that matches the given chunk identifier and the given linked + /// chunk id pub async fn delete_gap_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_item_by_key::((room_id, chunk_id)).await + self.delete_item_by_key::((linked_chunk_id, chunk_id)).await } - /// Delete all gaps in the given room - pub async fn delete_gaps_in_room( + /// Delete all gaps matching the given linked chunk id + pub async fn delete_gaps_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs index 2e0dded3ddb..62523fadb8c 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs @@ -15,8 +15,9 @@ use std::time::Duration; use matrix_sdk_base::{ - deserialized_responses::TimelineEvent, event_cache::store::extract_event_relation, - linked_chunk::ChunkIdentifier, + deserialized_responses::TimelineEvent, + event_cache::store::extract_event_relation, + linked_chunk::{ChunkIdentifier, LinkedChunkId, OwnedLinkedChunkId}, }; use ruma::{OwnedEventId, 
OwnedRoomId, RoomId}; use serde::{Deserialize, Serialize}; @@ -41,8 +42,8 @@ impl Lease { /// which can be stored in IndexedDB. #[derive(Debug, Serialize, Deserialize)] pub struct Chunk { - /// The room in which the chunk exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the chunk exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The identifier of the chunk - i.e., /// [`ChunkIdentifier`](matrix_sdk_base::linked_chunk::ChunkIdentifier). pub identifier: u64, @@ -89,11 +90,19 @@ impl From for TimelineEvent { } impl Event { + /// The [`LinkedChunkId`] in which the underlying event exists. + pub fn linked_chunk_id(&self) -> LinkedChunkId<'_> { + match self { + Event::InBand(e) => e.linked_chunk_id.as_ref(), + Event::OutOfBand(e) => e.linked_chunk_id.as_ref(), + } + } + /// The [`RoomId`] of the room in which the underlying event exists. pub fn room_id(&self) -> &RoomId { match self { - Event::InBand(e) => &e.room_id, - Event::OutOfBand(e) => &e.room_id, + Event::InBand(e) => e.room_id(), + Event::OutOfBand(e) => e.room_id(), } } @@ -142,8 +151,8 @@ impl Event { /// in-band or out-of-band. #[derive(Debug, Serialize, Deserialize)] pub struct GenericEvent

{ - /// The room in which the event exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the event exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The full content of the event. pub content: TimelineEvent, /// The position of the event, if it is in a chunk. @@ -151,6 +160,11 @@ pub struct GenericEvent

{ } impl

GenericEvent

{ + /// The [`RoomId`] of the room in which the event exists. + pub fn room_id(&self) -> &RoomId { + self.linked_chunk_id.room_id() + } + /// The [`OwnedEventId`] of the underlying event. pub fn event_id(&self) -> Option { self.content.event_id() @@ -199,8 +213,8 @@ impl From for Position { /// which can be stored in IndexedDB. #[derive(Debug, Serialize, Deserialize)] pub struct Gap { - /// The room in which the gap exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the gap exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The identifier of the chunk containing this gap. pub chunk_identifier: u64, /// The token to use in the query, extracted from a previous "from" / diff --git a/crates/matrix-sdk-indexeddb/src/serializer.rs b/crates/matrix-sdk-indexeddb/src/serializer.rs index 43fab58088b..c6b5040554a 100644 --- a/crates/matrix-sdk-indexeddb/src/serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer.rs @@ -88,6 +88,28 @@ impl IndexeddbSerializer { Self { store_cipher } } + /// Hash the given key securely for the given tablename using the store + /// cipher. + /// + /// This works similarly to [`encode_key`](Self::encode_key), but skips + /// formatting and base64 encoding. This is useful for dealing with keys + /// that are represented as byte arrays, as it prevents having to convert + /// the byte array into a string and then back into a byte array. + /// + /// **Note** that when dealing with keys which will be encoded as strings, + /// it is recommended to use [`encode_key`](Self::encode_key), as it + /// ensures that strings are safe for use as a key. + #[allow(unused)] + pub fn hash_key(&self, table_name: &str, key: T) -> Vec + where + T: AsRef<[u8]>, + { + match &self.store_cipher { + Some(cipher) => cipher.hash_key(table_name, key.as_ref()).into(), + None => key.as_ref().into(), + } + } + /// Hash the given key securely for the given tablename, using the store /// cipher. ///