From cd61f3c7f7bd1825ead4ec09cf18df5cfcb26548 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Thu, 14 Aug 2025 09:57:45 -0400 Subject: [PATCH 01/16] feat(linked chunk): expose OwnedLinkedChunkId::as_ref for use in other crates Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-common/src/linked_chunk/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index 40e86f688bb..4c7d6071302 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -172,8 +172,7 @@ impl Display for OwnedLinkedChunkId { } impl OwnedLinkedChunkId { - #[cfg(test)] - fn as_ref(&self) -> LinkedChunkId<'_> { + pub fn as_ref(&self) -> LinkedChunkId<'_> { match self { OwnedLinkedChunkId::Room(room_id) => LinkedChunkId::Room(room_id.as_ref()), OwnedLinkedChunkId::Thread(room_id, event_id) => { From 76d036ebacb2420db4cb31d623c58078f9c857b0 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Thu, 14 Aug 2025 10:04:06 -0400 Subject: [PATCH 02/16] feat(linked chunk): add trait-based conversions between owned and borrowed linked chunk id Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-common/src/linked_chunk/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index 4c7d6071302..9df230ee545 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -134,6 +134,12 @@ impl LinkedChunkId<'_> { } } +impl<'a> From<&'a OwnedLinkedChunkId> for LinkedChunkId<'a> { + fn from(value: &'a OwnedLinkedChunkId) -> Self { + value.as_ref() + } +} + impl PartialEq<&OwnedLinkedChunkId> for LinkedChunkId<'_> { fn eq(&self, other: &&OwnedLinkedChunkId) -> bool { match (self, other) { @@ -189,6 +195,12 @@ impl OwnedLinkedChunkId { } } +impl 
From> for OwnedLinkedChunkId { + fn from(value: LinkedChunkId<'_>) -> Self { + value.to_owned() + } +} + /// Errors of [`LinkedChunk`]. #[derive(thiserror::Error, Debug)] pub enum Error { From 84f0778307bd4badccce70e2c7792b47e5b9583f Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Thu, 14 Aug 2025 10:11:06 -0400 Subject: [PATCH 03/16] feat(linked chunk): add display impl for LinkedChunkId Signed-off-by: Michael Goldenberg --- .../matrix-sdk-common/src/linked_chunk/mod.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index 9df230ee545..a7bd5e7d261 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -116,6 +116,17 @@ pub enum LinkedChunkId<'a> { Thread(&'a RoomId, &'a EventId), } +impl Display for LinkedChunkId<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Room(room_id) => write!(f, "{room_id}"), + Self::Thread(room_id, thread_root) => { + write!(f, "{room_id}:thread:{thread_root}") + } + } + } +} + impl LinkedChunkId<'_> { pub fn storage_key(&self) -> impl '_ + AsRef<[u8]> { match self { @@ -168,12 +179,7 @@ pub enum OwnedLinkedChunkId { impl Display for OwnedLinkedChunkId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - OwnedLinkedChunkId::Room(room_id) => write!(f, "{room_id}"), - OwnedLinkedChunkId::Thread(room_id, thread_root) => { - write!(f, "{room_id}:thread:{thread_root}") - } - } + self.as_ref().fmt(f) } } From 54652daa66fa99e80d5924d1d595f4d5515e98fd Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Thu, 14 Aug 2025 10:14:18 -0400 Subject: [PATCH 04/16] feat(linked chunk): derive ser/de traits for OwnedLinkedChunkId Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-common/src/linked_chunk/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index a7bd5e7d261..6be53714535 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -107,6 +107,7 @@ use std::{ pub use as_vector::*; pub use order_tracker::OrderTracker; use ruma::{EventId, OwnedEventId, OwnedRoomId, RoomId}; +use serde::{Deserialize, Serialize}; pub use updates::*; /// An identifier for a linked chunk; borrowed variant. @@ -171,7 +172,7 @@ impl PartialEq> for OwnedLinkedChunkId { } /// An identifier for a linked chunk; owned variant. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum OwnedLinkedChunkId { Room(OwnedRoomId), Thread(OwnedRoomId, OwnedEventId), From bd817d4feb0cd477447bf97da581177d2c75c64c Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Thu, 14 Aug 2025 10:19:49 -0400 Subject: [PATCH 05/16] refactor(indexeddb): expose hash_key fn in serializer for keys represented as bytes rather than strings Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-indexeddb/src/serializer.rs | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/serializer.rs b/crates/matrix-sdk-indexeddb/src/serializer.rs index 43fab58088b..109885f4941 100644 --- a/crates/matrix-sdk-indexeddb/src/serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer.rs @@ -88,6 +88,27 @@ impl IndexeddbSerializer { Self { store_cipher } } + /// Hash the given key securely for the given tablename using the store + /// cipher. + /// + /// This works similarly to [`encode_key`](Self::encode_key), but skips + /// formatting and base64 encoding. This is useful for dealing with keys + /// that are represented as byte arrays, as it prevents having to convert + /// the byte array into a string and then back into a byte array. 
+ /// + /// **Note** that when dealing with keys which will be encoded as strings, + /// it is recommended to use [`encode_key`](Self::encode_key), as it + /// ensures that strings are safe for use as a key. + pub fn hash_key(&self, table_name: &str, key: T) -> Vec + where + T: AsRef<[u8]>, + { + match &self.store_cipher { + Some(cipher) => cipher.hash_key(table_name, key.as_ref()).into(), + None => key.as_ref().into(), + } + } + /// Hash the given key securely for the given tablename, using the store /// cipher. /// From a86abaf092739809f3dbc545f166f3da5d6fcd75 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 10:26:53 -0400 Subject: [PATCH 06/16] refactor(indexeddb): add room-based index to event object store in preparation for linked chunk id as primary key Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/migrations.rs | 11 +++++ .../src/event_cache_store/serializer/types.rs | 47 ++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 6745feca380..30649559f03 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -118,6 +118,8 @@ pub mod v1 { pub const LINKED_CHUNKS_NEXT_KEY_PATH: &str = "next"; pub const EVENTS: &str = "events"; pub const EVENTS_KEY_PATH: &str = "id"; + pub const EVENTS_ROOM: &str = "events_room"; + pub const EVENTS_ROOM_KEY_PATH: &str = "room"; pub const EVENTS_POSITION: &str = "events_position"; pub const EVENTS_POSITION_KEY_PATH: &str = "position"; pub const EVENTS_RELATION: &str = "events_relation"; @@ -169,6 +171,7 @@ pub mod v1 { /// Create an object store for tracking information about events. 
/// /// * Primary Key - `id` + /// * Index (unique) - `room` - tracks whether an event is in a given room /// * Index (unique) - `position` - tracks position of an event in linked /// chunks /// * Index - `relation` - tracks any event to which the given event is @@ -178,6 +181,14 @@ pub mod v1 { object_store_params.key_path(Some(&keys::EVENTS_KEY_PATH.into())); let events = db.create_object_store_with_params(keys::EVENTS, &object_store_params)?; + let events_room_params = IdbIndexParameters::new(); + events_room_params.set_unique(true); + events.create_index_with_params( + keys::EVENTS_ROOM, + &keys::EVENTS_ROOM_KEY_PATH.into(), + &events_room_params, + ); + let events_position_params = IdbIndexParameters::new(); events_position_params.set_unique(true); events.create_index_with_params( diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index 2bbd80ab667..abd796cee64 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -448,6 +448,9 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedNextC pub struct IndexedEvent { /// The primary key of the object store. pub id: IndexedEventIdKey, + /// An indexed key on the object store, which represents the room in which + /// the event exists + pub room: IndexedEventRoomKey, /// An indexed key on the object store, which represents the position of the /// event, if it is in a chunk. 
pub position: Option, @@ -478,6 +481,7 @@ impl Indexed for Event { ) -> Result { let event_id = self.event_id().ok_or(Self::Error::NoEventId)?; let id = IndexedEventIdKey::encode((self.room_id(), &event_id), serializer); + let room = IndexedEventRoomKey::encode((self.room_id(), &event_id), serializer); let position = self.position().map(|position| { IndexedEventPositionKey::encode((self.room_id(), position), serializer) }); @@ -487,7 +491,13 @@ impl Indexed for Event { serializer, ) }); - Ok(IndexedEvent { id, position, relation, content: serializer.maybe_encrypt_value(self)? }) + Ok(IndexedEvent { + id, + room, + position, + relation, + content: serializer.maybe_encrypt_value(self)?, + }) } fn from_indexed( @@ -530,6 +540,41 @@ impl IndexedPrefixKeyBounds for IndexedEventIdKey { pub type IndexedEventId = String; +/// The value associated with the [`room`](IndexedEvent::room) index of the +/// [`EVENTS`][1] object store, which is constructed from: +/// +/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Event ID.
+/// +/// [1]: crate::event_cache_store::migrations::v1::create_events_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedEventRoomKey(IndexedRoomId, IndexedEventId); + +impl IndexedKey for IndexedEventRoomKey { + const INDEX: Option<&'static str> = Some(keys::EVENTS_ROOM); + + type KeyComponents<'a> = (&'a RoomId, &'a EventId); + + fn encode( + (room_id, event_id): Self::KeyComponents<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id.as_str()); + let event_id = serializer.encode_key_as_string(keys::EVENTS, event_id); + Self(room_id, event_id) + } +} + +impl IndexedPrefixKeyBounds for IndexedEventRoomKey { + fn lower_key_with_prefix(room_id: &RoomId, serializer: &IndexeddbSerializer) -> Self { + Self::encode((room_id, &*INDEXED_KEY_LOWER_EVENT_ID), serializer) + } + + fn upper_key_with_prefix(room_id: &RoomId, serializer: &IndexeddbSerializer) -> Self { + Self::encode((room_id, &*INDEXED_KEY_UPPER_EVENT_ID), serializer) + } +} + /// The value associated with the [`position`](IndexedEvent::position) index of /// the [`EVENTS`][1] object store, which is constructed from: /// From 5643742f4724e3c20b6d63cd87d5bd83474df8b9 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:33:38 -0400 Subject: [PATCH 07/16] refactor(indexeddb): use room-based queries in event-related fns that don't use linked chunk ids Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/mod.rs | 4 ++-- .../src/event_cache_store/transaction.rs | 14 +++++++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 2d7ec100fbf..42cb98b76d6 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -488,7 +488,7 @@ impl_event_cache_store! 
{ let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; transaction - .get_event_by_id(room_id, event_id) + .get_event_by_room(room_id, event_id) .await .map(|ok| ok.map(Into::into)) .map_err(Into::into) @@ -544,7 +544,7 @@ impl_event_cache_store! { }; let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; - let event = match transaction.get_event_by_id(room_id, &event_id).await? { + let event = match transaction.get_event_by_room(room_id, &event_id).await? { Some(mut inner) => inner.with_content(event), None => types::Event::OutOfBand(OutOfBandEvent { room_id: room_id.to_owned(), content: event, position: () }), }; diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 8d26fd6cced..6a002ef166f 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -34,7 +34,8 @@ use crate::event_cache_store::{ }, types::{ IndexedChunkIdKey, IndexedEventIdKey, IndexedEventPositionKey, IndexedEventRelationKey, - IndexedGapIdKey, IndexedKeyRange, IndexedLeaseIdKey, IndexedNextChunkIdKey, + IndexedEventRoomKey, IndexedGapIdKey, IndexedKeyRange, IndexedLeaseIdKey, + IndexedNextChunkIdKey, }, IndexeddbEventCacheStoreSerializer, }, @@ -601,6 +602,17 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_item_by_key::(key).await } + /// Query IndexedDB for events that match the given event id in the given + /// room. If more than one item is found, an error is returned. + pub async fn get_event_by_room( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result, IndexeddbEventCacheStoreTransactionError> { + let key = self.serializer.encode_key((room_id, event_id)); + self.get_item_by_key::(key).await + } + /// Query IndexedDB for events in the given position range in the given /// room. 
pub async fn get_events_by_position( From 9ec058f520dfa62c9a1eae84e44c225221c07f2c Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:37:03 -0400 Subject: [PATCH 08/16] refactor(indexeddb): re-organize type synonyms in event_cache_store::serializer Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/serializer/types.rs | 51 ++++++++++++------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index abd796cee64..a3e9c270c19 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -231,6 +231,39 @@ impl From for IndexedKeyRange { } } +/// A (possibly) encrypted representation of a [`Lease`] +pub type IndexedLeaseContent = MaybeEncrypted; + +/// A (possibly) hashed representation of an [`RoomId`] which is suitable for +/// use in an IndexedDB key +pub type IndexedRoomId = String; + +/// A representation of a [`ChunkIdentifier`] which is suitable for use in an +/// IndexedDB key +pub type IndexedChunkId = u64; + +/// A (possibly) encrypted representation of an [`Event`] +pub type IndexedChunkContent = MaybeEncrypted; + +/// A (possibly) hashed representation of an [`EventId`] which is suitable for +/// use in an IndexedDB key +pub type IndexedEventId = String; + +/// A representation of the position of an [`Event`] in a [`Chunk`] which is +/// suitable for use in an IndexedDB key +pub type IndexedEventPositionIndex = usize; + +/// A (possibly) hashed representation of the relationship between two events +/// (see [`RelationType`](ruma::events::relation::RelationType)) which is +/// suitable for use in an IndexedDB key +pub type IndexedRelationType = String; + +/// A (possibly) encrypted representation of an [`Event`] +pub type IndexedEventContent = MaybeEncrypted; + +/// A 
(possibly) encrypted representation of a [`Gap`] +pub type IndexedGapContent = MaybeEncrypted; + /// Represents the [`LEASES`][1] object store. /// /// [1]: crate::event_cache_store::migrations::v1::create_lease_object_store @@ -293,8 +326,6 @@ impl IndexedKeyComponentBounds for IndexedLeaseIdKey { } } -pub type IndexedLeaseContent = MaybeEncrypted; - /// Represents the [`LINKED_CHUNKS`][1] object store. /// /// [1]: crate::event_cache_store::migrations::v1::create_linked_chunks_object_store @@ -374,10 +405,6 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedChunk } } -pub type IndexedRoomId = String; -pub type IndexedChunkId = u64; -pub type IndexedChunkContent = MaybeEncrypted; - /// The value associated with the [`next`](IndexedChunk::next) index of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// @@ -538,8 +565,6 @@ impl IndexedPrefixKeyBounds for IndexedEventIdKey { } } -pub type IndexedEventId = String; - /// The value associated with the [primary key](IndexedEvent::id) of the /// [`EVENTS`][1] object store, which is constructed from: /// @@ -632,8 +657,6 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (&'a RoomId, ChunkIdentifier } } -pub type IndexedEventPositionIndex = usize; - /// The value associated with the [`relation`](IndexedEvent::relation) index of /// the [`EVENTS`][1] object store, which is constructed from: /// @@ -703,12 +726,6 @@ impl IndexedPrefixKeyBounds for IndexedEventRelation } } -/// A representation of the relationship between two events (see -/// [`RelationType`](ruma::events::relation::RelationType)) -pub type IndexedRelationType = String; - -pub type IndexedEventContent = MaybeEncrypted; - /// Represents the [`GAPS`][1] object store. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_gaps_object_store @@ -776,5 +793,3 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, &'a RoomId> for IndexedGapIdKe ) } } - -pub type IndexedGapContent = MaybeEncrypted; From 07f8fe31c56f381d496b10f877b695aed94773c0 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:39:08 -0400 Subject: [PATCH 09/16] fix(indexeddb): integrate linked chunk id into relevant event-related types and fns Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/migrations.rs | 1 + .../src/event_cache_store/mod.rs | 30 ++--- .../src/event_cache_store/serializer/types.rs | 79 ++++++++----- .../src/event_cache_store/transaction.rs | 109 +++++++++++------- .../src/event_cache_store/types.rs | 26 ++++- 5 files changed, 153 insertions(+), 92 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 30649559f03..ef5db3eb5bf 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -112,6 +112,7 @@ pub mod v1 { pub const LEASES: &str = "leases"; pub const LEASES_KEY_PATH: &str = "id"; pub const ROOMS: &str = "rooms"; + pub const LINKED_CHUNK_IDS: &str = "linked_chunk_ids"; pub const LINKED_CHUNKS: &str = "linked_chunks"; pub const LINKED_CHUNKS_KEY_PATH: &str = "id"; pub const LINKED_CHUNKS_NEXT: &str = "linked_chunks_next"; diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 42cb98b76d6..96cae4c0c7f 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -168,8 +168,8 @@ impl_event_cache_store! 
{ ) -> Result<(), IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); + let owned_linked_chunk_id = linked_chunk_id.to_owned(); + let room_id = owned_linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::GAPS, keys::EVENTS], @@ -228,7 +228,7 @@ impl_event_cache_store! { transaction .put_event( &types::Event::InBand(InBandEvent { - room_id: room_id.to_owned(), + linked_chunk_id: linked_chunk_id.to_owned(), content: item, position: types::Position { chunk_identifier, @@ -248,7 +248,7 @@ impl_event_cache_store! { transaction .put_event( &types::Event::InBand(InBandEvent { - room_id: room_id.to_owned(), + linked_chunk_id: linked_chunk_id.to_owned(), content: item, position: at.into(), }), @@ -261,7 +261,7 @@ impl_event_cache_store! { trace!(%room_id, "removing item @ {chunk_id}:{index}"); - transaction.delete_event_by_position(room_id, at.into()).await?; + transaction.delete_event_by_position(linked_chunk_id, at.into()).await?; } Update::DetachLastItems { at } => { let chunk_id = at.chunk_identifier().index(); @@ -269,7 +269,7 @@ impl_event_cache_store! { trace!(%room_id, "detaching last items @ {chunk_id}:{index}"); - transaction.delete_events_by_chunk_from_index(room_id, at.into()).await?; + transaction.delete_events_by_chunk_from_index(linked_chunk_id, at.into()).await?; } Update::StartReattachItems | Update::EndReattachItems => { // Nothing? See sqlite implementation @@ -277,7 +277,7 @@ impl_event_cache_store! { Update::Clear => { trace!(%room_id, "clearing room"); transaction.delete_chunks_in_room(room_id).await?; - transaction.delete_events_in_room(room_id).await?; + transaction.delete_events_by_linked_chunk_id(linked_chunk_id).await?; transaction.delete_gaps_in_room(room_id).await?; } } @@ -331,8 +331,8 @@ impl_event_cache_store! { // https://github.com/matrix-org/matrix-rust-sdk/pull/5382. 
let _ = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); + let owned_linked_chunk_id = linked_chunk_id.to_owned(); + let room_id = owned_linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], @@ -343,7 +343,7 @@ impl_event_cache_store! { let chunks = transaction.get_chunks_in_room(room_id).await?; for chunk in chunks { let chunk_id = ChunkIdentifier::new(chunk.identifier); - let num_items = transaction.get_events_count_by_chunk(room_id, chunk_id).await?; + let num_items = transaction.get_events_count_by_chunk(linked_chunk_id, chunk_id).await?; raw_chunks.push(ChunkMetadata { num_items, previous: chunk.previous.map(ChunkIdentifier::new), @@ -462,14 +462,12 @@ impl_event_cache_store! { return Ok(Vec::new()); } - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; let mut duplicated = Vec::new(); for event_id in events { if let Some(types::Event::InBand(event)) = - transaction.get_event_by_id(room_id, &event_id).await? + transaction.get_event_by_id(linked_chunk_id, &event_id).await? { duplicated.push((event_id, event.position.into())); } @@ -546,7 +544,11 @@ impl_event_cache_store! { self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; let event = match transaction.get_event_by_room(room_id, &event_id).await? 
{ Some(mut inner) => inner.with_content(event), - None => types::Event::OutOfBand(OutOfBandEvent { room_id: room_id.to_owned(), content: event, position: () }), + None => types::Event::OutOfBand(OutOfBandEvent { + linked_chunk_id: LinkedChunkId::Room(room_id).to_owned(), + content: event, + position: () + }), }; transaction.put_event(&event).await?; transaction.commit().await?; diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index a3e9c270c19..2b7cf5bccbe 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -29,7 +29,7 @@ use std::sync::LazyLock; -use matrix_sdk_base::linked_chunk::ChunkIdentifier; +use matrix_sdk_base::linked_chunk::{ChunkIdentifier, LinkedChunkId}; use matrix_sdk_crypto::CryptoStoreError; use ruma::{events::relation::RelationType, EventId, OwnedEventId, RoomId}; use serde::{Deserialize, Serialize}; @@ -234,6 +234,10 @@ impl From for IndexedKeyRange { /// A (possibly) encrypted representation of a [`Lease`] pub type IndexedLeaseContent = MaybeEncrypted; +/// A (possibly) hashed representation of a [`LinkedChunkId`] which is suitable +/// for use in an IndexedDB key +pub type IndexedLinkedChunkId = Vec; + /// A (possibly) hashed representation of an [`RoomId`] which is suitable for /// use in an IndexedDB key pub type IndexedRoomId = String; @@ -507,10 +511,10 @@ impl Indexed for Event { serializer: &IndexeddbSerializer, ) -> Result { let event_id = self.event_id().ok_or(Self::Error::NoEventId)?; - let id = IndexedEventIdKey::encode((self.room_id(), &event_id), serializer); + let id = IndexedEventIdKey::encode((self.linked_chunk_id(), &event_id), serializer); let room = IndexedEventRoomKey::encode((self.room_id(), &event_id), serializer); let position = self.position().map(|position| { - IndexedEventPositionKey::encode((self.room_id(), 
position), serializer) + IndexedEventPositionKey::encode((self.linked_chunk_id(), position), serializer) }); let relation = self.relation().map(|(related_event, relation_type)| { IndexedEventRelationKey::encode( @@ -538,30 +542,40 @@ impl Indexed for Event { /// The value associated with the [primary key](IndexedEvent::id) of the /// [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Linked Chunk ID /// - The (possibly) encrypted Event ID. /// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedEventIdKey(IndexedRoomId, IndexedEventId); +pub struct IndexedEventIdKey(IndexedLinkedChunkId, IndexedEventId); impl IndexedKey for IndexedEventIdKey { - type KeyComponents<'a> = (&'a RoomId, &'a EventId); + type KeyComponents<'a> = (LinkedChunkId<'a>, &'a EventId); - fn encode((room_id, event_id): (&RoomId, &EventId), serializer: &IndexeddbSerializer) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + fn encode( + (linked_chunk_id, event_id): Self::KeyComponents<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); let event_id = serializer.encode_key_as_string(keys::EVENTS, event_id); - Self(room_id, event_id) + Self(linked_chunk_id, event_id) } } -impl IndexedPrefixKeyBounds for IndexedEventIdKey { - fn lower_key_with_prefix(room_id: &RoomId, serializer: &IndexeddbSerializer) -> Self { - Self::encode((room_id, &*INDEXED_KEY_LOWER_EVENT_ID), serializer) +impl IndexedPrefixKeyBounds> for IndexedEventIdKey { + fn lower_key_with_prefix( + linked_chunk_id: LinkedChunkId<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + Self::encode((linked_chunk_id, &*INDEXED_KEY_LOWER_EVENT_ID), serializer) } - fn upper_key_with_prefix(room_id: &RoomId, serializer: &IndexeddbSerializer) -> 
Self { - Self::encode((room_id, &*INDEXED_KEY_UPPER_EVENT_ID), serializer) + fn upper_key_with_prefix( + linked_chunk_id: LinkedChunkId<'_>, + serializer: &IndexeddbSerializer, + ) -> Self { + Self::encode((linked_chunk_id, &*INDEXED_KEY_UPPER_EVENT_ID), serializer) } } @@ -603,55 +617,60 @@ impl IndexedPrefixKeyBounds for IndexedEventRoomKey { /// The value associated with the [`position`](IndexedEvent::position) index of /// the [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Linked Chunk ID /// - The Chunk ID /// - The index of the event in the chunk. /// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedEventPositionKey(IndexedRoomId, IndexedChunkId, IndexedEventPositionIndex); +pub struct IndexedEventPositionKey(IndexedLinkedChunkId, IndexedChunkId, IndexedEventPositionIndex); impl IndexedKey for IndexedEventPositionKey { const INDEX: Option<&'static str> = Some(keys::EVENTS_POSITION); - type KeyComponents<'a> = (&'a RoomId, Position); + type KeyComponents<'a> = (LinkedChunkId<'a>, Position); fn encode( - (room_id, position): Self::KeyComponents<'_>, + (linked_chunk_id, position): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); - Self(room_id, position.chunk_identifier, position.index) + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); + Self(linked_chunk_id, position.chunk_identifier, position.index) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, &'a RoomId> for IndexedEventPositionKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_LOWER_EVENT_POSITION) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, LinkedChunkId<'a>> for IndexedEventPositionKey { + fn 
lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_LOWER_EVENT_POSITION) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_UPPER_EVENT_POSITION) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_UPPER_EVENT_POSITION) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (&'a RoomId, ChunkIdentifier)> +impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (LinkedChunkId<'a>, ChunkIdentifier)> for IndexedEventPositionKey { fn lower_key_components_with_prefix( - (room_id, chunk_id): (&'a RoomId, ChunkIdentifier), + (linked_chunk_id, chunk_id): (LinkedChunkId<'a>, ChunkIdentifier), ) -> Self::KeyComponents<'a> { ( - room_id, + linked_chunk_id, Position { chunk_identifier: chunk_id.index(), index: INDEXED_KEY_LOWER_EVENT_INDEX }, ) } fn upper_key_components_with_prefix( - (room_id, chunk_id): (&'a RoomId, ChunkIdentifier), + (linked_chunk_id, chunk_id): (LinkedChunkId<'a>, ChunkIdentifier), ) -> Self::KeyComponents<'a> { ( - room_id, + linked_chunk_id, Position { chunk_identifier: chunk_id.index(), index: INDEXED_KEY_UPPER_EVENT_INDEX }, ) } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 6a002ef166f..54d1098f7ce 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -15,7 +15,7 @@ use indexed_db_futures::{prelude::IdbTransaction, IdbQuerySource}; use matrix_sdk_base::{ event_cache::{store::EventCacheStoreError, Event as RawEvent, Gap as RawGap}, - linked_chunk::{ChunkContent, ChunkIdentifier, RawChunk}, + linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, RawChunk}, }; use ruma::{events::relation::RelationType, EventId, 
OwnedEventId, RoomId}; use serde::{ @@ -374,6 +374,23 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_by_key::(range).await } + /// Delete all items of type `T` by key `K` associated with the given linked + /// chunk id from IndexedDB + pub async fn delete_items_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result<(), IndexeddbEventCacheStoreTransactionError> + where + T: Indexed, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.delete_items_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + /// Delete all items of type `T` by key `K` in the given room from IndexedDB pub async fn delete_items_in_room<'b, T, K>( &self, @@ -488,7 +505,10 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { let content = match chunk.chunk_type { ChunkType::Event => { let events = self - .get_events_by_chunk(room_id, ChunkIdentifier::new(chunk.identifier)) + .get_events_by_chunk( + LinkedChunkId::Room(room_id), + ChunkIdentifier::new(chunk.identifier), + ) .await? .into_iter() .map(RawEvent::from) @@ -573,7 +593,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_item_by_key::((room_id, chunk_id)).await?; match chunk.chunk_type { ChunkType::Event => { - self.delete_events_by_chunk(room_id, chunk_id).await?; + self.delete_events_by_chunk(LinkedChunkId::Room(room_id), chunk_id).await?; } ChunkType::Gap => { self.delete_gap_by_id(room_id, chunk_id).await?; @@ -591,14 +611,14 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_in_room::(room_id).await } - /// Query IndexedDB for events that match the given event id in the given - /// room. If more than one item is found, an error is returned. + /// Query IndexedDB for events that match the given event id and the given + /// linked chunk id. If more than one item is found, an error is returned. 
pub async fn get_event_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, event_id: &EventId, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - let key = self.serializer.encode_key((room_id, event_id)); + let key = self.serializer.encode_key((linked_chunk_id, event_id)); self.get_item_by_key::(key).await } @@ -613,50 +633,53 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_item_by_key::(key).await } - /// Query IndexedDB for events in the given position range in the given - /// room. + /// Query IndexedDB for events in the given position range matching the + /// given linked chunk id. pub async fn get_events_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { self.get_items_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Query IndexedDB for number of events in the given position range in the - /// given room. + /// Query IndexedDB for number of events in the given position range + /// matching the given linked_chunk_id. pub async fn get_events_count_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result { self.get_items_count_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Query IndexedDB for events in the given chunk in the given room. + /// Query IndexedDB for events in the given chunk matching the given linked + /// chunk id. 
pub async fn get_events_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.get_items_by_key::(range).await } - /// Query IndexedDB for number of events in the given chunk in the given - /// room. + /// Query IndexedDB for number of events in the given chunk matching the + /// given linked chunk id. pub async fn get_events_count_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.get_items_count_by_key::(range).await } @@ -702,64 +725,66 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { // As a workaround, if the event has a position, we delete it first and // then call `put_item`. This should be fine as it all happens within the // context of a single transaction. 
- self.delete_event_by_position(event.room_id(), position).await?; + self.delete_event_by_position(event.linked_chunk_id(), position).await?; } self.put_item(event).await } - /// Delete events in the given position range in the given room + /// Delete events in the given position range matching the given linked + /// chunk id pub async fn delete_events_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, range: impl Into>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { self.delete_items_by_key_components::( - range.into().map(|position| (room_id, position)), + range.into().map(|position| (linked_chunk_id, position)), ) .await } - /// Delete event in the given position in the given room + /// Delete event in the given position matching the given linked chunk id pub async fn delete_event_by_position( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, position: Position, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_item_by_key::((room_id, position)).await + self.delete_item_by_key::((linked_chunk_id, position)).await } - /// Delete events in the given chunk in the given room + /// Delete events in the given chunk matching the given linked chunk id pub async fn delete_events_by_chunk( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix((room_id, chunk_id), self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix((linked_chunk_id, chunk_id), self.serializer.inner()); self.delete_items_by_key::(range).await } - /// Delete events starting from the given position in the given room - /// until the end of the chunk + /// Delete events matching the given linked chunk id starting from the given + /// position until the end of the chunk pub async fn delete_events_by_chunk_from_index( &self, - room_id: &RoomId, + linked_chunk_id: 
LinkedChunkId<'_>, position: Position, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - let lower = (room_id, position); + let lower = (linked_chunk_id, position); let upper = IndexedEventPositionKey::upper_key_components_with_prefix(( - room_id, + linked_chunk_id, ChunkIdentifier::new(position.chunk_identifier), )); let range = IndexedKeyRange::Bound(lower, upper).map(|(_, position)| position); - self.delete_events_by_position(room_id, range).await + self.delete_events_by_position(linked_chunk_id, range).await } - /// Delete all events in the given room - pub async fn delete_events_in_room( + /// Delete all events matching the given linked chunk id + pub async fn delete_events_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } /// Query IndexedDB for the gap in the given chunk in the given room. diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs index 2e0dded3ddb..85dbce2ccc0 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs @@ -15,8 +15,9 @@ use std::time::Duration; use matrix_sdk_base::{ - deserialized_responses::TimelineEvent, event_cache::store::extract_event_relation, - linked_chunk::ChunkIdentifier, + deserialized_responses::TimelineEvent, + event_cache::store::extract_event_relation, + linked_chunk::{ChunkIdentifier, LinkedChunkId, OwnedLinkedChunkId}, }; use ruma::{OwnedEventId, OwnedRoomId, RoomId}; use serde::{Deserialize, Serialize}; @@ -89,11 +90,19 @@ impl From for TimelineEvent { } impl Event { + /// The [`LinkedChunkId`] in which the underlying event exists. 
+ pub fn linked_chunk_id(&self) -> LinkedChunkId<'_> { + match self { + Event::InBand(e) => e.linked_chunk_id.as_ref(), + Event::OutOfBand(e) => e.linked_chunk_id.as_ref(), + } + } + /// The [`RoomId`] of the room in which the underlying event exists. pub fn room_id(&self) -> &RoomId { match self { - Event::InBand(e) => &e.room_id, - Event::OutOfBand(e) => &e.room_id, + Event::InBand(e) => e.room_id(), + Event::OutOfBand(e) => e.room_id(), } } @@ -142,8 +151,8 @@ impl Event { /// in-band or out-of-band. #[derive(Debug, Serialize, Deserialize)] pub struct GenericEvent

{ - /// The room in which the event exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the event exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The full content of the event. pub content: TimelineEvent, /// The position of the event, if it is in a chunk. @@ -151,6 +160,11 @@ pub struct GenericEvent

{ } impl

GenericEvent

{ + /// The [`RoomId`] of the room in which the event exists. + pub fn room_id(&self) -> &RoomId { + self.linked_chunk_id.room_id() + } + /// The [`OwnedEventId`] of the underlying event. pub fn event_id(&self) -> Option { self.content.event_id() From 6565f7291b53927c70b0cf084f21d10527c53ea2 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:41:46 -0400 Subject: [PATCH 10/16] fix(indexeddb): integrate linked chunk id into relevant chunk- and gap-related types and fns Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/mod.rs | 42 ++--- .../src/event_cache_store/serializer/types.rs | 80 +++++---- .../src/event_cache_store/transaction.rs | 158 ++++++++++++------ .../src/event_cache_store/types.rs | 8 +- 4 files changed, 172 insertions(+), 116 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 96cae4c0c7f..74410c5a4ab 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -183,7 +183,7 @@ impl_event_cache_store! { transaction .add_chunk( &types::Chunk { - room_id: room_id.to_owned(), + linked_chunk_id: linked_chunk_id.to_owned(), identifier: new.index(), previous: previous.map(|i| i.index()), next: next.map(|i| i.index()), @@ -197,7 +197,7 @@ impl_event_cache_store! { transaction .add_item( &types::Gap { - room_id: room_id.to_owned(), + linked_chunk_id: linked_chunk_id.to_owned(), chunk_identifier: new.index(), prev_token: gap.prev_token, }, @@ -206,7 +206,7 @@ impl_event_cache_store! { transaction .add_chunk( &types::Chunk { - room_id: room_id.to_owned(), + linked_chunk_id: linked_chunk_id.to_owned(), identifier: new.index(), previous: previous.map(|i| i.index()), next: next.map(|i| i.index()), @@ -217,7 +217,7 @@ impl_event_cache_store! 
{ } Update::RemoveChunk(chunk_id) => { trace!("Removing chunk {chunk_id:?}"); - transaction.delete_chunk_by_id(room_id, chunk_id).await?; + transaction.delete_chunk_by_id(linked_chunk_id, chunk_id).await?; } Update::PushItems { at, items } => { let chunk_identifier = at.chunk_identifier().index(); @@ -276,9 +276,9 @@ impl_event_cache_store! { } Update::Clear => { trace!(%room_id, "clearing room"); - transaction.delete_chunks_in_room(room_id).await?; + transaction.delete_chunks_by_linked_chunk_id(linked_chunk_id).await?; transaction.delete_events_by_linked_chunk_id(linked_chunk_id).await?; - transaction.delete_gaps_in_room(room_id).await?; + transaction.delete_gaps_by_linked_chunk_id(linked_chunk_id).await?; } } } @@ -293,19 +293,16 @@ impl_event_cache_store! { ) -> Result>, IndexeddbEventCacheStoreError> { let _ = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::GAPS, keys::EVENTS], IdbTransactionMode::Readwrite, )?; let mut raw_chunks = Vec::new(); - let chunks = transaction.get_chunks_in_room(room_id).await?; + let chunks = transaction.get_chunks_by_linked_chunk_id(linked_chunk_id).await?; for chunk in chunks { if let Some(raw_chunk) = transaction - .load_chunk_by_id(room_id, ChunkIdentifier::new(chunk.identifier)) + .load_chunk_by_id(linked_chunk_id, ChunkIdentifier::new(chunk.identifier)) .await? { raw_chunks.push(raw_chunk); @@ -331,16 +328,13 @@ impl_event_cache_store! { // https://github.com/matrix-org/matrix-rust-sdk/pull/5382. 
let _ = timer!("method"); - let owned_linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = owned_linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readwrite, )?; let mut raw_chunks = Vec::new(); - let chunks = transaction.get_chunks_in_room(room_id).await?; + let chunks = transaction.get_chunks_by_linked_chunk_id(linked_chunk_id).await?; for chunk in chunks { let chunk_id = ChunkIdentifier::new(chunk.identifier); let num_items = transaction.get_events_count_by_chunk(linked_chunk_id, chunk_id).await?; @@ -364,20 +358,20 @@ impl_event_cache_store! { > { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); + let owned_linked_chunk_id = linked_chunk_id.to_owned(); + let room_id = owned_linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readonly, )?; - if transaction.get_chunks_count_in_room(room_id).await? == 0 { + if transaction.get_chunks_count_by_linked_chunk_id(linked_chunk_id).await? == 0 { return Ok((None, ChunkIdentifierGenerator::new_from_scratch())); } // Now that we know we have some chunks in the room, we query IndexedDB // for the last chunk in the room by getting the chunk which does not // have a next chunk. - match transaction.get_chunk_by_next_chunk_id(room_id, None).await { + match transaction.get_chunk_by_next_chunk_id(linked_chunk_id, None).await { Err(IndexeddbEventCacheStoreTransactionError::ItemIsNotUnique) => { // If there are multiple chunks that do not have a next chunk, that // means we have more than one last chunk, which means that we have @@ -397,11 +391,11 @@ impl_event_cache_store! 
{ Ok(Some(last_chunk)) => { let last_chunk_identifier = ChunkIdentifier::new(last_chunk.identifier); let last_raw_chunk = transaction - .load_chunk_by_id(room_id, last_chunk_identifier) + .load_chunk_by_id(linked_chunk_id, last_chunk_identifier) .await? .ok_or(IndexeddbEventCacheStoreError::UnableToLoadChunk)?; let max_chunk_id = transaction - .get_max_chunk_by_id(room_id) + .get_max_chunk_by_id(linked_chunk_id) .await? .map(|chunk| ChunkIdentifier::new(chunk.identifier)) .ok_or(IndexeddbEventCacheStoreError::NoMaxChunkId)?; @@ -420,16 +414,14 @@ impl_event_cache_store! { ) -> Result>, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = linked_chunk_id.room_id(); let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readonly, )?; - if let Some(chunk) = transaction.get_chunk_by_id(room_id, before_chunk_identifier).await? { + if let Some(chunk) = transaction.get_chunk_by_id(linked_chunk_id, before_chunk_identifier).await? 
{ if let Some(previous_identifier) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous_identifier); - return Ok(transaction.load_chunk_by_id(room_id, previous_identifier).await?); + return Ok(transaction.load_chunk_by_id(linked_chunk_id, previous_identifier).await?); } } Ok(None) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index 2b7cf5bccbe..73476eccf1d 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -357,11 +357,11 @@ impl Indexed for Chunk { ) -> Result { Ok(IndexedChunk { id: >::encode( - (&self.room_id, ChunkIdentifier::new(self.identifier)), + (self.linked_chunk_id.as_ref(), ChunkIdentifier::new(self.identifier)), serializer, ), next: IndexedNextChunkIdKey::encode( - (&self.room_id, self.next.map(ChunkIdentifier::new)), + (self.linked_chunk_id.as_ref(), self.next.map(ChunkIdentifier::new)), serializer, ), content: serializer.maybe_encrypt_value(self)?, @@ -379,40 +379,45 @@ impl Indexed for Chunk { /// The value associated with the [primary key](IndexedChunk::id) of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Linked Chunk ID /// - The Chunk ID. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_linked_chunks_object_store #[derive(Debug, Serialize, Deserialize)] -pub struct IndexedChunkIdKey(IndexedRoomId, IndexedChunkId); +pub struct IndexedChunkIdKey(IndexedLinkedChunkId, IndexedChunkId); impl IndexedKey for IndexedChunkIdKey { - type KeyComponents<'a> = (&'a RoomId, ChunkIdentifier); + type KeyComponents<'a> = (LinkedChunkId<'a>, ChunkIdentifier); fn encode( - (room_id, chunk_id): Self::KeyComponents<'_>, + (linked_chunk_id, chunk_id): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + let linked_chunk_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); let chunk_id = chunk_id.index(); - Self(room_id, chunk_id) + Self(linked_chunk_id, chunk_id) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedChunkIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_LOWER_CHUNK_IDENTIFIER) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, LinkedChunkId<'a>> for IndexedChunkIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_LOWER_CHUNK_IDENTIFIER) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, *INDEXED_KEY_UPPER_CHUNK_IDENTIFIER) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, *INDEXED_KEY_UPPER_CHUNK_IDENTIFIER) } } /// The value associated with the [`next`](IndexedChunk::next) index of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Linked Chunk ID /// - The Chunk ID, if there is a next chunk in the list. 
/// /// Note: it would be more convenient to represent this type with an optional @@ -428,47 +433,52 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedChunk #[serde(untagged)] pub enum IndexedNextChunkIdKey { /// There is no next chunk. - None((IndexedRoomId,)), + None((IndexedLinkedChunkId,)), /// The identifier of the next chunk in the list. Some(IndexedChunkIdKey), } impl IndexedNextChunkIdKey { - pub fn none(room_id: IndexedRoomId) -> Self { - Self::None((room_id,)) + pub fn none(linked_chunk_id: IndexedLinkedChunkId) -> Self { + Self::None((linked_chunk_id,)) } } impl IndexedKey for IndexedNextChunkIdKey { const INDEX: Option<&'static str> = Some(keys::LINKED_CHUNKS_NEXT); - type KeyComponents<'a> = (&'a RoomId, Option); + type KeyComponents<'a> = (LinkedChunkId<'a>, Option); fn encode( - (room_id, next_chunk_id): Self::KeyComponents<'_>, + (linked_chunk_id, next_chunk_id): Self::KeyComponents<'_>, serializer: &IndexeddbSerializer, ) -> Self { next_chunk_id .map(|id| { Self::Some(>::encode( - (room_id, id), + (linked_chunk_id, id), serializer, )) }) .unwrap_or_else(|| { - let room_id = serializer.encode_key_as_string(keys::ROOMS, room_id); + let room_id = + serializer.hash_key(keys::LINKED_CHUNK_IDS, linked_chunk_id.storage_key()); Self::none(room_id) }) } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, &'a RoomId> for IndexedNextChunkIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, None) +impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, LinkedChunkId<'a>> for IndexedNextChunkIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { + (linked_chunk_id, None) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { - (room_id, Some(*INDEXED_KEY_UPPER_CHUNK_IDENTIFIER)) + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> 
{ + (linked_chunk_id, Some(*INDEXED_KEY_UPPER_CHUNK_IDENTIFIER)) } } @@ -768,7 +778,7 @@ impl Indexed for Gap { ) -> Result { Ok(IndexedGap { id: >::encode( - (&self.room_id, ChunkIdentifier::new(self.chunk_identifier)), + (self.linked_chunk_id.as_ref(), ChunkIdentifier::new(self.chunk_identifier)), serializer, ), content: serializer.maybe_encrypt_value(self)?, @@ -785,7 +795,7 @@ impl Indexed for Gap { /// The primary key of the [`GAPS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID +/// - The (possibly) encrypted Linked Chunk ID /// - The Chunk ID /// /// [1]: crate::event_cache_store::migrations::v1::create_gaps_object_store @@ -799,16 +809,20 @@ impl IndexedKey for IndexedGapIdKey { } } -impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, &'a RoomId> for IndexedGapIdKey { - fn lower_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { +impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, LinkedChunkId<'a>> for IndexedGapIdKey { + fn lower_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { >::lower_key_components_with_prefix( - room_id, + linked_chunk_id, ) } - fn upper_key_components_with_prefix(room_id: &'a RoomId) -> Self::KeyComponents<'a> { + fn upper_key_components_with_prefix( + linked_chunk_id: LinkedChunkId<'a>, + ) -> Self::KeyComponents<'a> { >::upper_key_components_with_prefix( - room_id, + linked_chunk_id, ) } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 54d1098f7ce..0703bf19f43 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -149,6 +149,25 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_by_key::(range).await } + /// Query IndexedDB for all items matching the given linked chunk id by key + /// `K` + pub async fn 
get_items_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result, IndexeddbEventCacheStoreTransactionError> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.get_items_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + /// Query IndexedDB for all items in the given room by key `K` pub async fn get_items_in_room<'b, T, K>( &self, @@ -243,6 +262,25 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_count_by_key::(range).await } + /// Query IndexedDB for the number of items matching the given linked chunk + /// id. + pub async fn get_items_count_by_linked_chunk_id<'b, T, K>( + &self, + linked_chunk_id: LinkedChunkId<'b>, + ) -> Result + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedPrefixKeyBounds> + Serialize, + { + self.get_items_count_by_key::(IndexedKeyRange::all_with_prefix( + linked_chunk_id, + self.serializer.inner(), + )) + .await + } + /// Query IndexedDB for the number of items in the given room. pub async fn get_items_count_in_room<'b, T, K>( &self, @@ -446,67 +484,75 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.put_item(lease).await } - /// Query IndexedDB for chunks that match the given chunk identifier in the - /// given room. If more than one item is found, an error is returned. + /// Query IndexedDB for chunks that match the given chunk identifier and the + /// given linked chunk id. If more than one item is found, an error is + /// returned. 
pub async fn get_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, chunk_id)).await + self.get_item_by_key_components::((linked_chunk_id, chunk_id)) + .await } /// Query IndexedDB for chunks such that the next chunk matches the given - /// chunk identifier in the given room. If more than one item is found, - /// an error is returned. + /// chunk identifier and the given linked chunk id. If more than one item is + /// found, an error is returned. pub async fn get_chunk_by_next_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, next_chunk_id: Option, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, next_chunk_id)) - .await + self.get_item_by_key_components::(( + linked_chunk_id, + next_chunk_id, + )) + .await } - /// Query IndexedDB for all chunks in the given room - pub async fn get_chunks_in_room( + /// Query IndexedDB for all chunks matching the given linked chunk id + pub async fn get_chunks_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_items_in_room::(room_id).await + self.get_items_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the number of chunks in the given room. - pub async fn get_chunks_count_in_room( + /// Query IndexedDB for the number of chunks matching the given linked chunk + /// id. + pub async fn get_chunks_count_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result { - self.get_items_count_in_room::(room_id).await + self.get_items_count_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the chunk with the maximum key in the given room. 
+ /// Query IndexedDB for the chunk with the maximum key matching the given + /// linked chunk id. pub async fn get_max_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - let range = IndexedKeyRange::all_with_prefix::(room_id, self.serializer.inner()); + let range = + IndexedKeyRange::all_with_prefix::(linked_chunk_id, self.serializer.inner()); self.get_max_item_by_key::(range).await } - /// Query IndexedDB for given chunk in given room and additionally query - /// for events or gap, depending on chunk type, in order to construct the - /// full chunk. + /// Query IndexedDB for given chunk matching the given linked chunk id and + /// additionally query for events or gap, depending on chunk type, in + /// order to construct the full chunk. pub async fn load_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result>, IndexeddbEventCacheStoreTransactionError> { - if let Some(chunk) = self.get_chunk_by_id(room_id, chunk_id).await? { + if let Some(chunk) = self.get_chunk_by_id(linked_chunk_id, chunk_id).await? { let content = match chunk.chunk_type { ChunkType::Event => { let events = self .get_events_by_chunk( - LinkedChunkId::Room(room_id), + linked_chunk_id, ChunkIdentifier::new(chunk.identifier), ) .await? @@ -517,7 +563,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } ChunkType::Gap => { let gap = self - .get_gap_by_id(room_id, ChunkIdentifier::new(chunk.identifier)) + .get_gap_by_id(linked_chunk_id, ChunkIdentifier::new(chunk.identifier)) .await? 
.ok_or(IndexeddbEventCacheStoreTransactionError::ItemNotFound)?; ChunkContent::Gap(RawGap { prev_token: gap.prev_token }) @@ -533,7 +579,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(None) } - /// Add a chunk to the given room and ensure that the next and previous + /// Add a chunk and ensure that the next and previous /// chunks are properly linked to the chunk being added. If a chunk with /// the same identifier already exists, the given chunk will be /// rejected. @@ -545,7 +591,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { if let Some(previous) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous); if let Some(mut previous_chunk) = - self.get_chunk_by_id(&chunk.room_id, previous_identifier).await? + self.get_chunk_by_id(chunk.linked_chunk_id.as_ref(), previous_identifier).await? { previous_chunk.next = Some(chunk.identifier); self.put_item(&previous_chunk).await?; @@ -554,7 +600,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { if let Some(next) = chunk.next { let next_identifier = ChunkIdentifier::new(next); if let Some(mut next_chunk) = - self.get_chunk_by_id(&chunk.room_id, next_identifier).await? + self.get_chunk_by_id(chunk.linked_chunk_id.as_ref(), next_identifier).await? { next_chunk.previous = Some(chunk.identifier); self.put_item(&next_chunk).await?; @@ -563,20 +609,20 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(()) } - /// Delete chunk that matches the given id in the given room and ensure that - /// the next and previous chunk are updated to link to one another. - /// Additionally, ensure that events and gaps in the given chunk are - /// also deleted. + /// Delete chunk that matches the given id and the given linked chunk id and + /// ensure that the next and previous chunk are updated to link to one + /// another. Additionally, ensure that events and gaps in the given + /// chunk are also deleted. 
pub async fn delete_chunk_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - if let Some(chunk) = self.get_chunk_by_id(room_id, chunk_id).await? { + if let Some(chunk) = self.get_chunk_by_id(linked_chunk_id, chunk_id).await? { if let Some(previous) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous); if let Some(mut previous_chunk) = - self.get_chunk_by_id(room_id, previous_identifier).await? + self.get_chunk_by_id(linked_chunk_id, previous_identifier).await? { previous_chunk.next = chunk.next; self.put_item(&previous_chunk).await?; @@ -584,19 +630,21 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } if let Some(next) = chunk.next { let next_identifier = ChunkIdentifier::new(next); - if let Some(mut next_chunk) = self.get_chunk_by_id(room_id, next_identifier).await? + if let Some(mut next_chunk) = + self.get_chunk_by_id(linked_chunk_id, next_identifier).await? 
{ next_chunk.previous = chunk.previous; self.put_item(&next_chunk).await?; } } - self.delete_item_by_key::((room_id, chunk_id)).await?; + self.delete_item_by_key::((linked_chunk_id, chunk_id)) + .await?; match chunk.chunk_type { ChunkType::Event => { - self.delete_events_by_chunk(LinkedChunkId::Room(room_id), chunk_id).await?; + self.delete_events_by_chunk(linked_chunk_id, chunk_id).await?; } ChunkType::Gap => { - self.delete_gap_by_id(room_id, chunk_id).await?; + self.delete_gap_by_id(linked_chunk_id, chunk_id).await?; } } } @@ -604,11 +652,11 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } /// Delete all chunks in the given room - pub async fn delete_chunks_in_room( + pub async fn delete_chunks_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } /// Query IndexedDB for events that match the given event id and the given @@ -787,29 +835,31 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } - /// Query IndexedDB for the gap in the given chunk in the given room. + /// Query IndexedDB for the gap in the given chunk matching the given linked + /// chunk id. 
pub async fn get_gap_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::((room_id, chunk_id)).await + self.get_item_by_key_components::((linked_chunk_id, chunk_id)).await } - /// Delete gap that matches the given chunk identifier in the given room + /// Delete gap that matches the given chunk identifier and the given linked + /// chunk id pub async fn delete_gap_by_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, chunk_id: ChunkIdentifier, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_item_by_key::((room_id, chunk_id)).await + self.delete_item_by_key::((linked_chunk_id, chunk_id)).await } - /// Delete all gaps in the given room - pub async fn delete_gaps_in_room( + /// Delete all gaps matching the given linked chunk id + pub async fn delete_gaps_by_linked_chunk_id( &self, - room_id: &RoomId, + linked_chunk_id: LinkedChunkId<'_>, ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { - self.delete_items_in_room::(room_id).await + self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs index 85dbce2ccc0..62523fadb8c 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs @@ -42,8 +42,8 @@ impl Lease { /// which can be stored in IndexedDB. #[derive(Debug, Serialize, Deserialize)] pub struct Chunk { - /// The room in which the chunk exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the chunk exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The identifier of the chunk - i.e., /// [`ChunkIdentifier`](matrix_sdk_base::linked_chunk::ChunkIdentifier). 
pub identifier: u64, @@ -213,8 +213,8 @@ impl From for Position { /// which can be stored in IndexedDB. #[derive(Debug, Serialize, Deserialize)] pub struct Gap { - /// The room in which the gap exists. - pub room_id: OwnedRoomId, + /// The linked chunk id in which the gap exists. + pub linked_chunk_id: OwnedLinkedChunkId, /// The identifier of the chunk containing this gap. pub chunk_identifier: u64, /// The token to use in the query, extracted from a previous "from" / From 17fc25152f84ceb923b9ee868c5de8ea29271e6f Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:43:34 -0400 Subject: [PATCH 11/16] test(indexeddb): use event cache store integration tests from matrix_sdk_base Signed-off-by: Michael Goldenberg --- .../event_cache_store/integration_tests.rs | 95 ------------------- .../src/event_cache_store/mod.rs | 5 +- 2 files changed, 2 insertions(+), 98 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs index 1acea02313a..ffe6a12406a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs @@ -684,98 +684,3 @@ macro_rules! indexeddb_event_cache_store_integration_tests { } }; } - -// This is copied from `matrix_sdk_base::event_cache::store::integration_tests` -// for the time being, because the IndexedDB implementation of `EventCacheStore` -// is being completed iteratively. So, we are only bringing over the tests -// relevant to the implemented functions. At the moment, this includes the -// following. -// -// - EventCacheStore::handle_linked_chunk_updates -// - EventCacheStore::load_all_chunks -// -// When all functions are implemented, we can get rid of this macro and use the -// one from `matrix_sdk_base`. -#[macro_export] -macro_rules! 
event_cache_store_integration_tests { - () => { - mod event_cache_store_integration_tests { - use matrix_sdk_base::event_cache::store::{ - EventCacheStoreIntegrationTests, IntoEventCacheStore, - }; - use matrix_sdk_test::async_test; - - use super::get_event_cache_store; - - #[async_test] - async fn test_handle_updates_and_rebuild_linked_chunk() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_handle_updates_and_rebuild_linked_chunk().await; - } - - #[async_test] - async fn test_linked_chunk_incremental_loading() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_linked_chunk_incremental_loading().await; - } - - #[async_test] - async fn test_rebuild_empty_linked_chunk() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_rebuild_empty_linked_chunk().await; - } - - #[async_test] - async fn test_load_all_chunks_metadata() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_load_all_chunks_metadata().await; - } - - #[async_test] - async fn test_clear_all_linked_chunks() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_clear_all_linked_chunks().await; - } - - #[async_test] - async fn test_remove_room() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_remove_room().await; - } - - #[async_test] - async fn test_filter_duplicated_events() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_filter_duplicated_events().await; - } - - #[async_test] - async fn test_find_event() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - 
event_cache_store.test_find_event().await; - } - - #[async_test] - async fn test_find_event_relations() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_find_event_relations().await; - } - - #[async_test] - async fn test_save_event() { - let event_cache_store = - get_event_cache_store().await.unwrap().into_event_cache_store(); - event_cache_store.test_save_event().await; - } - } - }; -} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 74410c5a4ab..87e2b9c37e9 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -667,14 +667,13 @@ impl_event_cache_store! { mod tests { use matrix_sdk_base::{ event_cache::store::{EventCacheStore, EventCacheStoreError}, - event_cache_store_integration_tests_time, + event_cache_store_integration_tests, event_cache_store_integration_tests_time, }; use matrix_sdk_test::async_test; use uuid::Uuid; use crate::{ - event_cache_store::IndexeddbEventCacheStore, event_cache_store_integration_tests, - indexeddb_event_cache_store_integration_tests, + event_cache_store::IndexeddbEventCacheStore, indexeddb_event_cache_store_integration_tests, }; mod unencrypted { From a4edef19268717278613730387d59f5e24bea271 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 15 Aug 2025 13:49:14 -0400 Subject: [PATCH 12/16] refactor(indexeddb): log linked chunk id rather than room id in handle_linked_chunk_updates Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/mod.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 87e2b9c37e9..dc5d78324af 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -168,9 +168,6 @@ impl_event_cache_store! { ) -> Result<(), IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let owned_linked_chunk_id = linked_chunk_id.to_owned(); - let room_id = owned_linked_chunk_id.room_id(); - let transaction = self.transaction( &[keys::LINKED_CHUNKS, keys::GAPS, keys::EVENTS], IdbTransactionMode::Readwrite, @@ -179,7 +176,7 @@ impl_event_cache_store! { for update in updates { match update { Update::NewItemsChunk { previous, new, next } => { - trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={new:?}, next={next:?})"); + trace!(%linked_chunk_id, "Inserting new chunk (prev={previous:?}, new={new:?}, next={next:?})"); transaction .add_chunk( &types::Chunk { @@ -193,7 +190,7 @@ impl_event_cache_store! { .await?; } Update::NewGapChunk { previous, new, next, gap } => { - trace!(%room_id, "Inserting new gap (prev={previous:?}, new={new:?}, next={next:?})"); + trace!(%linked_chunk_id, "Inserting new gap (prev={previous:?}, new={new:?}, next={next:?})"); transaction .add_item( &types::Gap { @@ -216,13 +213,13 @@ impl_event_cache_store! { .await?; } Update::RemoveChunk(chunk_id) => { - trace!("Removing chunk {chunk_id:?}"); + trace!(%linked_chunk_id, "Removing chunk {chunk_id:?}"); transaction.delete_chunk_by_id(linked_chunk_id, chunk_id).await?; } Update::PushItems { at, items } => { let chunk_identifier = at.chunk_identifier().index(); - trace!(%room_id, "pushing {} items @ {chunk_identifier}", items.len()); + trace!(%linked_chunk_id, "pushing {} items @ {chunk_identifier}", items.len()); for (i, item) in items.into_iter().enumerate() { transaction @@ -243,7 +240,7 @@ impl_event_cache_store! { let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "replacing item @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "replacing item @ {chunk_id}:{index}"); transaction .put_event( @@ -259,7 +256,7 @@ impl_event_cache_store! 
{ let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "removing item @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "removing item @ {chunk_id}:{index}"); transaction.delete_event_by_position(linked_chunk_id, at.into()).await?; } @@ -267,7 +264,7 @@ impl_event_cache_store! { let chunk_id = at.chunk_identifier().index(); let index = at.index(); - trace!(%room_id, "detaching last items @ {chunk_id}:{index}"); + trace!(%linked_chunk_id, "detaching last items @ {chunk_id}:{index}"); transaction.delete_events_by_chunk_from_index(linked_chunk_id, at.into()).await?; } @@ -275,7 +272,7 @@ impl_event_cache_store! { // Nothing? See sqlite implementation } Update::Clear => { - trace!(%room_id, "clearing room"); + trace!(%linked_chunk_id, "clearing room"); transaction.delete_chunks_by_linked_chunk_id(linked_chunk_id).await?; transaction.delete_events_by_linked_chunk_id(linked_chunk_id).await?; transaction.delete_gaps_by_linked_chunk_id(linked_chunk_id).await?; From 1e2bc5ad75a98ef686caf18e03f5809dcccb2a00 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Sun, 17 Aug 2025 11:58:55 -0400 Subject: [PATCH 13/16] docs(indexeddb): remove references to room where relevant in transaction docs Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/transaction.rs | 39 +++++++++---------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 0703bf19f43..760fb95fe83 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -104,8 +104,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.transaction.await.into_result().map_err(Into::into) } - /// Query IndexedDB for items that match the given key range in the given - /// room. 
+ /// Query IndexedDB for items that match the given key range pub async fn get_items_by_key( &self, range: impl Into>, @@ -133,8 +132,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items) } - /// Query IndexedDB for items that match the given key component range in - /// the given room. + /// Query IndexedDB for items that match the given key component range pub async fn get_items_by_key_components<'b, T, K>( &self, range: impl Into>>, @@ -168,7 +166,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Query IndexedDB for all items in the given room by key `K` + /// Query IndexedDB for all items of type `T` by key `K` in the given room pub async fn get_items_in_room<'b, T, K>( &self, room_id: &'b RoomId, @@ -186,7 +184,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Query IndexedDB for items that match the given key in the given room. If + /// Query IndexedDB for items that match the given key. If /// more than one item is found, an error is returned. pub async fn get_item_by_key( &self, @@ -205,8 +203,8 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items.pop()) } - /// Query IndexedDB for items that match the given key components in the - /// given room. If more than one item is found, an error is returned. + /// Query IndexedDB for items that match the given key components. If more + /// than one item is found, an error is returned. pub async fn get_item_by_key_components<'b, T, K>( &self, components: K::KeyComponents<'b>, @@ -224,8 +222,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(items.pop()) } - /// Query IndexedDB for the number of items that match the given key range - /// in the given room. + /// Query IndexedDB for the number of items that match the given key range. 
pub async fn get_items_count_by_key( &self, range: impl Into>, @@ -247,7 +244,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } /// Query IndexedDB for the number of items that match the given key - /// components range in the given room. + /// components range. pub async fn get_items_count_by_key_components<'b, T, K>( &self, range: impl Into>>, @@ -281,7 +278,8 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Query IndexedDB for the number of items in the given room. + /// Query IndexedDB for the number of items of type `T` by `K` in the given + /// room. pub async fn get_items_count_in_room<'b, T, K>( &self, room_id: &'b RoomId, @@ -331,7 +329,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { } } - /// Adds an item to the given room in the corresponding IndexedDB object + /// Adds an item to the corresponding IndexedDB object /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already /// exists, it will be rejected. pub async fn add_item( @@ -352,7 +350,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .map_err(Into::into) } - /// Puts an item in the given room in the corresponding IndexedDB object + /// Puts an item in the corresponding IndexedDB object /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already /// exists, it will be overwritten. 
pub async fn put_item( @@ -373,7 +371,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .map_err(Into::into) } - /// Delete items in given key range in the given room from IndexedDB + /// Delete items in given key range from IndexedDB pub async fn delete_items_by_key( &self, range: impl Into>, @@ -398,7 +396,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(()) } - /// Delete items in the given key component range in the given room from + /// Delete items in the given key component range from /// IndexedDB pub async fn delete_items_by_key_components<'b, T, K>( &self, @@ -445,7 +443,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { .await } - /// Delete item that matches the given key components in the given room from + /// Delete item that matches the given key components from /// IndexedDB pub async fn delete_item_by_key<'b, T, K>( &self, @@ -458,7 +456,8 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.delete_items_by_key_components::(key).await } - /// Clear all items of type `T` in all rooms from IndexedDB + /// Clear all items of type `T` from the associated object store + /// `T::OBJECT_STORE` from IndexedDB pub async fn clear(&self) -> Result<(), IndexeddbEventCacheStoreTransactionError> where T: Indexed, @@ -651,7 +650,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { Ok(()) } - /// Delete all chunks in the given room + /// Delete all chunks associated with the given linked chunk id pub async fn delete_chunks_by_linked_chunk_id( &self, linked_chunk_id: LinkedChunkId<'_>, @@ -757,7 +756,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { self.get_items_by_key::(range).await } - /// Puts an event in the given room. If an event with the same key already + /// Puts an event in IndexedDB. If an event with the same key already /// exists, it will be overwritten. 
pub async fn put_event( &self, From d210e60e5b0ef3ba89594ec602f764c0ac83d608 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Sun, 17 Aug 2025 12:02:37 -0400 Subject: [PATCH 14/16] docs(indexeddb): correct key docs to express that keys are hashed, not encrypted Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/serializer/types.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index 73476eccf1d..fcb67a648a9 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -379,7 +379,7 @@ impl Indexed for Chunk { /// The value associated with the [primary key](IndexedChunk::id) of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Linked Chunk ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID. /// /// [1]: crate::event_cache_store::migrations::v1::create_linked_chunks_object_store @@ -417,7 +417,7 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Chunk, LinkedChunkId<'a>> for Index /// The value associated with the [`next`](IndexedChunk::next) index of the /// [`LINKED_CHUNKS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Linked Chunk ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID, if there is a next chunk in the list. /// /// Note: it would be more convenient to represent this type with an optional @@ -552,8 +552,8 @@ impl Indexed for Event { /// The value associated with the [primary key](IndexedEvent::id) of the /// [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Linked Chunk ID -/// - The (possibly) encrypted Event ID. +/// - The (possibly) hashed Linked Chunk ID +/// - The (possibly) hashed Event ID. 
/// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] @@ -592,8 +592,8 @@ impl IndexedPrefixKeyBounds> for IndexedEventIdKey { /// The value associated with the [primary key](IndexedEvent::id) of the /// [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID -/// - The (possibly) encrypted Event ID. +/// - The (possibly) hashed Room ID +/// - The (possibly) hashed Event ID. /// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store #[derive(Debug, Serialize, Deserialize)] @@ -627,7 +627,7 @@ impl IndexedPrefixKeyBounds for IndexedEventRoomKey { /// The value associated with the [`position`](IndexedEvent::position) index of /// the [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Linked Chunk ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID /// - The index of the event in the chunk. /// @@ -689,8 +689,8 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Event, (LinkedChunkId<'a>, ChunkIde /// The value associated with the [`relation`](IndexedEvent::relation) index of /// the [`EVENTS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Room ID -/// - The (possibly) encrypted Event ID of the related event +/// - The (possibly) hashed Room ID +/// - The (possibly) hashed Event ID of the related event /// - The type of relationship between the events /// /// [1]: crate::event_cache_store::migrations::v1::create_events_object_store @@ -795,7 +795,7 @@ impl Indexed for Gap { /// The primary key of the [`GAPS`][1] object store, which is constructed from: /// -/// - The (possibly) encrypted Linked Chunk ID +/// - The (possibly) hashed Linked Chunk ID /// - The Chunk ID /// /// [1]: crate::event_cache_store::migrations::v1::create_gaps_object_store From 82faedd560d49eb3ec2c4dd459154422f129ec34 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Sun, 17 
Aug 2025 12:05:05 -0400 Subject: [PATCH 15/16] style(indexeddb): format event cache store impl by temporarily removing enclosing macro Signed-off-by: Michael Goldenberg --- .../src/event_cache_store/mod.rs | 111 ++++++++---------- 1 file changed, 51 insertions(+), 60 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index dc5d78324af..72376cbcc82 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -178,38 +178,32 @@ impl_event_cache_store! { Update::NewItemsChunk { previous, new, next } => { trace!(%linked_chunk_id, "Inserting new chunk (prev={previous:?}, new={new:?}, next={next:?})"); transaction - .add_chunk( - &types::Chunk { - linked_chunk_id: linked_chunk_id.to_owned(), - identifier: new.index(), - previous: previous.map(|i| i.index()), - next: next.map(|i| i.index()), - chunk_type: ChunkType::Event, - }, - ) + .add_chunk(&types::Chunk { + linked_chunk_id: linked_chunk_id.to_owned(), + identifier: new.index(), + previous: previous.map(|i| i.index()), + next: next.map(|i| i.index()), + chunk_type: ChunkType::Event, + }) .await?; } Update::NewGapChunk { previous, new, next, gap } => { trace!(%linked_chunk_id, "Inserting new gap (prev={previous:?}, new={new:?}, next={next:?})"); transaction - .add_item( - &types::Gap { - linked_chunk_id: linked_chunk_id.to_owned(), - chunk_identifier: new.index(), - prev_token: gap.prev_token, - }, - ) + .add_item(&types::Gap { + linked_chunk_id: linked_chunk_id.to_owned(), + chunk_identifier: new.index(), + prev_token: gap.prev_token, + }) .await?; transaction - .add_chunk( - &types::Chunk { - linked_chunk_id: linked_chunk_id.to_owned(), - identifier: new.index(), - previous: previous.map(|i| i.index()), - next: next.map(|i| i.index()), - chunk_type: ChunkType::Gap, - }, - ) + .add_chunk(&types::Chunk { + linked_chunk_id: linked_chunk_id.to_owned(), + 
identifier: new.index(), + previous: previous.map(|i| i.index()), + next: next.map(|i| i.index()), + chunk_type: ChunkType::Gap, + }) .await?; } Update::RemoveChunk(chunk_id) => { @@ -223,16 +217,14 @@ impl_event_cache_store! { for (i, item) in items.into_iter().enumerate() { transaction - .put_event( - &types::Event::InBand(InBandEvent { - linked_chunk_id: linked_chunk_id.to_owned(), - content: item, - position: types::Position { - chunk_identifier, - index: at.index() + i, - }, - }), - ) + .put_event(&types::Event::InBand(InBandEvent { + linked_chunk_id: linked_chunk_id.to_owned(), + content: item, + position: types::Position { + chunk_identifier, + index: at.index() + i, + }, + })) .await?; } } @@ -243,13 +235,11 @@ impl_event_cache_store! { trace!(%linked_chunk_id, "replacing item @ {chunk_id}:{index}"); transaction - .put_event( - &types::Event::InBand(InBandEvent { - linked_chunk_id: linked_chunk_id.to_owned(), - content: item, - position: at.into(), - }), - ) + .put_event(&types::Event::InBand(InBandEvent { + linked_chunk_id: linked_chunk_id.to_owned(), + content: item, + position: at.into(), + })) .await?; } Update::RemoveItem { at } => { @@ -266,7 +256,9 @@ impl_event_cache_store! { trace!(%linked_chunk_id, "detaching last items @ {chunk_id}:{index}"); - transaction.delete_events_by_chunk_from_index(linked_chunk_id, at.into()).await?; + transaction + .delete_events_by_chunk_from_index(linked_chunk_id, at.into()) + .await?; } Update::StartReattachItems | Update::EndReattachItems => { // Nothing? See sqlite implementation @@ -334,7 +326,8 @@ impl_event_cache_store! 
{ let chunks = transaction.get_chunks_by_linked_chunk_id(linked_chunk_id).await?; for chunk in chunks { let chunk_id = ChunkIdentifier::new(chunk.identifier); - let num_items = transaction.get_events_count_by_chunk(linked_chunk_id, chunk_id).await?; + let num_items = + transaction.get_events_count_by_chunk(linked_chunk_id, chunk_id).await?; raw_chunks.push(ChunkMetadata { num_items, previous: chunk.previous.map(ChunkIdentifier::new), @@ -379,12 +372,12 @@ impl_event_cache_store! { // There was some error querying IndexedDB, but it is not necessarily // a violation of our data constraints. Err(e.into()) - }, + } Ok(None) => { // If there is no chunk without a next chunk, that means every chunk // points to another chunk, which means that we have a cycle in our list. Err(IndexeddbEventCacheStoreError::ChunksContainCycle) - }, + } Ok(Some(last_chunk)) => { let last_chunk_identifier = ChunkIdentifier::new(last_chunk.identifier); let last_raw_chunk = transaction @@ -415,10 +408,14 @@ impl_event_cache_store! { &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS], IdbTransactionMode::Readonly, )?; - if let Some(chunk) = transaction.get_chunk_by_id(linked_chunk_id, before_chunk_identifier).await? { + if let Some(chunk) = + transaction.get_chunk_by_id(linked_chunk_id, before_chunk_identifier).await? + { if let Some(previous_identifier) = chunk.previous { let previous_identifier = ChunkIdentifier::new(previous_identifier); - return Ok(transaction.load_chunk_by_id(linked_chunk_id, previous_identifier).await?); + return Ok(transaction + .load_chunk_by_id(linked_chunk_id, previous_identifier) + .await?); } } Ok(None) @@ -451,8 +448,7 @@ impl_event_cache_store! 
{ return Ok(Vec::new()); } - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; let mut duplicated = Vec::new(); for event_id in events { if let Some(types::Event::InBand(event)) = @@ -472,8 +468,7 @@ impl_event_cache_store! { ) -> Result, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; transaction .get_event_by_room(room_id, event_id) .await @@ -490,8 +485,7 @@ impl_event_cache_store! { ) -> Result)>, IndexeddbEventCacheStoreError> { let _timer = timer!("method"); - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readonly)?; let mut related_events = Vec::new(); match filters { @@ -506,9 +500,7 @@ impl_event_cache_store! { } } _ => { - for event in - transaction.get_events_by_related_event(room_id, event_id).await? - { + for event in transaction.get_events_by_related_event(room_id, event_id).await? { let position = event.position().map(Into::into); related_events.push((event.into(), position)); } @@ -529,14 +521,13 @@ impl_event_cache_store! { error!(%room_id, "Trying to save an event with no ID"); return Ok(()); }; - let transaction = - self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; + let transaction = self.transaction(&[keys::EVENTS], IdbTransactionMode::Readwrite)?; let event = match transaction.get_event_by_room(room_id, &event_id).await? 
{ Some(mut inner) => inner.with_content(event), None => types::Event::OutOfBand(OutOfBandEvent { linked_chunk_id: LinkedChunkId::Room(room_id).to_owned(), content: event, - position: () + position: (), }), }; transaction.put_event(&event).await?; From 03d02d7c9a138915936721abd7121fe085cb4387 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Sun, 17 Aug 2025 15:05:30 -0400 Subject: [PATCH 16/16] refactor(indexeddb): allow IndexeddbSerializer::hash_key to be unused until event cache store is a default feature Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-indexeddb/src/serializer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/matrix-sdk-indexeddb/src/serializer.rs b/crates/matrix-sdk-indexeddb/src/serializer.rs index 109885f4941..c6b5040554a 100644 --- a/crates/matrix-sdk-indexeddb/src/serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer.rs @@ -99,6 +99,7 @@ impl IndexeddbSerializer { /// **Note** that when dealing with keys which will be encoded as strings, /// it is recommended to use [`encode_key`](Self::encode_key), as it /// ensures that strings are safe for use as a key. + #[allow(unused)] pub fn hash_key(&self, table_name: &str, key: T) -> Vec where T: AsRef<[u8]>,