diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index e4c7306bf07..c62e32c0ed9 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -14,7 +14,6 @@ use matrix_sdk::{ authentication::oauth::{ AccountManagementActionFull, ClientId, OAuthAuthorizationData, OAuthSession, }, - event_cache::EventCacheError, media::{MediaFormat, MediaRequestParameters, MediaRetentionPolicy, MediaThumbnailSettings}, ruma::{ api::client::{ @@ -39,7 +38,7 @@ use matrix_sdk::{ }, sliding_sync::Version as SdkSlidingSyncVersion, store::RoomLoadSettings as SdkRoomLoadSettings, - Account, AuthApi, AuthSession, Client as MatrixClient, SessionChange, SessionTokens, + Account, AuthApi, AuthSession, Client as MatrixClient, Error, SessionChange, SessionTokens, STATE_STORE_DATABASE_NAME, }; use matrix_sdk_common::{stream::StreamExt, SendOutsideWasm, SyncOutsideWasm}; @@ -1510,8 +1509,8 @@ impl Client { &self, policy: MediaRetentionPolicy, ) -> Result<(), ClientError> { - let closure = async || -> Result<_, EventCacheError> { - let store = self.inner.event_cache_store().lock().await?; + let closure = async || -> Result<_, Error> { + let store = self.inner.media_store().lock().await?; Ok(store.set_media_retention_policy(policy).await?) }; @@ -1559,13 +1558,13 @@ impl Client { // Clean up the media cache according to the current media retention policy. self.inner - .event_cache_store() + .media_store() .lock() .await - .map_err(EventCacheError::from)? + .map_err(Error::from)? .clean_up_media_cache() .await - .map_err(EventCacheError::from)?; + .map_err(Error::from)?; // Clear all the room chunks. 
It's important to *not* call // `EventCacheStore::clear_all_linked_chunks` here, because there might be live diff --git a/crates/matrix-sdk-base/src/client.rs b/crates/matrix-sdk-base/src/client.rs index 250ed4e589e..4318e981d27 100644 --- a/crates/matrix-sdk-base/src/client.rs +++ b/crates/matrix-sdk-base/src/client.rs @@ -57,6 +57,7 @@ use crate::{ deserialized_responses::DisplayName, error::{Error, Result}, event_cache::store::EventCacheStoreLock, + media::store::MediaStoreLock, response_processors::{self as processors, Context}, room::{ Room, RoomInfoNotableUpdate, RoomInfoNotableUpdateReasons, RoomMembersUpdate, RoomState, @@ -91,6 +92,9 @@ pub struct BaseClient { /// The store used by the event cache. event_cache_store: EventCacheStoreLock, + /// The store used by the media cache. + media_store: MediaStoreLock, + /// The store used for encryption. /// /// This field is only meant to be used for `OlmMachine` initialization. @@ -189,6 +193,7 @@ impl BaseClient { BaseClient { state_store: store, event_cache_store: config.event_cache_store, + media_store: config.media_store, #[cfg(feature = "e2e-encryption")] crypto_store: config.crypto_store, #[cfg(feature = "e2e-encryption")] @@ -222,6 +227,7 @@ impl BaseClient { let copy = Self { state_store: BaseStateStore::new(config.state_store), event_cache_store: config.event_cache_store, + media_store: config.media_store, // We copy the crypto store as well as the `OlmMachine` for two reasons: // 1. The `self.crypto_store` is the same as the one used inside the `OlmMachine`. // 2. We need to ensure that the parent and child use the same data and caches inside @@ -306,6 +312,11 @@ impl BaseClient { &self.event_cache_store } + /// Get a reference to the media store. + pub fn media_store(&self) -> &MediaStoreLock { + &self.media_store + } + /// Check whether the client has been activated. /// /// See [`BaseClient::activate`] to know what it means. 
diff --git a/crates/matrix-sdk-base/src/event_cache/store/memory_store.rs b/crates/matrix-sdk-base/src/event_cache/store/memory_store.rs index 445d008a542..f0da096b39d 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/memory_store.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/memory_store.rs @@ -14,7 +14,6 @@ use std::{ collections::HashMap, - num::NonZeroUsize, sync::{Arc, RwLock as StdRwLock}, }; @@ -24,26 +23,15 @@ use matrix_sdk_common::{ ChunkIdentifier, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, Position, RawChunk, Update, relational::RelationalLinkedChunk, }, - ring_buffer::RingBuffer, store_locks::memory_store_helper::try_take_leased_lock, }; -use ruma::{ - EventId, MxcUri, OwnedEventId, OwnedMxcUri, RoomId, - events::relation::RelationType, - time::{Instant, SystemTime}, -}; +use ruma::{EventId, OwnedEventId, RoomId, events::relation::RelationType, time::Instant}; use tracing::error; use super::{ EventCacheStore, EventCacheStoreError, Result, compute_filters_string, extract_event_relation, }; -use crate::{ - event_cache::{Event, Gap}, - media::{ - MediaRequestParameters, UniqueKey as _, - store::{IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStoreInner}, - }, -}; +use crate::event_cache::{Event, Gap}; /// In-memory, non-persistent implementation of the `EventCacheStore`. /// @@ -51,55 +39,21 @@ use crate::{ #[derive(Debug, Clone)] pub struct MemoryStore { inner: Arc>, - media_service: MediaService, } #[derive(Debug)] struct MemoryStoreInner { - media: RingBuffer, leases: HashMap, events: RelationalLinkedChunk, - media_retention_policy: Option, - last_media_cleanup_time: SystemTime, } -/// A media content in the `MemoryStore`. -#[derive(Debug)] -struct MediaContent { - /// The URI of the content. - uri: OwnedMxcUri, - - /// The unique key of the content. - key: String, - - /// The bytes of the content. - data: Vec, - - /// Whether we should ignore the [`MediaRetentionPolicy`] for this content. 
- ignore_policy: bool, - - /// The time of the last access of the content. - last_access: SystemTime, -} - -const NUMBER_OF_MEDIAS: NonZeroUsize = NonZeroUsize::new(20).unwrap(); - impl Default for MemoryStore { fn default() -> Self { - // Given that the store is empty, we won't need to clean it up right away. - let last_media_cleanup_time = SystemTime::now(); - let media_service = MediaService::new(); - media_service.restore(None, Some(last_media_cleanup_time)); - Self { inner: Arc::new(StdRwLock::new(MemoryStoreInner { - media: RingBuffer::new(NUMBER_OF_MEDIAS), leases: Default::default(), events: RelationalLinkedChunk::new(), - media_retention_policy: None, - last_media_cleanup_time, })), - media_service, } } } @@ -274,313 +228,12 @@ impl EventCacheStore for MemoryStore { self.inner.write().unwrap().events.save_item(room_id.to_owned(), event); Ok(()) } - - async fn add_media_content( - &self, - request: &MediaRequestParameters, - data: Vec, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<()> { - self.media_service.add_media_content(self, request, data, ignore_policy).await - } - - async fn replace_media_key( - &self, - from: &MediaRequestParameters, - to: &MediaRequestParameters, - ) -> Result<(), Self::Error> { - let expected_key = from.unique_key(); - - let mut inner = self.inner.write().unwrap(); - - if let Some(media_content) = - inner.media.iter_mut().find(|media_content| media_content.key == expected_key) - { - media_content.uri = to.uri().to_owned(); - media_content.key = to.unique_key(); - } - - Ok(()) - } - - async fn get_media_content(&self, request: &MediaRequestParameters) -> Result>> { - self.media_service.get_media_content(self, request).await - } - - async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> { - let expected_key = request.unique_key(); - - let mut inner = self.inner.write().unwrap(); - - let Some(index) = - inner.media.iter().position(|media_content| media_content.key == expected_key) - else { 
- return Ok(()); - }; - - inner.media.remove(index); - - Ok(()) - } - - async fn get_media_content_for_uri( - &self, - uri: &MxcUri, - ) -> Result>, Self::Error> { - self.media_service.get_media_content_for_uri(self, uri).await - } - - async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> { - let mut inner = self.inner.write().unwrap(); - - let positions = inner - .media - .iter() - .enumerate() - .filter_map(|(position, media_content)| (media_content.uri == uri).then_some(position)) - .collect::>(); - - // Iterate in reverse-order so that positions stay valid after first removals. - for position in positions.into_iter().rev() { - inner.media.remove(position); - } - - Ok(()) - } - - async fn set_media_retention_policy( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), Self::Error> { - self.media_service.set_media_retention_policy(self, policy).await - } - - fn media_retention_policy(&self) -> MediaRetentionPolicy { - self.media_service.media_retention_policy() - } - - async fn set_ignore_media_retention_policy( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - self.media_service.set_ignore_media_retention_policy(self, request, ignore_policy).await - } - - async fn clean_up_media_cache(&self) -> Result<(), Self::Error> { - self.media_service.clean_up_media_cache(self).await - } -} - -#[cfg_attr(target_family = "wasm", async_trait(?Send))] -#[cfg_attr(not(target_family = "wasm"), async_trait)] -impl MediaStoreInner for MemoryStore { - type Error = EventCacheStoreError; - - async fn media_retention_policy_inner( - &self, - ) -> Result, Self::Error> { - Ok(self.inner.read().unwrap().media_retention_policy) - } - - async fn set_media_retention_policy_inner( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), Self::Error> { - self.inner.write().unwrap().media_retention_policy = Some(policy); - Ok(()) - } - - async fn add_media_content_inner( - &self, - request: 
&MediaRequestParameters, - data: Vec, - last_access: SystemTime, - policy: MediaRetentionPolicy, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - // Avoid duplication. Let's try to remove it first. - self.remove_media_content(request).await?; - - let ignore_policy = ignore_policy.is_yes(); - - if !ignore_policy && policy.exceeds_max_file_size(data.len() as u64) { - // Do not store it. - return Ok(()); - } - - // Now, let's add it. - let mut inner = self.inner.write().unwrap(); - inner.media.push(MediaContent { - uri: request.uri().to_owned(), - key: request.unique_key(), - data, - ignore_policy, - last_access, - }); - - Ok(()) - } - - async fn set_ignore_media_retention_policy_inner( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - let mut inner = self.inner.write().unwrap(); - let expected_key = request.unique_key(); - - if let Some(media_content) = inner.media.iter_mut().find(|media| media.key == expected_key) - { - media_content.ignore_policy = ignore_policy.is_yes(); - } - - Ok(()) - } - - async fn get_media_content_inner( - &self, - request: &MediaRequestParameters, - current_time: SystemTime, - ) -> Result>, Self::Error> { - let mut inner = self.inner.write().unwrap(); - let expected_key = request.unique_key(); - - // First get the content out of the buffer, we are going to put it back at the - // end. - let Some(index) = inner.media.iter().position(|media| media.key == expected_key) else { - return Ok(None); - }; - let Some(mut content) = inner.media.remove(index) else { - return Ok(None); - }; - - // Clone the data. - let data = content.data.clone(); - - // Update the last access time. - content.last_access = current_time; - - // Put it back in the buffer. 
- inner.media.push(content); - - Ok(Some(data)) - } - - async fn get_media_content_for_uri_inner( - &self, - expected_uri: &MxcUri, - current_time: SystemTime, - ) -> Result>, Self::Error> { - let mut inner = self.inner.write().unwrap(); - - // First get the content out of the buffer, we are going to put it back at the - // end. - let Some(index) = inner.media.iter().position(|media| media.uri == expected_uri) else { - return Ok(None); - }; - let Some(mut content) = inner.media.remove(index) else { - return Ok(None); - }; - - // Clone the data. - let data = content.data.clone(); - - // Update the last access time. - content.last_access = current_time; - - // Put it back in the buffer. - inner.media.push(content); - - Ok(Some(data)) - } - - async fn clean_up_media_cache_inner( - &self, - policy: MediaRetentionPolicy, - current_time: SystemTime, - ) -> Result<(), Self::Error> { - if !policy.has_limitations() { - // We can safely skip all the checks. - return Ok(()); - } - - let mut inner = self.inner.write().unwrap(); - - // First, check media content that exceed the max filesize. - if policy.computed_max_file_size().is_some() { - inner.media.retain(|content| { - content.ignore_policy || !policy.exceeds_max_file_size(content.data.len() as u64) - }); - } - - // Then, clean up expired media content. - if policy.last_access_expiry.is_some() { - inner.media.retain(|content| { - content.ignore_policy - || !policy.has_content_expired(current_time, content.last_access) - }); - } - - // Finally, if the cache size is too big, remove old items until it fits. - if let Some(max_cache_size) = policy.max_cache_size { - // Reverse the iterator because in case the cache size is overflowing, we want - // to count the number of old items to remove. Items are sorted by last access - // and old items are at the start. 
- let (_, items_to_remove) = inner.media.iter().enumerate().rev().fold( - (0u64, Vec::with_capacity(NUMBER_OF_MEDIAS.into())), - |(mut cache_size, mut items_to_remove), (index, content)| { - if content.ignore_policy { - // Do not count it. - return (cache_size, items_to_remove); - } - - let remove_item = if items_to_remove.is_empty() { - // We have not reached the max cache size yet. - if let Some(sum) = cache_size.checked_add(content.data.len() as u64) { - cache_size = sum; - // Start removing items if we have exceeded the max cache size. - cache_size > max_cache_size - } else { - // The cache size is overflowing, remove the remaining items, since the - // max cache size cannot be bigger than - // usize::MAX. - true - } - } else { - // We have reached the max cache size already, just remove it. - true - }; - - if remove_item { - items_to_remove.push(index); - } - - (cache_size, items_to_remove) - }, - ); - - // The indexes are already in reverse order so we can just iterate in that order - // to remove them starting by the end. 
- for index in items_to_remove { - inner.media.remove(index); - } - } - - inner.last_media_cleanup_time = current_time; - - Ok(()) - } - - async fn last_media_cleanup_time_inner(&self) -> Result, Self::Error> { - Ok(Some(self.inner.read().unwrap().last_media_cleanup_time)) - } } #[cfg(test)] mod tests { use super::{MemoryStore, Result}; - use crate::media_store_inner_integration_tests; + use crate::{event_cache_store_integration_tests, event_cache_store_integration_tests_time}; async fn get_event_cache_store() -> Result { Ok(MemoryStore::new()) @@ -588,5 +241,4 @@ mod tests { event_cache_store_integration_tests!(); event_cache_store_integration_tests_time!(); - media_store_inner_integration_tests!(with_media_size_tests); } diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index ecc2ec7e880..7d8ce765cb1 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The event cache stores holds events and downloaded media when the cache was +//! The event cache stores holds events when the cache was //! activated to save bandwidth at the cost of increased storage space usage. //! //! 
Implementing the `EventCacheStore` trait, you can plug any storage backend diff --git a/crates/matrix-sdk-base/src/media/store/integration_tests.rs b/crates/matrix-sdk-base/src/media/store/integration_tests.rs index 9400bcbc24e..1df1d805653 100644 --- a/crates/matrix-sdk-base/src/media/store/integration_tests.rs +++ b/crates/matrix-sdk-base/src/media/store/integration_tests.rs @@ -23,11 +23,10 @@ use ruma::{ uint, }; -use super::{ - MediaRetentionPolicy, MediaStoreInner, - media_service::{IgnoreMediaRetentionPolicy, MediaStore}, +use super::{MediaRetentionPolicy, MediaStoreInner, media_service::IgnoreMediaRetentionPolicy}; +use crate::media::{ + MediaFormat, MediaRequestParameters, MediaThumbnailSettings, store::MediaStore, }; -use crate::media::{MediaFormat, MediaRequestParameters, MediaThumbnailSettings}; /// [`MediaStoreInner`] integration tests. /// @@ -981,7 +980,7 @@ where /// ```no_run /// # use matrix_sdk_base::media::store::{ /// # MediaStore, -/// # MemoryStore as MyStore, +/// # MemoryMediaStore as MyStore, /// # Result as MediaStoreResult, /// # }; /// @@ -1275,7 +1274,7 @@ where /// ```no_run /// # use matrix_sdk_base::media::store::{ /// # MediaStore, -/// # MemoryStore as MyStore, +/// # MemoryMediaStore as MyStore, /// # Result as MediaStoreResult, /// # }; /// diff --git a/crates/matrix-sdk-base/src/media/store/media_service.rs b/crates/matrix-sdk-base/src/media/store/media_service.rs index 5250634124e..7c787c509fc 100644 --- a/crates/matrix-sdk-base/src/media/store/media_service.rs +++ b/crates/matrix-sdk-base/src/media/store/media_service.rs @@ -12,11 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::{fmt, sync::Arc}; +use std::sync::Arc; -use async_trait::async_trait; use matrix_sdk_common::{ - AsyncTraitDeps, SendOutsideWasm, SyncOutsideWasm, + SendOutsideWasm, SyncOutsideWasm, executor::{JoinHandle, spawn}, locks::Mutex, }; @@ -24,7 +23,7 @@ use ruma::{MxcUri, time::SystemTime}; use tokio::sync::Mutex as AsyncMutex; use tracing::error; -use super::{MediaRetentionPolicy, MediaStoreError}; +use super::{MediaRetentionPolicy, MediaStoreInner}; use crate::media::MediaRequestParameters; /// API for implementors of [`MediaStore`] to manage their media through @@ -349,272 +348,6 @@ where } } -/// An abstract trait that can be used to implement different store backends -/// for the media store of the SDK. -#[cfg_attr(target_family = "wasm", async_trait(?Send))] -#[cfg_attr(not(target_family = "wasm"), async_trait)] -pub trait MediaStore: AsyncTraitDeps { - /// The error type used by this media store. - type Error: fmt::Debug + Into; - - /// Try to take a lock using the given store. - async fn try_take_leased_lock( - &self, - lease_duration_ms: u32, - key: &str, - holder: &str, - ) -> Result; - - /// Add a media file's content in the media store. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequest` of the file. - /// - /// * `content` - The content of the file. - async fn add_media_content( - &self, - request: &MediaRequestParameters, - content: Vec, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Replaces the given media's content key with another one. - /// - /// This should be used whenever a temporary (local) MXID has been used, and - /// it must now be replaced with its actual remote counterpart (after - /// uploading some content, or creating an empty MXC URI). - /// - /// ⚠ No check is performed to ensure that the media formats are consistent, - /// i.e. it's possible to update with a thumbnail key a media that was - /// keyed as a file before. 
The caller is responsible of ensuring that - /// the replacement makes sense, according to their use case. - /// - /// This should not raise an error when the `from` parameter points to an - /// unknown media, and it should silently continue in this case. - /// - /// # Arguments - /// - /// * `from` - The previous `MediaRequest` of the file. - /// - /// * `to` - The new `MediaRequest` of the file. - async fn replace_media_key( - &self, - from: &MediaRequestParameters, - to: &MediaRequestParameters, - ) -> Result<(), Self::Error>; - - /// Get a media file's content out of the media store. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequest` of the file. - async fn get_media_content( - &self, - request: &MediaRequestParameters, - ) -> Result>, Self::Error>; - - /// Remove a media file's content from the media store. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequest` of the file. - async fn remove_media_content( - &self, - request: &MediaRequestParameters, - ) -> Result<(), Self::Error>; - - /// Get a media file's content associated to an `MxcUri` from the - /// media store. - /// - /// In theory, there could be several files stored using the same URI and a - /// different `MediaFormat`. This API is meant to be used with a media file - /// that has only been stored with a single format. - /// - /// If there are several media files for a given URI in different formats, - /// this API will only return one of them. Which one is left as an - /// implementation detail. - /// - /// # Arguments - /// - /// * `uri` - The `MxcUri` of the media file. - async fn get_media_content_for_uri(&self, uri: &MxcUri) - -> Result>, Self::Error>; - - /// Remove all the media files' content associated to an `MxcUri` from the - /// media store. - /// - /// This should not raise an error when the `uri` parameter points to an - /// unknown media, and it should return an Ok result in this case. 
- /// - /// # Arguments - /// - /// * `uri` - The `MxcUri` of the media files. - async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<(), Self::Error>; - - /// Set the `MediaRetentionPolicy` to use for deciding whether to store or - /// keep media content. - /// - /// # Arguments - /// - /// * `policy` - The `MediaRetentionPolicy` to use. - async fn set_media_retention_policy( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Get the current `MediaRetentionPolicy`. - fn media_retention_policy(&self) -> MediaRetentionPolicy; - - /// Set whether the current [`MediaRetentionPolicy`] should be ignored for - /// the media. - /// - /// The change will be taken into account in the next cleanup. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequestParameters` of the file. - /// - /// * `ignore_policy` - Whether the current `MediaRetentionPolicy` should be - /// ignored. - async fn set_ignore_media_retention_policy( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Clean up the media cache with the current `MediaRetentionPolicy`. - /// - /// If there is already an ongoing cleanup, this is a noop. - async fn clean_up_media_cache(&self) -> Result<(), Self::Error>; -} - -/// An abstract trait that can be used to implement different store backends -/// for the media cache of the SDK. -/// -/// The main purposes of this trait are to be able to centralize where we handle -/// [`MediaRetentionPolicy`] by wrapping this in a [`MediaService`], and to -/// simplify the implementation of tests by being able to have complete control -/// over the `SystemTime`s provided to the store. -#[cfg_attr(target_family = "wasm", async_trait(?Send))] -#[cfg_attr(not(target_family = "wasm"), async_trait)] -pub trait MediaStoreInner: AsyncTraitDeps + Clone { - /// The error type used by this media cache store. 
- type Error: fmt::Debug + fmt::Display + Into; - - /// The persisted media retention policy in the media cache. - async fn media_retention_policy_inner( - &self, - ) -> Result, Self::Error>; - - /// Persist the media retention policy in the media cache. - /// - /// # Arguments - /// - /// * `policy` - The `MediaRetentionPolicy` to persist. - async fn set_media_retention_policy_inner( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Add a media file's content in the media cache. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequestParameters` of the file. - /// - /// * `content` - The content of the file. - /// - /// * `current_time` - The current time, to set the last access time of the - /// media. - /// - /// * `policy` - The media retention policy, to check whether the media is - /// too big to be cached. - /// - /// * `ignore_policy` - Whether the `MediaRetentionPolicy` should be ignored - /// for this media. This setting should be persisted alongside the media - /// and taken into account whenever the policy is used. - async fn add_media_content_inner( - &self, - request: &MediaRequestParameters, - content: Vec, - current_time: SystemTime, - policy: MediaRetentionPolicy, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Set whether the current [`MediaRetentionPolicy`] should be ignored for - /// the media. - /// - /// If the media of the given request is not found, this should be a noop. - /// - /// The change will be taken into account in the next cleanup. - /// - /// # Arguments - /// - /// * `request` - The `MediaRequestParameters` of the file. - /// - /// * `ignore_policy` - Whether the current `MediaRetentionPolicy` should be - /// ignored. - async fn set_ignore_media_retention_policy_inner( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error>; - - /// Get a media file's content out of the media cache. 
- /// - /// # Arguments - /// - /// * `request` - The `MediaRequestParameters` of the file. - /// - /// * `current_time` - The current time, to update the last access time of - /// the media. - async fn get_media_content_inner( - &self, - request: &MediaRequestParameters, - current_time: SystemTime, - ) -> Result>, Self::Error>; - - /// Get a media file's content associated to an `MxcUri` from the - /// media store. - /// - /// # Arguments - /// - /// * `uri` - The `MxcUri` of the media file. - /// - /// * `current_time` - The current time, to update the last access time of - /// the media. - async fn get_media_content_for_uri_inner( - &self, - uri: &MxcUri, - current_time: SystemTime, - ) -> Result>, Self::Error>; - - /// Clean up the media cache with the given policy. - /// - /// For the integration tests, it is expected that content that does not - /// pass the last access expiry and max file size criteria will be - /// removed first. After that, the remaining cache size should be - /// computed to compare against the max cache size criteria. - /// - /// # Arguments - /// - /// * `policy` - The media retention policy to use for the cleanup. The - /// `cleanup_frequency` will be ignored. - /// - /// * `current_time` - The current time, to be used to check for expired - /// content and to be stored as the time of the last media cache cleanup. - async fn clean_up_media_cache_inner( - &self, - policy: MediaRetentionPolicy, - current_time: SystemTime, - ) -> Result<(), Self::Error>; - - /// The time of the last media cache cleanup. - async fn last_media_cleanup_time_inner(&self) -> Result, Self::Error>; -} - /// Whether the [`MediaRetentionPolicy`] should be ignored for the current /// content. 
/// @@ -685,10 +418,10 @@ mod tests { }; use super::{ - IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStoreError, - MediaStoreInner, TimeProvider, + IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStoreInner, + TimeProvider, }; - use crate::media::{MediaFormat, MediaRequestParameters, UniqueKey}; + use crate::media::{MediaFormat, MediaRequestParameters, UniqueKey, store::MediaStoreError}; #[derive(Debug, Default, Clone)] struct MockMediaStoreInner { diff --git a/crates/matrix-sdk-base/src/media/store/memory_store.rs b/crates/matrix-sdk-base/src/media/store/memory_store.rs new file mode 100644 index 00000000000..0785e94746d --- /dev/null +++ b/crates/matrix-sdk-base/src/media/store/memory_store.rs @@ -0,0 +1,440 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{ + collections::HashMap, + num::NonZeroUsize, + sync::{Arc, RwLock as StdRwLock}, + time::SystemTime, +}; + +use async_trait::async_trait; +use matrix_sdk_common::{ + ring_buffer::RingBuffer, store_locks::memory_store_helper::try_take_leased_lock, +}; +use ruma::{MxcUri, OwnedMxcUri, time::Instant}; + +use super::Result; +use crate::media::{ + MediaRequestParameters, UniqueKey as _, + store::{ + IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStore, + MediaStoreError, MediaStoreInner, + }, +}; + +/// In-memory, non-persistent implementation of the `MediaStore`. 
+/// +/// Default if no other is configured at startup. +#[derive(Debug, Clone)] +pub struct MemoryMediaStore { + inner: Arc>, + media_service: MediaService, +} + +#[derive(Debug)] +struct MemoryMediaStoreInner { + media: RingBuffer, + leases: HashMap, + media_retention_policy: Option, + last_media_cleanup_time: SystemTime, +} + +/// A media content in the `MemoryStore`. +#[derive(Debug)] +struct MediaContent { + /// The URI of the content. + uri: OwnedMxcUri, + + /// The unique key of the content. + key: String, + + /// The bytes of the content. + data: Vec, + + /// Whether we should ignore the [`MediaRetentionPolicy`] for this content. + ignore_policy: bool, + + /// The time of the last access of the content. + last_access: SystemTime, +} + +const NUMBER_OF_MEDIAS: NonZeroUsize = NonZeroUsize::new(20).unwrap(); + +impl Default for MemoryMediaStore { + fn default() -> Self { + // Given that the store is empty, we won't need to clean it up right away. + let last_media_cleanup_time = SystemTime::now(); + let media_service = MediaService::new(); + media_service.restore(None, Some(last_media_cleanup_time)); + + Self { + inner: Arc::new(StdRwLock::new(MemoryMediaStoreInner { + media: RingBuffer::new(NUMBER_OF_MEDIAS), + leases: Default::default(), + media_retention_policy: None, + last_media_cleanup_time, + })), + media_service, + } + } +} + +impl MemoryMediaStore { + /// Create a new empty MemoryMediaStore + pub fn new() -> Self { + Self::default() + } +} + +#[cfg_attr(target_family = "wasm", async_trait(?Send))] +#[cfg_attr(not(target_family = "wasm"), async_trait)] +impl MediaStore for MemoryMediaStore { + type Error = MediaStoreError; + + async fn try_take_leased_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> Result { + let mut inner = self.inner.write().unwrap(); + + Ok(try_take_leased_lock(&mut inner.leases, lease_duration_ms, key, holder)) + } + + async fn add_media_content( + &self, + request: &MediaRequestParameters, + data: Vec, 
+ ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.media_service.add_media_content(self, request, data, ignore_policy).await + } + + async fn replace_media_key( + &self, + from: &MediaRequestParameters, + to: &MediaRequestParameters, + ) -> Result<(), Self::Error> { + let expected_key = from.unique_key(); + + let mut inner = self.inner.write().unwrap(); + + if let Some(media_content) = + inner.media.iter_mut().find(|media_content| media_content.key == expected_key) + { + media_content.uri = to.uri().to_owned(); + media_content.key = to.unique_key(); + } + + Ok(()) + } + + async fn get_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result>, Self::Error> { + self.media_service.get_media_content(self, request).await + } + + async fn remove_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result<(), Self::Error> { + let expected_key = request.unique_key(); + + let mut inner = self.inner.write().unwrap(); + + let Some(index) = + inner.media.iter().position(|media_content| media_content.key == expected_key) + else { + return Ok(()); + }; + + inner.media.remove(index); + + Ok(()) + } + + async fn get_media_content_for_uri( + &self, + uri: &MxcUri, + ) -> Result>, Self::Error> { + self.media_service.get_media_content_for_uri(self, uri).await + } + + async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<(), Self::Error> { + let mut inner = self.inner.write().unwrap(); + + let positions = inner + .media + .iter() + .enumerate() + .filter_map(|(position, media_content)| (media_content.uri == uri).then_some(position)) + .collect::>(); + + // Iterate in reverse-order so that positions stay valid after first removals. 
+ for position in positions.into_iter().rev() { + inner.media.remove(position); + } + + Ok(()) + } + + async fn set_media_retention_policy( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.media_service.set_media_retention_policy(self, policy).await + } + + fn media_retention_policy(&self) -> MediaRetentionPolicy { + self.media_service.media_retention_policy() + } + + async fn set_ignore_media_retention_policy( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.media_service.set_ignore_media_retention_policy(self, request, ignore_policy).await + } + + async fn clean_up_media_cache(&self) -> Result<(), Self::Error> { + self.media_service.clean_up_media_cache(self).await + } +} + +#[cfg_attr(target_family = "wasm", async_trait(?Send))] +#[cfg_attr(not(target_family = "wasm"), async_trait)] +impl MediaStoreInner for MemoryMediaStore { + type Error = MediaStoreError; + + async fn media_retention_policy_inner( + &self, + ) -> Result, Self::Error> { + Ok(self.inner.read().unwrap().media_retention_policy) + } + + async fn set_media_retention_policy_inner( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.inner.write().unwrap().media_retention_policy = Some(policy); + Ok(()) + } + + async fn add_media_content_inner( + &self, + request: &MediaRequestParameters, + data: Vec, + last_access: SystemTime, + policy: MediaRetentionPolicy, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + // Avoid duplication. Let's try to remove it first. + self.remove_media_content(request).await?; + + let ignore_policy = ignore_policy.is_yes(); + + if !ignore_policy && policy.exceeds_max_file_size(data.len() as u64) { + // Do not store it. + return Ok(()); + } + + // Now, let's add it. 
+ let mut inner = self.inner.write().unwrap(); + inner.media.push(MediaContent { + uri: request.uri().to_owned(), + key: request.unique_key(), + data, + ignore_policy, + last_access, + }); + + Ok(()) + } + + async fn set_ignore_media_retention_policy_inner( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + let mut inner = self.inner.write().unwrap(); + let expected_key = request.unique_key(); + + if let Some(media_content) = inner.media.iter_mut().find(|media| media.key == expected_key) + { + media_content.ignore_policy = ignore_policy.is_yes(); + } + + Ok(()) + } + + async fn get_media_content_inner( + &self, + request: &MediaRequestParameters, + current_time: SystemTime, + ) -> Result>, Self::Error> { + let mut inner = self.inner.write().unwrap(); + let expected_key = request.unique_key(); + + // First get the content out of the buffer, we are going to put it back at the + // end. + let Some(index) = inner.media.iter().position(|media| media.key == expected_key) else { + return Ok(None); + }; + let Some(mut content) = inner.media.remove(index) else { + return Ok(None); + }; + + // Clone the data. + let data = content.data.clone(); + + // Update the last access time. + content.last_access = current_time; + + // Put it back in the buffer. + inner.media.push(content); + + Ok(Some(data)) + } + + async fn get_media_content_for_uri_inner( + &self, + expected_uri: &MxcUri, + current_time: SystemTime, + ) -> Result>, Self::Error> { + let mut inner = self.inner.write().unwrap(); + + // First get the content out of the buffer, we are going to put it back at the + // end. + let Some(index) = inner.media.iter().position(|media| media.uri == expected_uri) else { + return Ok(None); + }; + let Some(mut content) = inner.media.remove(index) else { + return Ok(None); + }; + + // Clone the data. + let data = content.data.clone(); + + // Update the last access time. 
+ content.last_access = current_time; + + // Put it back in the buffer. + inner.media.push(content); + + Ok(Some(data)) + } + + async fn clean_up_media_cache_inner( + &self, + policy: MediaRetentionPolicy, + current_time: SystemTime, + ) -> Result<(), Self::Error> { + if !policy.has_limitations() { + // We can safely skip all the checks. + return Ok(()); + } + + let mut inner = self.inner.write().unwrap(); + + // First, check media content that exceed the max filesize. + if policy.computed_max_file_size().is_some() { + inner.media.retain(|content| { + content.ignore_policy || !policy.exceeds_max_file_size(content.data.len() as u64) + }); + } + + // Then, clean up expired media content. + if policy.last_access_expiry.is_some() { + inner.media.retain(|content| { + content.ignore_policy + || !policy.has_content_expired(current_time, content.last_access) + }); + } + + // Finally, if the cache size is too big, remove old items until it fits. + if let Some(max_cache_size) = policy.max_cache_size { + // Reverse the iterator because in case the cache size is overflowing, we want + // to count the number of old items to remove. Items are sorted by last access + // and old items are at the start. + let (_, items_to_remove) = inner.media.iter().enumerate().rev().fold( + (0u64, Vec::with_capacity(NUMBER_OF_MEDIAS.into())), + |(mut cache_size, mut items_to_remove), (index, content)| { + if content.ignore_policy { + // Do not count it. + return (cache_size, items_to_remove); + } + + let remove_item = if items_to_remove.is_empty() { + // We have not reached the max cache size yet. + if let Some(sum) = cache_size.checked_add(content.data.len() as u64) { + cache_size = sum; + // Start removing items if we have exceeded the max cache size. + cache_size > max_cache_size + } else { + // The cache size is overflowing, remove the remaining items, since the + // max cache size cannot be bigger than + // usize::MAX. 
+ true + } + } else { + // We have reached the max cache size already, just remove it. + true + }; + + if remove_item { + items_to_remove.push(index); + } + + (cache_size, items_to_remove) + }, + ); + + // The indexes are already in reverse order so we can just iterate in that order + // to remove them starting by the end. + for index in items_to_remove { + inner.media.remove(index); + } + } + + inner.last_media_cleanup_time = current_time; + + Ok(()) + } + + async fn last_media_cleanup_time_inner(&self) -> Result, Self::Error> { + Ok(Some(self.inner.read().unwrap().last_media_cleanup_time)) + } +} + +#[cfg(test)] +mod tests { + use super::{MemoryMediaStore, Result}; + use crate::{ + media_store_inner_integration_tests, media_store_integration_tests, + media_store_integration_tests_time, + }; + + async fn get_media_store() -> Result { + Ok(MemoryMediaStore::new()) + } + + media_store_inner_integration_tests!(); + media_store_integration_tests!(); + media_store_integration_tests_time!(); +} diff --git a/crates/matrix-sdk-base/src/media/store/mod.rs b/crates/matrix-sdk-base/src/media/store/mod.rs index cee19959753..f63628c3f7d 100644 --- a/crates/matrix-sdk-base/src/media/store/mod.rs +++ b/crates/matrix-sdk-base/src/media/store/mod.rs @@ -21,20 +21,158 @@ mod media_retention_policy; mod media_service; +mod memory_store; +mod traits; #[cfg(any(test, feature = "testing"))] #[macro_use] pub mod integration_tests; +#[cfg(not(tarpaulin_include))] +use std::fmt; +use std::{ops::Deref, sync::Arc}; + +use matrix_sdk_common::store_locks::{ + BackingStore, CrossProcessStoreLock, CrossProcessStoreLockGuard, LockStoreError, +}; +use matrix_sdk_store_encryption::Error as StoreEncryptionError; +pub use traits::{DynMediaStore, IntoMediaStore, MediaStore, MediaStoreInner}; + #[cfg(any(test, feature = "testing"))] -pub use self::integration_tests::MediaStoreInnerIntegrationTests; +pub use self::integration_tests::{MediaStoreInnerIntegrationTests, MediaStoreIntegrationTests}; pub use 
self::{ media_retention_policy::MediaRetentionPolicy, - media_service::{IgnoreMediaRetentionPolicy, MediaService, MediaStoreInner}, + media_service::{IgnoreMediaRetentionPolicy, MediaService}, + memory_store::MemoryMediaStore, }; /// Media store specific error type. #[derive(Debug, thiserror::Error)] -pub enum MediaStoreError {} +pub enum MediaStoreError { + /// An error happened in the underlying database backend. + #[error(transparent)] + Backend(Box), + + /// The store failed to encrypt or decrypt some data. + #[error("Error encrypting or decrypting data from the media store: {0}")] + Encryption(#[from] StoreEncryptionError), + + /// The store contains invalid data. + #[error("The store contains invalid data: {details}")] + InvalidData { + /// Details why the data contained in the store was invalid. + details: String, + }, + + /// The store failed to serialize or deserialize some data. + #[error("Error serializing or deserializing data from the media store: {0}")] + Serialization(#[from] serde_json::Error), +} + +impl MediaStoreError { + /// Create a new [`Backend`][Self::Backend] error. + /// + /// Shorthand for `MediaStoreError::Backend(Box::new(error))`. + #[inline] + pub fn backend(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Backend(Box::new(error)) + } +} /// A `MediaStore` specific result type. pub type Result = std::result::Result; + +/// The high-level public type to represent a `MediaStore` lock. +#[derive(Clone)] +pub struct MediaStoreLock { + /// The inner cross process lock that is used to lock the `MediaStore`. + cross_process_lock: Arc>, + + /// The store itself. + /// + /// That's the only place where the store exists. 
+ store: Arc, +} + +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for MediaStoreLock { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("MediaStoreLock").finish_non_exhaustive() + } +} + +impl MediaStoreLock { + /// Create a new lock around the [`MediaStore`]. + /// + /// The `holder` argument represents the holder inside the + /// [`CrossProcessStoreLock::new`]. + pub fn new(store: S, holder: String) -> Self + where + S: IntoMediaStore, + { + let store = store.into_event_cache_store(); + + Self { + cross_process_lock: Arc::new(CrossProcessStoreLock::new( + LockableMediaStore(store.clone()), + "default".to_owned(), + holder, + )), + store, + } + } + + /// Acquire a spin lock (see [`CrossProcessStoreLock::spin_lock`]). + pub async fn lock(&self) -> Result, LockStoreError> { + let cross_process_lock_guard = self.cross_process_lock.spin_lock(None).await?; + + Ok(MediaStoreLockGuard { cross_process_lock_guard, store: self.store.deref() }) + } +} + +/// An RAII implementation of a “scoped lock” of an [`MediaStoreLock`]. +/// When this structure is dropped (falls out of scope), the lock will be +/// unlocked. +pub struct MediaStoreLockGuard<'a> { + /// The cross process lock guard. + #[allow(unused)] + cross_process_lock_guard: CrossProcessStoreLockGuard, + + /// A reference to the store. + store: &'a DynMediaStore, +} + +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for MediaStoreLockGuard<'_> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("MediaStoreLockGuard").finish_non_exhaustive() + } +} + +impl Deref for MediaStoreLockGuard<'_> { + type Target = DynMediaStore; + + fn deref(&self) -> &Self::Target { + self.store + } +} + +/// A type that wraps the [`MediaStore`] but implements [`BackingStore`] to +/// make it usable inside the cross process lock. 
+#[derive(Clone, Debug)] +struct LockableMediaStore(Arc); + +impl BackingStore for LockableMediaStore { + type LockError = MediaStoreError; + + async fn try_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> std::result::Result { + self.0.try_take_leased_lock(lease_duration_ms, key, holder).await + } +} diff --git a/crates/matrix-sdk-base/src/media/store/traits.rs b/crates/matrix-sdk-base/src/media/store/traits.rs new file mode 100644 index 00000000000..8b287edcc22 --- /dev/null +++ b/crates/matrix-sdk-base/src/media/store/traits.rs @@ -0,0 +1,427 @@ +// Copyright 2025 Kévin Commaille +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types and traits regarding media caching of the media store. + +use std::{fmt, sync::Arc}; + +use async_trait::async_trait; +use matrix_sdk_common::AsyncTraitDeps; +use ruma::{MxcUri, time::SystemTime}; + +#[cfg(doc)] +use crate::media::store::MediaService; +use crate::media::{ + MediaRequestParameters, + store::{IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaStoreError}, +}; + +/// An abstract trait that can be used to implement different store backends +/// for the media cache of the SDK. +#[cfg_attr(target_family = "wasm", async_trait(?Send))] +#[cfg_attr(not(target_family = "wasm"), async_trait)] +pub trait MediaStore: AsyncTraitDeps { + /// The error type used by this media store. + type Error: fmt::Debug + Into; + + /// Try to take a lock using the given store. 
+ async fn try_take_leased_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> Result; + + /// Add a media file's content in the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. + /// + /// * `content` - The content of the file. + async fn add_media_content( + &self, + request: &MediaRequestParameters, + content: Vec, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Replaces the given media's content key with another one. + /// + /// This should be used whenever a temporary (local) MXID has been used, and + /// it must now be replaced with its actual remote counterpart (after + /// uploading some content, or creating an empty MXC URI). + /// + /// ⚠ No check is performed to ensure that the media formats are consistent, + /// i.e. it's possible to update with a thumbnail key a media that was + /// keyed as a file before. The caller is responsible of ensuring that + /// the replacement makes sense, according to their use case. + /// + /// This should not raise an error when the `from` parameter points to an + /// unknown media, and it should silently continue in this case. + /// + /// # Arguments + /// + /// * `from` - The previous `MediaRequest` of the file. + /// + /// * `to` - The new `MediaRequest` of the file. + async fn replace_media_key( + &self, + from: &MediaRequestParameters, + to: &MediaRequestParameters, + ) -> Result<(), Self::Error>; + + /// Get a media file's content out of the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. + async fn get_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result>, Self::Error>; + + /// Remove a media file's content from the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. 
+ async fn remove_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result<(), Self::Error>; + + /// Get a media file's content associated to an `MxcUri` from the + /// media store. + /// + /// In theory, there could be several files stored using the same URI and a + /// different `MediaFormat`. This API is meant to be used with a media file + /// that has only been stored with a single format. + /// + /// If there are several media files for a given URI in different formats, + /// this API will only return one of them. Which one is left as an + /// implementation detail. + /// + /// # Arguments + /// + /// * `uri` - The `MxcUri` of the media file. + async fn get_media_content_for_uri(&self, uri: &MxcUri) + -> Result>, Self::Error>; + + /// Remove all the media files' content associated to an `MxcUri` from the + /// media store. + /// + /// This should not raise an error when the `uri` parameter points to an + /// unknown media, and it should return an Ok result in this case. + /// + /// # Arguments + /// + /// * `uri` - The `MxcUri` of the media files. + async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<(), Self::Error>; + + /// Set the `MediaRetentionPolicy` to use for deciding whether to store or + /// keep media content. + /// + /// # Arguments + /// + /// * `policy` - The `MediaRetentionPolicy` to use. + async fn set_media_retention_policy( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Get the current `MediaRetentionPolicy`. + fn media_retention_policy(&self) -> MediaRetentionPolicy; + + /// Set whether the current [`MediaRetentionPolicy`] should be ignored for + /// the media. + /// + /// The change will be taken into account in the next cleanup. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequestParameters` of the file. + /// + /// * `ignore_policy` - Whether the current `MediaRetentionPolicy` should be + /// ignored. 
+ async fn set_ignore_media_retention_policy( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Clean up the media cache with the current `MediaRetentionPolicy`. + /// + /// If there is already an ongoing cleanup, this is a noop. + async fn clean_up_media_cache(&self) -> Result<(), Self::Error>; +} + +/// An abstract trait that can be used to implement different store backends +/// for the media cache of the SDK. +/// +/// The main purposes of this trait are to be able to centralize where we handle +/// [`MediaRetentionPolicy`] by wrapping this in a [`MediaService`], and to +/// simplify the implementation of tests by being able to have complete control +/// over the `SystemTime`s provided to the store. +#[cfg_attr(target_family = "wasm", async_trait(?Send))] +#[cfg_attr(not(target_family = "wasm"), async_trait)] +pub trait MediaStoreInner: AsyncTraitDeps + Clone { + /// The error type used by this media cache store. + type Error: fmt::Debug + fmt::Display + Into; + + /// The persisted media retention policy in the media cache. + async fn media_retention_policy_inner( + &self, + ) -> Result, Self::Error>; + + /// Persist the media retention policy in the media cache. + /// + /// # Arguments + /// + /// * `policy` - The `MediaRetentionPolicy` to persist. + async fn set_media_retention_policy_inner( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Add a media file's content in the media cache. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequestParameters` of the file. + /// + /// * `content` - The content of the file. + /// + /// * `current_time` - The current time, to set the last access time of the + /// media. + /// + /// * `policy` - The media retention policy, to check whether the media is + /// too big to be cached. + /// + /// * `ignore_policy` - Whether the `MediaRetentionPolicy` should be ignored + /// for this media. 
This setting should be persisted alongside the media + /// and taken into account whenever the policy is used. + async fn add_media_content_inner( + &self, + request: &MediaRequestParameters, + content: Vec, + current_time: SystemTime, + policy: MediaRetentionPolicy, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Set whether the current [`MediaRetentionPolicy`] should be ignored for + /// the media. + /// + /// If the media of the given request is not found, this should be a noop. + /// + /// The change will be taken into account in the next cleanup. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequestParameters` of the file. + /// + /// * `ignore_policy` - Whether the current `MediaRetentionPolicy` should be + /// ignored. + async fn set_ignore_media_retention_policy_inner( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error>; + + /// Get a media file's content out of the media cache. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequestParameters` of the file. + /// + /// * `current_time` - The current time, to update the last access time of + /// the media. + async fn get_media_content_inner( + &self, + request: &MediaRequestParameters, + current_time: SystemTime, + ) -> Result>, Self::Error>; + + /// Get a media file's content associated to an `MxcUri` from the + /// media store. + /// + /// # Arguments + /// + /// * `uri` - The `MxcUri` of the media file. + /// + /// * `current_time` - The current time, to update the last access time of + /// the media. + async fn get_media_content_for_uri_inner( + &self, + uri: &MxcUri, + current_time: SystemTime, + ) -> Result>, Self::Error>; + + /// Clean up the media cache with the given policy. + /// + /// For the integration tests, it is expected that content that does not + /// pass the last access expiry and max file size criteria will be + /// removed first. 
After that, the remaining cache size should be + /// computed to compare against the max cache size criteria. + /// + /// # Arguments + /// + /// * `policy` - The media retention policy to use for the cleanup. The + /// `cleanup_frequency` will be ignored. + /// + /// * `current_time` - The current time, to be used to check for expired + /// content and to be stored as the time of the last media cache cleanup. + async fn clean_up_media_cache_inner( + &self, + policy: MediaRetentionPolicy, + current_time: SystemTime, + ) -> Result<(), Self::Error>; + + /// The time of the last media cache cleanup. + async fn last_media_cleanup_time_inner(&self) -> Result, Self::Error>; +} + +#[repr(transparent)] +struct EraseMediaStoreError(T); + +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for EraseMediaStoreError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg_attr(target_family = "wasm", async_trait(?Send))] +#[cfg_attr(not(target_family = "wasm"), async_trait)] +impl MediaStore for EraseMediaStoreError { + type Error = MediaStoreError; + + async fn try_take_leased_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> Result { + self.0.try_take_leased_lock(lease_duration_ms, key, holder).await.map_err(Into::into) + } + + async fn add_media_content( + &self, + request: &MediaRequestParameters, + content: Vec, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.0.add_media_content(request, content, ignore_policy).await.map_err(Into::into) + } + + async fn replace_media_key( + &self, + from: &MediaRequestParameters, + to: &MediaRequestParameters, + ) -> Result<(), Self::Error> { + self.0.replace_media_key(from, to).await.map_err(Into::into) + } + + async fn get_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result>, Self::Error> { + self.0.get_media_content(request).await.map_err(Into::into) + } + + async fn remove_media_content( + &self, + request: 
&MediaRequestParameters, + ) -> Result<(), Self::Error> { + self.0.remove_media_content(request).await.map_err(Into::into) + } + + async fn get_media_content_for_uri( + &self, + uri: &MxcUri, + ) -> Result>, Self::Error> { + self.0.get_media_content_for_uri(uri).await.map_err(Into::into) + } + + async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<(), Self::Error> { + self.0.remove_media_content_for_uri(uri).await.map_err(Into::into) + } + + async fn set_media_retention_policy( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.0.set_media_retention_policy(policy).await.map_err(Into::into) + } + + fn media_retention_policy(&self) -> MediaRetentionPolicy { + self.0.media_retention_policy() + } + + async fn set_ignore_media_retention_policy( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), Self::Error> { + self.0.set_ignore_media_retention_policy(request, ignore_policy).await.map_err(Into::into) + } + + async fn clean_up_media_cache(&self) -> Result<(), Self::Error> { + self.0.clean_up_media_cache().await.map_err(Into::into) + } +} + +/// A type-erased [`MediaStore`]. +pub type DynMediaStore = dyn MediaStore; + +/// A type that can be type-erased into `Arc`. +/// +/// This trait is not meant to be implemented directly outside +/// `matrix-sdk-base`, but it is automatically implemented for everything that +/// implements `MediaStore`. +pub trait IntoMediaStore { + #[doc(hidden)] + fn into_event_cache_store(self) -> Arc; +} + +impl IntoMediaStore for Arc { + fn into_event_cache_store(self) -> Arc { + self + } +} + +impl IntoMediaStore for T +where + T: MediaStore + Sized + 'static, +{ + fn into_event_cache_store(self) -> Arc { + Arc::new(EraseMediaStoreError(self)) + } +} + +// Turns a given `Arc` into `Arc` by attaching the +// `MediaStore` impl vtable of `EraseMediaStoreError`. 
+impl IntoMediaStore for Arc +where + T: MediaStore + 'static, +{ + fn into_event_cache_store(self) -> Arc { + let ptr: *const T = Arc::into_raw(self); + let ptr_erased = ptr as *const EraseMediaStoreError; + // SAFETY: EraseMediaStoreError is repr(transparent) so T and + // EraseMediaStoreError have the same layout and ABI + unsafe { Arc::from_raw(ptr_erased) } + } +} diff --git a/crates/matrix-sdk-base/src/store/mod.rs b/crates/matrix-sdk-base/src/store/mod.rs index 1e5da059da5..93cd96fff77 100644 --- a/crates/matrix-sdk-base/src/store/mod.rs +++ b/crates/matrix-sdk-base/src/store/mod.rs @@ -72,6 +72,7 @@ use crate::{ MinimalRoomMemberEvent, Room, RoomCreateWithCreatorEventContent, RoomStateFilter, SessionMeta, deserialized_responses::DisplayName, event_cache::store as event_cache_store, + media::store as media_store, room::{RoomInfo, RoomInfoNotableUpdate, RoomState}, }; @@ -775,6 +776,7 @@ pub struct StoreConfig { pub(crate) crypto_store: Arc, pub(crate) state_store: Arc, pub(crate) event_cache_store: event_cache_store::EventCacheStoreLock, + pub(crate) media_store: media_store::MediaStoreLock, cross_process_store_locks_holder_name: String, } @@ -800,6 +802,10 @@ impl StoreConfig { event_cache_store::MemoryStore::new(), cross_process_store_locks_holder_name.clone(), ), + media_store: media_store::MediaStoreLock::new( + media_store::MemoryMediaStore::new(), + cross_process_store_locks_holder_name.clone(), + ), cross_process_store_locks_holder_name, } } @@ -830,6 +836,18 @@ impl StoreConfig { ); self } + + /// Set a custom implementation of an `MediaStore`. 
+ pub fn media_store(mut self, media_store: S) -> Self + where + S: media_store::IntoMediaStore, + { + self.media_store = media_store::MediaStoreLock::new( + media_store, + self.cross_process_store_locks_holder_name.clone(), + ); + self + } } #[cfg(test)] diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs index 8dc19cc2b06..a281cabcbf0 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs @@ -14,7 +14,6 @@ use std::{rc::Rc, sync::Arc}; -use matrix_sdk_base::event_cache::store::{media::MediaService, MemoryStore}; use matrix_sdk_store_encryption::StoreCipher; use web_sys::DomException; @@ -70,8 +69,6 @@ impl IndexeddbEventCacheStoreBuilder { serializer: IndexeddbEventCacheStoreSerializer::new(IndexeddbSerializer::new( self.store_cipher, )), - media_service: MediaService::new(), - memory_store: MemoryStore::new(), }) } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs index b8fb67dfdc6..7132143ba9a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs @@ -13,7 +13,7 @@ // limitations under the License use matrix_sdk_base::{ - event_cache::store::{EventCacheStore, EventCacheStoreError, MemoryStore}, + event_cache::store::{EventCacheStore, EventCacheStoreError}, SendOutsideWasm, SyncOutsideWasm, }; use serde::de::Error; @@ -43,8 +43,6 @@ pub enum IndexeddbEventCacheStoreError { NoMaxChunkId, #[error("transaction: {0}")] Transaction(#[from] IndexeddbEventCacheStoreTransactionError), - #[error("media store: {0}")] - MemoryStore(::Error), } impl From for IndexeddbEventCacheStoreError { @@ -68,7 +66,6 @@ impl From for EventCacheStoreError { | NoMaxChunkId | UnableToLoadChunk => Self::InvalidData { details: value.to_string() }, 
Transaction(inner) => inner.into(), - MemoryStore(inner) => inner, } } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs index 020a546b3c6..ffe6a12406a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs @@ -684,30 +684,3 @@ macro_rules! indexeddb_event_cache_store_integration_tests { } }; } - -/// This is a partial copy of -/// [`matrix_sdk_base::event_cache_store_media_integration_tests`] that contains -/// tests for functions of [`EventCacheStoreMedia`] that are implemented by -/// [`IndexeddbEventCacheStore`]. -/// -/// This is useful for adding functionality to [`IndexeddbEventCacheStore`] over -/// multiple pull requests. Once a full implementation [`EventCacheStoreMedia`] -/// exists, this will be replaced with the actual integration tests referenced -/// above. -#[macro_export] -macro_rules! 
event_cache_store_media_integration_tests { - () => { - mod event_cache_store_media_integration_tests { - use matrix_sdk_base::event_cache::store::media::EventCacheStoreMediaIntegrationTests; - use matrix_sdk_test::async_test; - - use super::get_event_cache_store; - - #[async_test] - async fn test_store_media_retention_policy() { - let event_cache_store_media = get_event_cache_store().await.unwrap(); - event_cache_store_media.test_store_media_retention_policy().await; - } - } - }; -} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index f7163827b5b..9978ea906fe 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -130,17 +130,6 @@ pub mod v1 { pub const EVENTS_RELATION_RELATION_TYPES: &str = "events_relation_relation_type"; pub const GAPS: &str = "gaps"; pub const GAPS_KEY_PATH: &str = "id"; - pub const MEDIA_RETENTION_POLICY_KEY: &str = "media_retention_policy"; - pub const MEDIA: &str = "media"; - pub const MEDIA_KEY_PATH: &str = "id"; - pub const MEDIA_SOURCE: &str = "media_source"; - pub const MEDIA_SOURCE_KEY_PATH: &str = "source"; - pub const MEDIA_CONTENT_SIZE: &str = "media_content_size"; - pub const MEDIA_CONTENT_SIZE_KEY_PATH: &str = "content_size"; - pub const MEDIA_LAST_ACCESS: &str = "media_last_access"; - pub const MEDIA_LAST_ACCESS_KEY_PATH: &str = "last_access"; - pub const MEDIA_RETENTION_METADATA: &str = "media_retention_metadata"; - pub const MEDIA_RETENTION_METADATA_KEY_PATH: &str = "retention_metadata"; } /// Create all object stores and indices for v1 database @@ -150,7 +139,6 @@ pub mod v1 { create_linked_chunks_object_store(db)?; create_events_object_store(db)?; create_gaps_object_store(db)?; - create_media_object_store(db)?; Ok(()) } @@ -229,32 +217,4 @@ pub mod v1 { let _ = db.create_object_store_with_params(keys::GAPS, &object_store_params)?; Ok(()) 
} - - /// Create an object store for tracking information about media. - /// - /// * Primary Key - `id` - /// * Index - `source` - tracks the [`MediaSource`][1] of the associated - /// media - /// * Index - `content_size` - tracks the size of the media content and - /// whether to ignore the [`MediaRetentionPolicy`][2] - /// * Index - `last_access` - tracks the last time the associated media was - /// accessed - /// * Index - `retention_metadata` - tracks all retention metadata - i.e., - /// joins `content_size` and `last_access` - /// - /// [1]: ruma::events::room::MediaSource - /// [2]: matrix_sdk_base::event_cache::store::media::MediaRetentionPolicy - fn create_media_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::MEDIA_KEY_PATH.into())); - let media = db.create_object_store_with_params(keys::MEDIA, &object_store_params)?; - media.create_index(keys::MEDIA_SOURCE, &keys::MEDIA_SOURCE_KEY_PATH.into())?; - media.create_index(keys::MEDIA_CONTENT_SIZE, &keys::MEDIA_CONTENT_SIZE_KEY_PATH.into())?; - media.create_index(keys::MEDIA_LAST_ACCESS, &keys::MEDIA_LAST_ACCESS_KEY_PATH.into())?; - media.create_index( - keys::MEDIA_RETENTION_METADATA, - &keys::MEDIA_RETENTION_METADATA_KEY_PATH.into(), - )?; - Ok(()) - } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 05eee8d291f..400112ce7e1 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -18,21 +18,11 @@ use std::{rc::Rc, time::Duration}; use indexed_db_futures::IdbDatabase; use matrix_sdk_base::{ - event_cache::{ - store::{ - media::{ - EventCacheStoreMedia, IgnoreMediaRetentionPolicy, MediaRetentionPolicy, - MediaService, - }, - EventCacheStore, MemoryStore, - }, - Event, Gap, - }, + event_cache::{store::EventCacheStore, Event, Gap}, 
linked_chunk::{ ChunkIdentifier, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, Position, RawChunk, Update, }, - media::MediaRequestParameters, timer, }; use ruma::{ @@ -72,15 +62,6 @@ pub struct IndexeddbEventCacheStore { inner: Rc, // A serializer with functionality tailored to `IndexeddbEventCacheStore` serializer: IndexeddbEventCacheStoreSerializer, - // A service for conveniently delegating media-related queries to an `EventCacheStoreMedia` - // implementation - media_service: MediaService, - // An in-memory store for providing temporary implementations for - // functions of `EventCacheStore`. - // - // NOTE: This will be removed once we have IndexedDB-backed implementations for all - // functions in `EventCacheStore`. - memory_store: MemoryStore, } impl IndexeddbEventCacheStore { @@ -515,211 +496,6 @@ impl EventCacheStore for IndexeddbEventCacheStore { transaction.commit().await?; Ok(()) } - - #[instrument(skip_all)] - async fn add_media_content( - &self, - request: &MediaRequestParameters, - content: Vec, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.add_media_content(self, request, content, ignore_policy).await - } - - #[instrument(skip_all)] - async fn replace_media_key( - &self, - from: &MediaRequestParameters, - to: &MediaRequestParameters, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .replace_media_key(from, to) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn get_media_content( - &self, - request: &MediaRequestParameters, - ) -> Result>, IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.get_media_content(self, request).await - } - - #[instrument(skip_all)] - async fn remove_media_content( - &self, - request: &MediaRequestParameters, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let 
_timer = timer!("method"); - self.memory_store - .remove_media_content(request) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip(self))] - async fn get_media_content_for_uri( - &self, - uri: &MxcUri, - ) -> Result>, IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.get_media_content_for_uri(self, uri).await - } - - #[instrument(skip(self))] - async fn remove_media_content_for_uri( - &self, - uri: &MxcUri, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .remove_media_content_for_uri(uri) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn set_media_retention_policy( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.set_media_retention_policy(self, policy).await - } - - #[instrument(skip_all)] - fn media_retention_policy(&self) -> MediaRetentionPolicy { - let _timer = timer!("method"); - self.media_service.media_retention_policy() - } - - #[instrument(skip_all)] - async fn set_ignore_media_retention_policy( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.set_ignore_media_retention_policy(self, request, ignore_policy).await - } - - #[instrument(skip_all)] - async fn clean_up_media_cache(&self) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.media_service.clean_up_media_cache(self).await - } -} - -#[cfg(target_family = "wasm")] -#[async_trait::async_trait(?Send)] -impl EventCacheStoreMedia for IndexeddbEventCacheStore { - type Error = IndexeddbEventCacheStoreError; - - #[instrument(skip_all)] - async fn media_retention_policy_inner( - &self, - ) -> Result, IndexeddbEventCacheStoreError> { - let _timer = 
timer!("method"); - self.transaction(&[MediaRetentionPolicy::OBJECT_STORE], IdbTransactionMode::Readonly)? - .get_media_retention_policy() - .await - .map_err(Into::into) - } - - #[instrument(skip_all)] - async fn set_media_retention_policy_inner( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.transaction(&[MediaRetentionPolicy::OBJECT_STORE], IdbTransactionMode::Readwrite)? - .put_item(&policy) - .await - .map_err(Into::into) - } - - #[instrument(skip_all)] - async fn add_media_content_inner( - &self, - request: &MediaRequestParameters, - content: Vec, - current_time: SystemTime, - policy: MediaRetentionPolicy, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .add_media_content_inner(request, content, current_time, policy, ignore_policy) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn set_ignore_media_retention_policy_inner( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .set_ignore_media_retention_policy_inner(request, ignore_policy) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn get_media_content_inner( - &self, - request: &MediaRequestParameters, - current_time: SystemTime, - ) -> Result>, IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .get_media_content_inner(request, current_time) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn get_media_content_for_uri_inner( - &self, - uri: &MxcUri, - current_time: SystemTime, - ) -> Result>, IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - 
.get_media_content_for_uri_inner(uri, current_time) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn clean_up_media_cache_inner( - &self, - policy: MediaRetentionPolicy, - current_time: SystemTime, - ) -> Result<(), IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .clean_up_media_cache_inner(policy, current_time) - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } - - #[instrument(skip_all)] - async fn last_media_cleanup_time_inner( - &self, - ) -> Result, IndexeddbEventCacheStoreError> { - let _timer = timer!("method"); - self.memory_store - .last_media_cleanup_time_inner() - .await - .map_err(IndexeddbEventCacheStoreError::MemoryStore) - } } #[cfg(all(test, target_family = "wasm"))] @@ -732,8 +508,7 @@ mod tests { use uuid::Uuid; use crate::{ - event_cache_store::IndexeddbEventCacheStore, event_cache_store_media_integration_tests, - indexeddb_event_cache_store_integration_tests, + event_cache_store::IndexeddbEventCacheStore, indexeddb_event_cache_store_integration_tests, }; mod unencrypted { @@ -750,8 +525,6 @@ mod tests { event_cache_store_integration_tests_time!(); indexeddb_event_cache_store_integration_tests!(); - - event_cache_store_media_integration_tests!(); } mod encrypted { @@ -768,7 +541,5 @@ mod tests { event_cache_store_integration_tests_time!(); indexeddb_event_cache_store_integration_tests!(); - - event_cache_store_media_integration_tests!(); } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/mod.rs index d3c6faa30c2..d15cea801d8 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/mod.rs @@ -28,7 +28,6 @@ use crate::{ serializer::IndexeddbSerializer, }; -pub mod foreign; pub mod traits; pub mod types; diff --git 
a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs index 3542980a62c..c9f69760f5d 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/types.rs @@ -29,30 +29,20 @@ use std::{sync::LazyLock, time::Duration}; -use matrix_sdk_base::{ - event_cache::store::media::{IgnoreMediaRetentionPolicy, MediaRetentionPolicy}, - linked_chunk::{ChunkIdentifier, LinkedChunkId}, - media::{MediaRequestParameters, UniqueKey}, -}; +use matrix_sdk_base::linked_chunk::{ChunkIdentifier, LinkedChunkId}; use matrix_sdk_crypto::CryptoStoreError; -use ruma::{ - events::{relation::RelationType, room::MediaSource}, - EventId, OwnedEventId, RoomId, -}; +use ruma::{events::relation::RelationType, EventId, OwnedEventId, RoomId}; use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::{ event_cache_store::{ migrations::current::keys, - serializer::{ - foreign::ignore_media_retention_policy, - traits::{ - Indexed, IndexedKey, IndexedKeyBounds, IndexedKeyComponentBounds, - IndexedPrefixKeyBounds, IndexedPrefixKeyComponentBounds, - }, + serializer::traits::{ + Indexed, IndexedKey, IndexedKeyBounds, IndexedKeyComponentBounds, + IndexedPrefixKeyBounds, IndexedPrefixKeyComponentBounds, }, - types::{Chunk, Event, Gap, Lease, Media, Position}, + types::{Chunk, Event, Gap, Lease, Position}, }, serializer::{IndexeddbSerializer, MaybeEncrypted}, }; @@ -154,39 +144,6 @@ static INDEXED_KEY_UPPER_EVENT_POSITION: LazyLock = LazyLock::new(|| P index: INDEXED_KEY_UPPER_EVENT_INDEX, }); -/// An [`IndexedMediaContentSize`] set to it's minimal value - i.e., `0`. -/// -/// This value is useful for constructing a key range over all keys which -/// contain [`IndexedMediaContentSize`] values when used in conjunction with -/// [`INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE`]. 
-const INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE: IndexedMediaContentSize = 0; - -/// An [`IndexedMediaContentSize`] set to [`js_sys::Number::MAX_SAFE_INTEGER`]. -/// Note that this restricts the size of [`IndexedMedia::content`], which -/// ultimately restricts the size of [`Media::content`]. -/// -/// This value is useful for constructing a key range over all keys which -/// contain [`IndexedMediaContentSize`] values when used in conjunction with -/// [`INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE`]. -const INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE: IndexedMediaContentSize = - js_sys::Number::MAX_SAFE_INTEGER as usize; - -/// The minimum possible [`Duration`]. -/// -/// This value is useful for constructing a key range over all keys which -/// contain time-related values when used in conjunction with -/// [`INDEXED_KEY_UPPER_DURATION`]. -const INDEXED_KEY_LOWER_DURATION: Duration = Duration::ZERO; - -/// A [`Duration`] constructed with [`js_sys::Number::MAX_SAFE_INTEGER`] -/// seconds. -/// -/// This value is useful for constructing a key range over all keys which -/// contain time-related values in seconds when used in conjunction with -/// [`INDEXED_KEY_LOWER_DURATION`]. -const INDEXED_KEY_UPPER_DURATION_SECONDS: Duration = - Duration::from_secs(js_sys::Number::MAX_SAFE_INTEGER as u64); - /// Representation of a range of keys of type `K`. This is loosely /// correlated with [IDBKeyRange][1], with a few differences. 
/// @@ -318,21 +275,6 @@ pub type IndexedEventContent = MaybeEncrypted; /// A (possibly) encrypted representation of a [`Gap`] pub type IndexedGapContent = MaybeEncrypted; -/// A (possibly) encrypted representation of a [`MediaRetentionPolicy`] -pub type IndexedMediaRetentionPolicyContent = MaybeEncrypted; - -/// A (possibly) encrypted representation of a [`MediaMetadata`][1] -/// -/// [1]: crate::event_cache_store::types::MediaMetadata -pub type IndexedMediaMetadata = MaybeEncrypted; - -/// A (possibly) encrypted representation of [`Media::content`] -pub type IndexedMediaContent = Vec; - -/// A representation of the size in bytes of the [`IndexedMediaContent`] which -/// is suitable for use in an IndexedDB key -pub type IndexedMediaContentSize = usize; - /// A representation of time in seconds since the [Unix /// Epoch](std::time::UNIX_EPOCH) which is suitable for use in an IndexedDB key pub type IndexedSecondsSinceUnixEpoch = u64; @@ -895,334 +837,3 @@ impl<'a> IndexedPrefixKeyComponentBounds<'a, Gap, LinkedChunkId<'a>> for Indexed ) } } - -/// Represents the [`MediaRetentionPolicy`] record in the [`CORE`][1] object -/// store. -/// -/// [1]: crate::event_cache_store::migrations::v1::create_core_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaRetentionPolicy { - /// The primary key of the object store. - pub id: IndexedCoreIdKey, - /// The (possibly) encrypted content - i.e., a [`MediaRetentionPolicy`]. 
- pub content: IndexedMediaRetentionPolicyContent, -} - -impl Indexed for MediaRetentionPolicy { - const OBJECT_STORE: &'static str = keys::CORE; - - type IndexedType = IndexedMediaRetentionPolicy; - type Error = CryptoStoreError; - - fn to_indexed( - &self, - serializer: &IndexeddbSerializer, - ) -> Result { - Ok(Self::IndexedType { - id: >::encode((), serializer), - content: serializer.maybe_encrypt_value(self)?, - }) - } - - fn from_indexed( - indexed: Self::IndexedType, - serializer: &IndexeddbSerializer, - ) -> Result { - serializer.maybe_decrypt_value(indexed.content) - } -} - -impl IndexedKey for IndexedCoreIdKey { - type KeyComponents<'a> = (); - - fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { - serializer.encode_key_as_string(keys::CORE, keys::MEDIA_RETENTION_POLICY_KEY) - } -} - -/// Represents the [`MEDIA`][1] object store. -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMedia { - /// The primary key of the object store - pub id: IndexedMediaIdKey, - /// The (possibly) hashed source of the media derived from - /// [`MediaSource::unique_key`] - pub source: IndexedMediaSourceKey, - /// The size (in bytes) of the media content and whether to ignore the - /// [`MediaRetentionPolicy`] - pub content_size: IndexedMediaContentSizeKey, - /// The last time the media was accessed and whether to ignore the - /// [`MediaRetentionPolicy`] - pub last_access: IndexedMediaLastAccessKey, - /// The last the media was accessed, the size (in bytes) of the media - /// content, and whether to ignore the [`MediaRetentionPolicy`] - pub retention_metadata: IndexedMediaRetentionMetadataKey, - /// The (possibly) encrypted metadata - i.e., [`MediaMetadata`][1] - /// - /// [1]: crate::event_cache_store::types::MediaMetadata - pub metadata: IndexedMediaMetadata, - /// The (possibly) encrypted content - i.e., [`Media::content`] - pub content: 
IndexedMediaContent, -} - -#[derive(Debug, Error)] -pub enum IndexedMediaError { - #[error("crypto store: {0}")] - CryptoStore(#[from] CryptoStoreError), - #[error("serialization: {0}")] - Serialization(#[from] rmp_serde::encode::Error), - #[error("deserialization: {0}")] - Deserialization(#[from] rmp_serde::decode::Error), -} - -impl Indexed for Media { - const OBJECT_STORE: &'static str = keys::MEDIA; - - type IndexedType = IndexedMedia; - type Error = IndexedMediaError; - - fn to_indexed( - &self, - serializer: &IndexeddbSerializer, - ) -> Result { - let content = rmp_serde::to_vec_named(&serializer.maybe_encrypt_value(&self.content)?)?; - Ok(Self::IndexedType { - id: >::encode( - &self.metadata.request_parameters, - serializer, - ), - source: >::encode( - &self.metadata.request_parameters.source, - serializer, - ), - content_size: IndexedMediaContentSizeKey::encode( - (self.metadata.ignore_policy, content.len()), - serializer, - ), - last_access: IndexedMediaLastAccessKey::encode( - (self.metadata.ignore_policy, self.metadata.last_access), - serializer, - ), - retention_metadata: IndexedMediaRetentionMetadataKey::encode( - (self.metadata.ignore_policy, self.metadata.last_access, content.len()), - serializer, - ), - metadata: serializer.maybe_encrypt_value(&self.metadata)?, - content, - }) - } - - fn from_indexed( - indexed: Self::IndexedType, - serializer: &IndexeddbSerializer, - ) -> Result { - Ok(Self { - metadata: serializer.maybe_decrypt_value(indexed.metadata)?, - content: serializer.maybe_decrypt_value(rmp_serde::from_slice(&indexed.content)?)?, - }) - } -} - -/// The primary key of the [`MEDIA`][1] object store, which is constructed from: -/// -/// - The (possibly) hashed value returned by -/// [`MediaRequestParameters::unique_key`] -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaIdKey(String); - -impl IndexedKey for IndexedMediaIdKey { - type 
KeyComponents<'a> = &'a MediaRequestParameters; - - fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { - Self(serializer.encode_key_as_string(keys::MEDIA, components.unique_key())) - } -} - -/// The value associated with the [`source`](IndexedMedia::source) index of the -/// [`MEDIA`][1] object store, which is constructed from: -/// -/// - The (possibly) hashed value returned by [`MediaSource::unique_key`] -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaSourceKey(String); - -impl IndexedKey for IndexedMediaSourceKey { - type KeyComponents<'a> = &'a MediaSource; - - fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { - Self(serializer.encode_key_as_string(keys::MEDIA_SOURCE, components.unique_key())) - } -} - -/// The value associated with the [`content_size`](IndexedMedia::content_size) -/// index of the [`MEDIA`][1] object store, which is constructed from: -/// -/// - The value of [`IgnoreMediaRetentionPolicy`] -/// - The size in bytes of the associated [`IndexedMedia::content`] -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaContentSizeKey( - #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy, - IndexedMediaContentSize, -); - -impl IndexedMediaContentSizeKey { - /// Returns whether the associated [`IndexedMedia`] record should ignore the - /// global [`MediaRetentionPolicy`] - pub fn ignore_policy(&self) -> bool { - self.0.is_yes() - } - - /// Returns the size in bytes of the associated [`IndexedMedia::content`] - pub fn content_size(&self) -> usize { - self.1 - } -} - -impl IndexedKey for IndexedMediaContentSizeKey { - type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, IndexedMediaContentSize); - - fn encode( - (ignore_policy, content_size): 
Self::KeyComponents<'_>, - _: &IndexeddbSerializer, - ) -> Self { - Self(ignore_policy, content_size) - } -} - -impl IndexedKeyComponentBounds for IndexedMediaContentSizeKey { - fn lower_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No) - } - - fn upper_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes) - } -} - -impl<'a> IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy> - for IndexedMediaContentSizeKey -{ - fn lower_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE) - } - - fn upper_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE) - } -} - -/// The value associated with the [`last_access`](IndexedMedia::last_access) -/// index of the [`MEDIA`][1] object store, which is constructed from: -/// -/// - The value of [`IgnoreMediaRetentionPolicy`] -/// - The last time the associated [`IndexedMedia`] was accessed (in seconds -/// since the Unix Epoch) -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaLastAccessKey( - #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy, - IndexedSecondsSinceUnixEpoch, -); - -impl IndexedKey for IndexedMediaLastAccessKey { - type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, Duration); - - fn encode( - (ignore_policy, last_access): Self::KeyComponents<'_>, - _: &IndexeddbSerializer, - ) -> Self { - Self(ignore_policy, last_access.as_secs()) - } -} - -impl IndexedKeyComponentBounds for IndexedMediaLastAccessKey { - fn lower_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No) - } - - fn 
upper_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes) - } -} - -impl<'a> IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy> - for IndexedMediaLastAccessKey -{ - fn lower_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_LOWER_DURATION) - } - - fn upper_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_UPPER_DURATION_SECONDS) - } -} - -/// The value associated with the -/// [`retention_metadata`](IndexedMedia::retention_metadata) index of the -/// [`MEDIA`][1] object store, which is constructed from: -/// -/// - The value of [`IgnoreMediaRetentionPolicy`] -/// - The last time the associated [`IndexedMedia`] was accessed (in seconds -/// since the Unix Epoch) -/// - The size in bytes of the associated [`IndexedMedia::content`] -/// -/// [1]: crate::event_cache_store::migrations::v1::create_media_object_store -#[derive(Debug, Serialize, Deserialize)] -pub struct IndexedMediaRetentionMetadataKey( - #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy, - IndexedSecondsSinceUnixEpoch, - IndexedMediaContentSize, -); - -impl IndexedKey for IndexedMediaRetentionMetadataKey { - type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, Duration, IndexedMediaContentSize); - - fn encode( - (ignore_policy, last_access, content_size): Self::KeyComponents<'_>, - _: &IndexeddbSerializer, - ) -> Self { - Self(ignore_policy, last_access.as_secs(), content_size) - } -} - -impl IndexedKeyComponentBounds for IndexedMediaRetentionMetadataKey { - fn lower_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No) - } - - fn upper_key_components() -> Self::KeyComponents<'static> { - Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes) - } -} - 
-impl<'a> IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy> - for IndexedMediaRetentionMetadataKey -{ - fn lower_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_LOWER_DURATION, INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE) - } - - fn upper_key_components_with_prefix( - prefix: IgnoreMediaRetentionPolicy, - ) -> Self::KeyComponents<'a> { - (prefix, INDEXED_KEY_UPPER_DURATION_SECONDS, INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE) - } -} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 3ea822f9d8d..3e58b32eeb4 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -14,10 +14,7 @@ use indexed_db_futures::{prelude::IdbTransaction, IdbQuerySource}; use matrix_sdk_base::{ - event_cache::{ - store::{media::MediaRetentionPolicy, EventCacheStoreError}, - Event as RawEvent, Gap as RawGap, - }, + event_cache::{store::EventCacheStoreError, Event as RawEvent, Gap as RawGap}, linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, RawChunk}, }; use ruma::{events::relation::RelationType, EventId, OwnedEventId, RoomId}; @@ -865,11 +862,4 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> { ) -> Result<(), IndexeddbEventCacheStoreTransactionError> { self.delete_items_by_linked_chunk_id::(linked_chunk_id).await } - - /// Query IndexedDB for the stored [`MediaRetentionPolicy`] - pub async fn get_media_retention_policy( - &self, - ) -> Result, IndexeddbEventCacheStoreTransactionError> { - self.get_item_by_key_components::(()).await - } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs index f6b3aa0c39a..62523fadb8c 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs +++ 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/types.rs @@ -16,9 +16,8 @@ use std::time::Duration; use matrix_sdk_base::{ deserialized_responses::TimelineEvent, - event_cache::store::{extract_event_relation, media::IgnoreMediaRetentionPolicy}, + event_cache::store::extract_event_relation, linked_chunk::{ChunkIdentifier, LinkedChunkId, OwnedLinkedChunkId}, - media::MediaRequestParameters, }; use ruma::{OwnedEventId, OwnedRoomId, RoomId}; use serde::{Deserialize, Serialize}; @@ -222,27 +221,3 @@ pub struct Gap { /// "end" field of a `/messages` response. pub prev_token: String, } - -/// A representation of media data which can be stored in IndexedDB. -#[derive(Debug, Serialize, Deserialize)] -pub struct Media { - /// The metadata associated with [`Media::content`] - pub metadata: MediaMetadata, - /// The content of the media - pub content: Vec, -} - -/// A representation of media metadata which can be stored in IndexedDB. -#[derive(Debug, Serialize, Deserialize)] -pub struct MediaMetadata { - /// The parameters specifying the type and source of the media contained in - /// [`Media::content`] - pub request_parameters: MediaRequestParameters, - /// The last time the media was accessed in IndexedDB - pub last_access: Duration, - /// Whether to ignore the [`MediaRetentionPolicy`][1] stored in IndexedDB - /// - /// [1]: matrix_sdk_base::event_cache::store::media::MediaRetentionPolicy - #[serde(with = "crate::event_cache_store::serializer::foreign::ignore_media_retention_policy")] - pub ignore_policy: IgnoreMediaRetentionPolicy, -} diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index df4ee66e49b..9cf630e5fb0 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -8,6 +8,8 @@ use thiserror::Error; mod crypto_store; #[cfg(feature = "event-cache-store")] mod event_cache_store; +#[cfg(feature = "event-cache-store")] +mod media_store; mod safe_encode; #[cfg(feature = 
"e2e-encryption")] mod serialize_bool_for_indexeddb; diff --git a/crates/matrix-sdk-indexeddb/src/media_store/builder.rs b/crates/matrix-sdk-indexeddb/src/media_store/builder.rs new file mode 100644 index 00000000000..cf4bae249ba --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/builder.rs @@ -0,0 +1,77 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use std::{rc::Rc, sync::Arc}; + +use matrix_sdk_base::media::store::{MediaService, MemoryMediaStore}; +use matrix_sdk_store_encryption::StoreCipher; +use web_sys::DomException; + +use crate::{ + media_store::{ + error::IndexeddbMediaStoreError, migrations::open_and_upgrade_db, + serializer::IndexeddbMediaStoreSerializer, IndexeddbMediaStore, + }, + serializer::IndexeddbSerializer, +}; + +/// A type for conveniently building an [`IndexeddbMediaStore`] +pub struct IndexeddbMediaStoreBuilder { + // The name of the IndexedDB database which will be opened + database_name: String, + // The store cipher, if any, to use when encrypting data + // before it is persisted to the IndexedDB database + store_cipher: Option<Arc<StoreCipher>>, +} + +impl Default for IndexeddbMediaStoreBuilder { + fn default() -> Self { + Self { database_name: Self::DEFAULT_DATABASE_NAME.to_owned(), store_cipher: None } + } +} + +impl IndexeddbMediaStoreBuilder { + /// The default name of the IndexedDB database used to back the + /// [`IndexeddbMediaStore`] + pub const 
DEFAULT_DATABASE_NAME: &'static str = "media"; + + /// Sets the name of the IndexedDB database which will be opened. This + /// defaults to [`Self::DEFAULT_DATABASE_NAME`]. + pub fn database_name(mut self, name: String) -> Self { + self.database_name = name; + self + } + + /// Sets the store cipher to use when encrypting data before it is persisted + /// to the IndexedDB database. By default, no store cipher is used - + /// i.e., data is not encrypted before it is persisted. + pub fn store_cipher(mut self, store_cipher: Arc<StoreCipher>) -> Self { + self.store_cipher = Some(store_cipher); + self + } + + /// Opens the IndexedDB database with the provided name. If successfully + /// opened, builds the [`IndexeddbMediaStore`] with that database + /// and the provided store cipher. + pub async fn build(self) -> Result<IndexeddbMediaStore, IndexeddbMediaStoreError> { + Ok(IndexeddbMediaStore { + inner: Rc::new(open_and_upgrade_db(&self.database_name).await?), + serializer: IndexeddbMediaStoreSerializer::new(IndexeddbSerializer::new( + self.store_cipher, + )), + media_service: MediaService::new(), + memory_store: MemoryMediaStore::new(), + }) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/error.rs b/crates/matrix-sdk-indexeddb/src/media_store/error.rs new file mode 100644 index 00000000000..6ccd037cd1b --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/error.rs @@ -0,0 +1,53 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License + +use matrix_sdk_base::{ + media::store::{MediaStore, MediaStoreError, MemoryMediaStore}, + SendOutsideWasm, SyncOutsideWasm, +}; +use thiserror::Error; + +use crate::media_store::transaction::IndexeddbMediaStoreTransactionError; + +/// A trait that combines the necessary traits needed for asynchronous runtimes, +/// but excludes them when running in a web environment - i.e., when +/// `#[cfg(target_family = "wasm")]`. +pub trait AsyncErrorDeps: std::error::Error + SendOutsideWasm + SyncOutsideWasm + 'static {} +​ +impl<T> AsyncErrorDeps for T where T: std::error::Error + SendOutsideWasm + SyncOutsideWasm + 'static +{} + +#[derive(Debug, Error)] +pub enum IndexeddbMediaStoreError { + #[error("media store: {0}")] + MemoryStore(<MemoryMediaStore as MediaStore>::Error), + + #[error("transaction: {0}")] + Transaction(#[from] IndexeddbMediaStoreTransactionError), + + #[error("DomException {name} ({code}): {message}")] + DomException { name: String, message: String, code: u16 }, +} + +impl From<IndexeddbMediaStoreError> for MediaStoreError { + fn from(value: IndexeddbMediaStoreError) -> Self { + Self::Backend(Box::new(value)) + } +} + +impl From<web_sys::DomException> for IndexeddbMediaStoreError { + fn from(value: web_sys::DomException) -> Self { + Self::DomException { name: value.name(), message: value.message(), code: value.code() } + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs new file mode 100644 index 00000000000..ddb4f0e6ae7 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs @@ -0,0 +1,181 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use indexed_db_futures::{ + idb_object_store::IdbObjectStoreParameters, request::IdbOpenDbRequestLike, IdbDatabase, + IdbVersionChangeEvent, +}; +use thiserror::Error; +use wasm_bindgen::JsValue; +use web_sys::{DomException, IdbIndexParameters}; + +/// The current version and keys used in the database. +pub mod current { + use super::{v1, Version}; + + pub const VERSION: Version = Version::V1; + pub use v1::keys; +} + +/// Opens a connection to the IndexedDB database and takes care of upgrading it +/// if necessary. +#[allow(unused)] +pub async fn open_and_upgrade_db(name: &str) -> Result { + let mut request = IdbDatabase::open_u32(name, current::VERSION as u32)?; + request.set_on_upgrade_needed(Some(|event: &IdbVersionChangeEvent| -> Result<(), JsValue> { + let mut version = + Version::try_from(event.old_version() as u32).map_err(DomException::from)?; + while version < current::VERSION { + version = match version.upgrade(event.db())? { + Some(next) => next, + None => current::VERSION, /* No more upgrades to apply, jump forward! */ + }; + } + Ok(()) + })); + request.await +} + +/// Represents the version of the IndexedDB database. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u32)] +pub enum Version { + /// Version 0 of the database, for details see [`v0`] + V0 = 0, + /// Version 1 of the database, for details see [`v1`] + V1 = 1, +} + +impl Version { + /// Upgrade the database to the next version, if one exists. 
+ pub fn upgrade(self, db: &IdbDatabase) -> Result, DomException> { + match self { + Self::V0 => v0::upgrade(db).map(Some), + Self::V1 => Ok(None), + } + } +} + +#[derive(Debug, Error)] +#[error("unknown version: {0}")] +pub struct UnknownVersionError(u32); + +impl TryFrom for Version { + type Error = UnknownVersionError; + + fn try_from(value: u32) -> Result { + match value { + 0 => Ok(Version::V0), + 1 => Ok(Version::V1), + v => Err(UnknownVersionError(v)), + } + } +} + +impl From for DomException { + fn from(value: UnknownVersionError) -> Self { + let message = format!("unknown version: {}", value.0); + let name = "UnknownVersionError"; + match DomException::new_with_message_and_name(&message, name) { + Ok(inner) => inner, + Err(err) => err.into(), + } + } +} + +pub mod v0 { + use super::*; + + /// Upgrade database from `v0` to `v1` + pub fn upgrade(db: &IdbDatabase) -> Result { + v1::create_object_stores(db)?; + Ok(Version::V1) + } +} + +pub mod v1 { + use super::*; + + pub mod keys { + pub const CORE: &str = "core"; + pub const CORE_KEY_PATH: &str = "id"; + pub const LEASES: &str = "leases"; + pub const LEASES_KEY_PATH: &str = "id"; + pub const MEDIA_RETENTION_POLICY_KEY: &str = "media_retention_policy"; + pub const MEDIA: &str = "media"; + pub const MEDIA_KEY_PATH: &str = "id"; + pub const MEDIA_SOURCE: &str = "media_source"; + pub const MEDIA_SOURCE_KEY_PATH: &str = "source"; + pub const MEDIA_CONTENT_SIZE: &str = "media_content_size"; + pub const MEDIA_CONTENT_SIZE_KEY_PATH: &str = "content_size"; + pub const MEDIA_LAST_ACCESS: &str = "media_last_access"; + pub const MEDIA_LAST_ACCESS_KEY_PATH: &str = "last_access"; + pub const MEDIA_RETENTION_METADATA: &str = "media_retention_metadata"; + pub const MEDIA_RETENTION_METADATA_KEY_PATH: &str = "retention_metadata"; + } + + /// Create all object stores and indices for v1 database + pub fn create_object_stores(db: &IdbDatabase) -> Result<(), DomException> { + create_core_object_store(db)?; + 
create_lease_object_store(db)?; + create_media_object_store(db)?; + Ok(()) + } + + /// Create an object store for tracking miscellaneous information + /// + /// * Primary Key - `id` + fn create_core_object_store(db: &IdbDatabase) -> Result<(), DomException> { + let mut object_store_params = IdbObjectStoreParameters::new(); + object_store_params.key_path(Some(&keys::CORE_KEY_PATH.into())); + let _ = db.create_object_store_with_params(keys::CORE, &object_store_params)?; + Ok(()) + } + + /// Create an object store tracking leases on time-based locks + fn create_lease_object_store(db: &IdbDatabase) -> Result<(), DomException> { + let mut object_store_params = IdbObjectStoreParameters::new(); + object_store_params.key_path(Some(&keys::LEASES_KEY_PATH.into())); + let _ = db.create_object_store_with_params(keys::LEASES, &object_store_params)?; + Ok(()) + } + + /// Create an object store for tracking information about media. + /// + /// * Primary Key - `id` + /// * Index - `source` - tracks the [`MediaSource`][1] of the associated + /// media + /// * Index - `content_size` - tracks the size of the media content and + /// whether to ignore the [`MediaRetentionPolicy`][2] + /// * Index - `last_access` - tracks the last time the associated media was + /// accessed + /// * Index - `retention_metadata` - tracks all retention metadata - i.e., + /// joins `content_size` and `last_access` + /// + /// [1]: ruma::events::room::MediaSource + /// [2]: matrix_sdk_base::media::store::MediaRetentionPolicy + fn create_media_object_store(db: &IdbDatabase) -> Result<(), DomException> { + let mut object_store_params = IdbObjectStoreParameters::new(); + object_store_params.key_path(Some(&keys::MEDIA_KEY_PATH.into())); + let media = db.create_object_store_with_params(keys::MEDIA, &object_store_params)?; + media.create_index(keys::MEDIA_SOURCE, &keys::MEDIA_SOURCE_KEY_PATH.into())?; + media.create_index(keys::MEDIA_CONTENT_SIZE, &keys::MEDIA_CONTENT_SIZE_KEY_PATH.into())?; + 
media.create_index(keys::MEDIA_LAST_ACCESS, &keys::MEDIA_LAST_ACCESS_KEY_PATH.into())?; + media.create_index( + keys::MEDIA_RETENTION_METADATA, + &keys::MEDIA_RETENTION_METADATA_KEY_PATH.into(), + )?; + Ok(()) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/mod.rs b/crates/matrix-sdk-indexeddb/src/media_store/mod.rs new file mode 100644 index 00000000000..6bb5edafbbc --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/mod.rs @@ -0,0 +1,374 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +mod builder; +mod error; +mod migrations; +mod serializer; +mod transaction; +mod types; +use std::{rc::Rc, time::Duration}; + +use indexed_db_futures::IdbDatabase; +use matrix_sdk_base::{ + media::{ + store::{ + IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStore, + MediaStoreInner, MemoryMediaStore, + }, + MediaRequestParameters, + }, + timer, +}; +use ruma::{time::SystemTime, MilliSecondsSinceUnixEpoch, MxcUri}; +use serializer::IndexeddbMediaStoreSerializer; +use tracing::instrument; +use web_sys::IdbTransactionMode; + +use crate::media_store::{ + builder::IndexeddbMediaStoreBuilder, error::IndexeddbMediaStoreError, + serializer::traits::Indexed, transaction::IndexeddbMediaStoreTransaction, types::Lease, +}; + +/// A type for providing an IndexedDB implementation of [`MediaStore`][1]. 
+/// This is meant to be used as a backend to [`MediaStore`][1] in browser +/// contexts. +/// +/// [1]: matrix_sdk_base::media::store::MediaStore +#[derive(Debug, Clone)] +pub struct IndexeddbMediaStore { + // A handle to the IndexedDB database + inner: Rc, + // A serializer with functionality tailored to `IndexeddbMediaStore` + serializer: IndexeddbMediaStoreSerializer, + // A service for conveniently delegating media-related queries to an `MediaStoreInner` + // implementation + media_service: MediaService, + // An in-memory store for providing temporary implementations for + // functions of `MediaStore`. + // + // NOTE: This will be removed once we have IndexedDB-backed implementations for all + // functions in `MediaStore`. + memory_store: MemoryMediaStore, +} + +impl IndexeddbMediaStore { + /// Provides a type with which to conveniently build an + /// [`IndexeddbMediaStore`] + pub fn builder() -> IndexeddbMediaStoreBuilder { + IndexeddbMediaStoreBuilder::default() + } + + /// Initializes a new transaction on the underlying IndexedDB database and + /// returns a handle which can be used to combine database operations + /// into an atomic unit. 
+ pub fn transaction<'a>( + &'a self, + stores: &[&str], + mode: IdbTransactionMode, + ) -> Result, IndexeddbMediaStoreError> { + Ok(IndexeddbMediaStoreTransaction::new( + self.inner.transaction_on_multi_with_mode(stores, mode)?, + &self.serializer, + )) + } +} + +#[cfg(target_family = "wasm")] +#[async_trait::async_trait(?Send)] +impl MediaStore for IndexeddbMediaStore { + type Error = IndexeddbMediaStoreError; + + #[instrument(skip(self))] + async fn try_take_leased_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> Result { + let _timer = timer!("method"); + + let now = Duration::from_millis(MilliSecondsSinceUnixEpoch::now().get().into()); + + let transaction = + self.transaction(&[Lease::OBJECT_STORE], IdbTransactionMode::Readwrite)?; + + if let Some(lease) = transaction.get_lease_by_id(key).await? { + if lease.holder != holder && !lease.has_expired(now) { + return Ok(false); + } + } + + transaction + .put_lease(&Lease { + key: key.to_owned(), + holder: holder.to_owned(), + expiration: now + Duration::from_millis(lease_duration_ms.into()), + }) + .await?; + + Ok(true) + } + + #[instrument(skip_all)] + async fn add_media_content( + &self, + request: &MediaRequestParameters, + content: Vec, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.add_media_content(self, request, content, ignore_policy).await + } + + #[instrument(skip_all)] + async fn replace_media_key( + &self, + from: &MediaRequestParameters, + to: &MediaRequestParameters, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .replace_media_key(from, to) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn get_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result>, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.get_media_content(self, 
request).await + } + + #[instrument(skip_all)] + async fn remove_media_content( + &self, + request: &MediaRequestParameters, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .remove_media_content(request) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip(self))] + async fn get_media_content_for_uri( + &self, + uri: &MxcUri, + ) -> Result>, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.get_media_content_for_uri(self, uri).await + } + + #[instrument(skip(self))] + async fn remove_media_content_for_uri( + &self, + uri: &MxcUri, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .remove_media_content_for_uri(uri) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn set_media_retention_policy( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.set_media_retention_policy(self, policy).await + } + + #[instrument(skip_all)] + fn media_retention_policy(&self) -> MediaRetentionPolicy { + let _timer = timer!("method"); + self.media_service.media_retention_policy() + } + + #[instrument(skip_all)] + async fn set_ignore_media_retention_policy( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.set_ignore_media_retention_policy(self, request, ignore_policy).await + } + + #[instrument(skip_all)] + async fn clean_up_media_cache(&self) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.media_service.clean_up_media_cache(self).await + } +} + +#[cfg(target_family = "wasm")] +#[async_trait::async_trait(?Send)] +impl MediaStoreInner for IndexeddbMediaStore { + type Error = IndexeddbMediaStoreError; + + 
#[instrument(skip_all)] + async fn media_retention_policy_inner( + &self, + ) -> Result, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.transaction(&[MediaRetentionPolicy::OBJECT_STORE], IdbTransactionMode::Readonly)? + .get_media_retention_policy() + .await + .map_err(Into::into) + } + + #[instrument(skip_all)] + async fn set_media_retention_policy_inner( + &self, + policy: MediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.transaction(&[MediaRetentionPolicy::OBJECT_STORE], IdbTransactionMode::Readwrite)? + .put_item(&policy) + .await + .map_err(Into::into) + } + + #[instrument(skip_all)] + async fn add_media_content_inner( + &self, + request: &MediaRequestParameters, + content: Vec, + current_time: SystemTime, + policy: MediaRetentionPolicy, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .add_media_content_inner(request, content, current_time, policy, ignore_policy) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn set_ignore_media_retention_policy_inner( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .set_ignore_media_retention_policy_inner(request, ignore_policy) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn get_media_content_inner( + &self, + request: &MediaRequestParameters, + current_time: SystemTime, + ) -> Result>, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .get_media_content_inner(request, current_time) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn get_media_content_for_uri_inner( + &self, + uri: &MxcUri, + current_time: SystemTime, + ) -> 
Result>, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .get_media_content_for_uri_inner(uri, current_time) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn clean_up_media_cache_inner( + &self, + policy: MediaRetentionPolicy, + current_time: SystemTime, + ) -> Result<(), IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .clean_up_media_cache_inner(policy, current_time) + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } + + #[instrument(skip_all)] + async fn last_media_cleanup_time_inner( + &self, + ) -> Result, IndexeddbMediaStoreError> { + let _timer = timer!("method"); + self.memory_store + .last_media_cleanup_time_inner() + .await + .map_err(IndexeddbMediaStoreError::MemoryStore) + } +} + +#[cfg(test)] +mod tests { + use matrix_sdk_base::{ + media::store::{MediaStore, MediaStoreError}, + media_store_integration_tests, media_store_integration_tests_time, + }; + use matrix_sdk_test::async_test; + use uuid::Uuid; + + use crate::media_store::{error::IndexeddbMediaStoreError, IndexeddbMediaStore}; + + mod unencrypted { + use super::*; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + async fn get_media_store() -> Result { + let name = format!("test-media-store-{}", Uuid::new_v4().as_hyphenated()); + Ok(IndexeddbMediaStore::builder().build().await?) + } + + #[cfg(target_family = "wasm")] + media_store_integration_tests!(); + + #[cfg(target_family = "wasm")] + media_store_integration_tests_time!(); + } + + mod encrypted { + use super::*; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + async fn get_media_store() -> Result { + let name = format!("test-media-store-{}", Uuid::new_v4().as_hyphenated()); + Ok(IndexeddbMediaStore::builder().build().await?) 
+ } + + #[cfg(target_family = "wasm")] + media_store_integration_tests!(); + + #[cfg(target_family = "wasm")] + media_store_integration_tests_time!(); + } +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/foreign.rs b/crates/matrix-sdk-indexeddb/src/media_store/serializer/foreign.rs similarity index 96% rename from crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/foreign.rs rename to crates/matrix-sdk-indexeddb/src/media_store/serializer/foreign.rs index 2e4b6ec8abd..f001cc869dc 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/serializer/foreign.rs +++ b/crates/matrix-sdk-indexeddb/src/media_store/serializer/foreign.rs @@ -21,7 +21,7 @@ pub mod ignore_media_retention_policy { //! This is necessary, as [`IgnoreMediaRetentionPolicy`] does not implement //! these traits directly. - use matrix_sdk_base::event_cache::store::media::IgnoreMediaRetentionPolicy; + use matrix_sdk_base::media::store::IgnoreMediaRetentionPolicy; use serde::{Deserializer, Serializer}; /// Serializes an [`IgnoreMediaRetentionPolicy`] as a `u8`, where diff --git a/crates/matrix-sdk-indexeddb/src/media_store/serializer/mod.rs b/crates/matrix-sdk-indexeddb/src/media_store/serializer/mod.rs new file mode 100644 index 00000000000..c9d7f1d39cb --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/serializer/mod.rs @@ -0,0 +1,170 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License + +use gloo_utils::format::JsValueSerdeExt; +use matrix_sdk_crypto::CryptoStoreError; +use ruma::RoomId; +use serde::{de::DeserializeOwned, Serialize}; +use thiserror::Error; +use wasm_bindgen::JsValue; +use web_sys::IdbKeyRange; + +use crate::{ + media_store::serializer::{ + traits::{Indexed, IndexedKey, IndexedKeyBounds, IndexedKeyComponentBounds}, + types::IndexedKeyRange, + }, + serializer::IndexeddbSerializer, +}; + +pub mod foreign; +pub mod traits; +pub mod types; + +#[derive(Debug, Error)] +pub enum IndexeddbMediaStoreSerializerError { + #[error("indexing: {0}")] + Indexing(IndexingError), + #[error("serialization: {0}")] + Serialization(#[from] serde_json::Error), +} + +impl From for IndexeddbMediaStoreSerializerError { + fn from(e: serde_wasm_bindgen::Error) -> Self { + Self::Serialization(serde::de::Error::custom(e.to_string())) + } +} + +/// A (de)serializer for an IndexedDB implementation of [`MediaStore`][1]. +/// +/// This is primarily a wrapper around [`IndexeddbSerializer`] with a +/// convenience functions for (de)serializing types specific to the +/// [`MediaStore`][1]. +/// +/// [1]: matrix_sdk_base::media::store::MediaStore +#[derive(Debug, Clone)] +pub struct IndexeddbMediaStoreSerializer { + inner: IndexeddbSerializer, +} + +impl IndexeddbMediaStoreSerializer { + pub fn new(inner: IndexeddbSerializer) -> Self { + Self { inner } + } + + /// Returns a reference to the inner [`IndexeddbSerializer`]. + pub fn inner(&self) -> &IndexeddbSerializer { + &self.inner + } + + /// Encodes an key for a [`Indexed`] type. + /// + /// Note that the particular key which is encoded is defined by the type + /// `K`. + pub fn encode_key(&self, components: K::KeyComponents<'_>) -> K + where + T: Indexed, + K: IndexedKey, + { + K::encode(components, &self.inner) + } + + /// Encodes a key for a [`Indexed`] type as a [`JsValue`]. 
+ /// + /// Note that the particular key which is encoded is defined by the type + /// `K`. + pub fn encode_key_as_value( + &self, + components: K::KeyComponents<'_>, + ) -> Result + where + T: Indexed, + K: IndexedKey + Serialize, + { + serde_wasm_bindgen::to_value(&self.encode_key::(components)) + } + + /// Encodes a key component range for an [`Indexed`] type. + /// + /// Note that the particular key which is encoded is defined by the type + /// `K`. + pub fn encode_key_range( + &self, + range: impl Into>, + ) -> Result + where + T: Indexed, + K: Serialize, + { + use serde_wasm_bindgen::to_value; + Ok(match range.into() { + IndexedKeyRange::Only(key) => IdbKeyRange::only(&to_value(&key)?)?, + IndexedKeyRange::Bound(lower, upper) => { + IdbKeyRange::bound(&to_value(&lower)?, &to_value(&upper)?)? + } + }) + } + + /// Encodes a key component range for an [`Indexed`] type. + /// + /// Note that the particular key which is encoded is defined by the type + /// `K`. + pub fn encode_key_component_range<'a, T, K>( + &self, + range: impl Into>>, + ) -> Result + where + T: Indexed, + K: IndexedKey + Serialize, + { + let range = match range.into() { + IndexedKeyRange::Only(components) => { + IndexedKeyRange::Only(K::encode(components, &self.inner)) + } + IndexedKeyRange::Bound(lower, upper) => { + let lower = K::encode(lower, &self.inner); + let upper = K::encode(upper, &self.inner); + IndexedKeyRange::Bound(lower, upper) + } + }; + self.encode_key_range::(range) + } + + /// Serializes an [`Indexed`] type into a [`JsValue`] + pub fn serialize( + &self, + t: &T, + ) -> Result> + where + T: Indexed, + T::IndexedType: Serialize, + { + let indexed = + t.to_indexed(&self.inner).map_err(IndexeddbMediaStoreSerializerError::Indexing)?; + serde_wasm_bindgen::to_value(&indexed).map_err(Into::into) + } + + /// Deserializes an [`Indexed`] type from a [`JsValue`] + pub fn deserialize( + &self, + value: JsValue, + ) -> Result> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + { 
+ let indexed: T::IndexedType = value.into_serde()?; + T::from_indexed(indexed, &self.inner).map_err(IndexeddbMediaStoreSerializerError::Indexing) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/serializer/traits.rs b/crates/matrix-sdk-indexeddb/src/media_store/serializer/traits.rs new file mode 100644 index 00000000000..e1db633ed5a --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/serializer/traits.rs @@ -0,0 +1,189 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use ruma::RoomId; + +use crate::serializer::IndexeddbSerializer; + +/// A conversion trait for preparing high-level types into indexed types +/// which are better suited for storage in IndexedDB. +/// +/// Note that the functions below take an [`IndexeddbSerializer`] as an +/// argument, which provides the necessary context for encryption and +/// decryption, in the case the high-level type must be encrypted before +/// storage. +pub trait Indexed: Sized { + /// The name of the object store in IndexedDB. + const OBJECT_STORE: &'static str; + + /// The indexed type that is used for storage in IndexedDB. + type IndexedType; + /// The error type that is returned when conversion fails. + type Error; + + /// Converts the high-level type into an indexed type. + fn to_indexed( + &self, + serializer: &IndexeddbSerializer, + ) -> Result; + + /// Converts an indexed type into the high-level type. 
+ fn from_indexed( + indexed: Self::IndexedType, + serializer: &IndexeddbSerializer, + ) -> Result; +} + +/// A trait for encoding types which will be used as keys in IndexedDB. +/// +/// Each implementation represents a key on an [`Indexed`] type. +pub trait IndexedKey { + /// The index name for the key, if it represents an index. + const INDEX: Option<&'static str> = None; + + /// Any extra data used to construct the key. + type KeyComponents<'a>; + + /// Encodes the key components into a type that can be used as a key in + /// IndexedDB. + /// + /// Note that this function takes an [`IndexeddbSerializer`] as an + /// argument, which provides the necessary context for encryption and + /// decryption, in the case that certain components of the key must be + /// encrypted before storage. + fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self; +} + +/// A trait for constructing the bounds of an [`IndexedKey`]. +/// +/// This is useful when constructing range queries in IndexedDB. +/// +/// The [`IndexedKeyComponentBounds`] helps to specify the upper and lower +/// bounds of the components that are used to create the final key, while the +/// `IndexedKeyBounds` are the upper and lower bounds of the final key itself. +/// +/// While these concepts are similar and often produce the same results, there +/// are cases where these two concepts produce very different results. Namely, +/// when any of the components are encrypted in the process of constructing the +/// final key, then the component bounds and the key bounds produce very +/// different results. +/// +/// So, for instance, consider the `EventId`, which may be encrypted before +/// being used in a final key. One cannot construct the upper and lower bounds +/// of the final key using the upper and lower bounds of the `EventId`, because +/// once the `EventId` is encrypted, the resultant value will no longer express +/// the proper bound. 
+pub trait IndexedKeyBounds<T: Indexed>: IndexedKey<T> {
+    /// Constructs the lower bound of the key.
+    fn lower_key(serializer: &IndexeddbSerializer) -> Self;
+
+    /// Constructs the upper bound of the key.
+    fn upper_key(serializer: &IndexeddbSerializer) -> Self;
+}
+
+impl<T, K> IndexedKeyBounds<T> for K
+where
+    T: Indexed,
+    K: IndexedKeyComponentBounds<T> + Sized,
+{
+    /// Constructs the lower bound of the key.
+    fn lower_key(serializer: &IndexeddbSerializer) -> Self {
+        <Self as IndexedKey<T>>::encode(Self::lower_key_components(), serializer)
+    }
+
+    /// Constructs the upper bound of the key.
+    fn upper_key(serializer: &IndexeddbSerializer) -> Self {
+        <Self as IndexedKey<T>>::encode(Self::upper_key_components(), serializer)
+    }
+}
+
+/// A trait for constructing the bounds of the components of an [`IndexedKey`].
+///
+/// This is useful when constructing range queries in IndexedDB. Note that this
+/// trait should not be implemented for key components that are going to be
+/// encrypted as ordering properties will not be preserved.
+///
+/// One may be interested to read the documentation of [`IndexedKeyBounds`] to
+/// get a better overview of how these two interact.
+pub trait IndexedKeyComponentBounds<T: Indexed>: IndexedKeyBounds<T> {
+    /// Constructs the lower bound of the key components.
+    fn lower_key_components() -> Self::KeyComponents<'static>;
+
+    /// Constructs the upper bound of the key components.
+    fn upper_key_components() -> Self::KeyComponents<'static>;
+}
+
+/// A trait for constructing the bounds of an [`IndexedKey`] given a prefix `P`
+/// of that key.
+///
+/// The key bounds should be constructed by keeping the prefix constant while
+/// the remaining components of the key are set to their lower and upper limits.
+///
+/// This is useful when constructing prefixed range queries in IndexedDB.
+///
+/// Note that the [`IndexedPrefixKeyComponentBounds`] helps to specify the upper
+/// and lower bounds of the components that are used to create the final key,
+/// while the `IndexedPrefixKeyBounds` are the upper and lower bounds of the
+/// final key itself.
+///
+/// For details on the differences between key bounds and key component bounds,
+/// see the documentation on [`IndexedKeyBounds`].
+pub trait IndexedPrefixKeyBounds<T: Indexed, P>: IndexedKey<T> {
+    /// Constructs the lower bound of the key while maintaining a constant
+    /// prefix.
+    fn lower_key_with_prefix(prefix: P, serializer: &IndexeddbSerializer) -> Self;
+
+    /// Constructs the upper bound of the key while maintaining a constant
+    /// prefix.
+    fn upper_key_with_prefix(prefix: P, serializer: &IndexeddbSerializer) -> Self;
+}
+
+impl<'a, T, K, P> IndexedPrefixKeyBounds<T, P> for K
+where
+    T: Indexed,
+    K: IndexedPrefixKeyComponentBounds<'a, T, P> + Sized,
+    P: 'a,
+{
+    fn lower_key_with_prefix(prefix: P, serializer: &IndexeddbSerializer) -> Self {
+        <Self as IndexedKey<T>>::encode(Self::lower_key_components_with_prefix(prefix), serializer)
+    }
+
+    fn upper_key_with_prefix(prefix: P, serializer: &IndexeddbSerializer) -> Self {
+        <Self as IndexedKey<T>>::encode(Self::upper_key_components_with_prefix(prefix), serializer)
+    }
+}
+
+/// A trait for constructing the bounds of the components of an [`IndexedKey`]
+/// given a prefix `P` of that key.
+///
+/// The key component bounds should be constructed by keeping the prefix
+/// constant while the remaining components of the key are set to their lower
+/// and upper limits.
+///
+/// This is useful when constructing range queries in IndexedDB.
+///
+/// Note that this trait should not be implemented for key components that are
+/// going to be encrypted as ordering properties will not be preserved.
+///
+/// One may be interested to read the documentation of [`IndexedKeyBounds`] to
+/// get a better overview of how these two interact.
+pub trait IndexedPrefixKeyComponentBounds<'a, T: Indexed, P: 'a>: IndexedKey<T> {
+    /// Constructs the lower bound of the key components while maintaining a
+    /// constant prefix.
+    fn lower_key_components_with_prefix(prefix: P) -> Self::KeyComponents<'a>;
+
+    /// Constructs the upper bound of the key components while maintaining a
+    /// constant prefix.
+    fn upper_key_components_with_prefix(prefix: P) -> Self::KeyComponents<'a>;
+}
diff --git a/crates/matrix-sdk-indexeddb/src/media_store/serializer/types.rs b/crates/matrix-sdk-indexeddb/src/media_store/serializer/types.rs
new file mode 100644
index 00000000000..9756a6e4db6
--- /dev/null
+++ b/crates/matrix-sdk-indexeddb/src/media_store/serializer/types.rs
@@ -0,0 +1,622 @@
+// Copyright 2025 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+//! Types used for (de)serialization of media store data.
+//!
+//! These types are wrappers around the types found in
+//! [`crate::media_store::types`] and prepare those types for
+//! serialization in IndexedDB. They are constructed by extracting
+//! relevant values from the inner types, storing those values in indexed
+//! fields, and then storing the full types in a possibly encrypted form. This
+//! allows the data to be encrypted, while still allowing for efficient querying
+//! and retrieval of data.
+//!
+//! Each top-level type represents an object store in IndexedDB and each
+//! 
field - except the content field - represents an index on that object store. +//! These types mimic the structure of the object stores and indices created in +//! [`crate::media_store::migrations`]. + +use std::{sync::LazyLock, time::Duration}; + +use matrix_sdk_base::media::{ + store::{IgnoreMediaRetentionPolicy, MediaRetentionPolicy}, + MediaRequestParameters, UniqueKey, +}; +use matrix_sdk_crypto::CryptoStoreError; +use ruma::events::room::MediaSource; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use crate::{ + media_store::{ + migrations::current::keys, + serializer::{ + foreign::ignore_media_retention_policy, + traits::{ + Indexed, IndexedKey, IndexedKeyBounds, IndexedKeyComponentBounds, + IndexedPrefixKeyBounds, IndexedPrefixKeyComponentBounds, + }, + }, + types::{Lease, Media}, + }, + serializer::{IndexeddbSerializer, MaybeEncrypted}, +}; + +/// The first unicode character, and hence the lower bound for IndexedDB keys +/// (or key components) which are represented as strings. +/// +/// This value is useful for constructing a key range over all strings when used +/// in conjunction with [`INDEXED_KEY_UPPER_CHARACTER`]. +const INDEXED_KEY_LOWER_CHARACTER: char = '\u{0000}'; + +/// The last unicode character in the [Basic Multilingual Plane][1]. This seems +/// like a reasonable place to set the upper bound for IndexedDB keys (or key +/// components) which are represented as strings, though one could +/// theoretically set it to `\u{10FFFF}`. +/// +/// This value is useful for constructing a key range over all strings when used +/// in conjunction with [`INDEXED_KEY_LOWER_CHARACTER`]. 
+///
+/// [1]: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+const INDEXED_KEY_UPPER_CHARACTER: char = '\u{FFFF}';
+
+/// Identical to [`INDEXED_KEY_LOWER_CHARACTER`] but represented as a [`String`]
+static INDEXED_KEY_LOWER_STRING: LazyLock<String> =
+    LazyLock::new(|| String::from(INDEXED_KEY_LOWER_CHARACTER));
+
+/// Identical to [`INDEXED_KEY_UPPER_CHARACTER`] but represented as a [`String`]
+static INDEXED_KEY_UPPER_STRING: LazyLock<String> =
+    LazyLock::new(|| String::from(INDEXED_KEY_UPPER_CHARACTER));
+
+/// An [`IndexedMediaContentSize`] set to it's minimal value - i.e., `0`.
+///
+/// This value is useful for constructing a key range over all keys which
+/// contain [`IndexedMediaContentSize`] values when used in conjunction with
+/// [`INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE`].
+const INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE: IndexedMediaContentSize = 0;
+
+/// An [`IndexedMediaContentSize`] set to [`js_sys::Number::MAX_SAFE_INTEGER`].
+/// Note that this restricts the size of [`IndexedMedia::content`], which
+/// ultimately restricts the size of [`Media::content`].
+///
+/// This value is useful for constructing a key range over all keys which
+/// contain [`IndexedMediaContentSize`] values when used in conjunction with
+/// [`INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE`].
+const INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE: IndexedMediaContentSize =
+    js_sys::Number::MAX_SAFE_INTEGER as usize;
+
+/// The minimum possible [`Duration`].
+///
+/// This value is useful for constructing a key range over all keys which
+/// contain time-related values when used in conjunction with
+/// [`INDEXED_KEY_UPPER_DURATION`].
+const INDEXED_KEY_LOWER_DURATION: Duration = Duration::ZERO;
+
+/// A [`Duration`] constructed with [`js_sys::Number::MAX_SAFE_INTEGER`]
+/// seconds.
+///
+/// This value is useful for constructing a key range over all keys which
+/// contain time-related values in seconds when used in conjunction with
+/// [`INDEXED_KEY_LOWER_DURATION`].
+const INDEXED_KEY_UPPER_DURATION_SECONDS: Duration =
+    Duration::from_secs(js_sys::Number::MAX_SAFE_INTEGER as u64);
+
+/// Representation of a range of keys of type `K`. This is loosely
+/// correlated with [IDBKeyRange][1], with a few differences.
+///
+/// Namely, this enum only provides a single way to express a bounded range
+/// which is always inclusive on both bounds. While all ranges can still be
+/// represented, [`IDBKeyRange`][1] provides more flexibility in this regard.
+///
+/// [1]: https://developer.mozilla.org/en-US/docs/Web/API/IDBKeyRange
+#[derive(Debug, Copy, Clone)]
+pub enum IndexedKeyRange<K> {
+    /// Represents a single key of type `K`.
+    ///
+    /// Identical to [`IDBKeyRange.only`][1].
+    ///
+    /// [1]: https://developer.mozilla.org/en-US/docs/Web/API/IDBKeyRange/only
+    Only(K),
+    /// Represents an inclusive range of keys of type `K`
+    /// where the first item is the lower bound and the
+    /// second item is the upper bound.
+    ///
+    /// Similar to [`IDBKeyRange.bound`][1].
+    ///
+    /// [1]: https://developer.mozilla.org/en-US/docs/Web/API/IDBKeyRange/bound
+    Bound(K, K),
+}
+
+impl<'a, C: 'a> IndexedKeyRange<C> {
+    /// Encodes a range of key components of type `K::KeyComponents`
+    /// into a range of keys of type `K`.
+    pub fn encoded<T, K>(self, serializer: &IndexeddbSerializer) -> IndexedKeyRange<K>
+    where
+        T: Indexed,
+        K: IndexedKey<T, KeyComponents<'a> = C>,
+    {
+        match self {
+            Self::Only(components) => IndexedKeyRange::Only(K::encode(components, serializer)),
+            Self::Bound(lower, upper) => {
+                IndexedKeyRange::Bound(K::encode(lower, serializer), K::encode(upper, serializer))
+            }
+        }
+    }
+}
+
+impl<K> IndexedKeyRange<K> {
+    pub fn map<T, F>(self, f: F) -> IndexedKeyRange<T>
+    where
+        F: Fn(K) -> T,
+    {
+        match self {
+            IndexedKeyRange::Only(key) => IndexedKeyRange::Only(f(key)),
+            IndexedKeyRange::Bound(lower, upper) => IndexedKeyRange::Bound(f(lower), f(upper)),
+        }
+    }
+
+    pub fn all<T>(serializer: &IndexeddbSerializer) -> IndexedKeyRange<K>
+    where
+        T: Indexed,
+        K: IndexedKeyBounds<T>,
+    {
+        IndexedKeyRange::Bound(K::lower_key(serializer), K::upper_key(serializer))
+    }
+
+    pub fn all_with_prefix<T, P>(prefix: P, serializer: &IndexeddbSerializer) -> IndexedKeyRange<K>
+    where
+        T: Indexed,
+        K: IndexedPrefixKeyBounds<T, P>,
+        P: Clone,
+    {
+        IndexedKeyRange::Bound(
+            K::lower_key_with_prefix(prefix.clone(), serializer),
+            K::upper_key_with_prefix(prefix, serializer),
+        )
+    }
+}
+
+impl<K> From<(K, K)> for IndexedKeyRange<K> {
+    fn from(value: (K, K)) -> Self {
+        Self::Bound(value.0, value.1)
+    }
+}
+
+impl<K> From<K> for IndexedKeyRange<K> {
+    fn from(value: K) -> Self {
+        Self::Only(value)
+    }
+}
+
+/// A representation of the primary key of the [`CORE`][1] object store.
+/// The key may or may not be hashed depending on the
+/// provided [`IndexeddbSerializer`].
+/// +/// [1]: crate::media_store::migrations::v1::create_core_object_store +pub type IndexedCoreIdKey = String; + +/// A (possibly) encrypted representation of a [`Lease`] +pub type IndexedLeaseContent = MaybeEncrypted; + +/// A (possibly) encrypted representation of a [`MediaRetentionPolicy`] +pub type IndexedMediaRetentionPolicyContent = MaybeEncrypted; + +/// A (possibly) encrypted representation of a [`MediaMetadata`][1] +/// +/// [1]: crate::media_store::types::MediaMetadata +pub type IndexedMediaMetadata = MaybeEncrypted; + +/// A (possibly) encrypted representation of [`Media::content`] +pub type IndexedMediaContent = Vec; + +/// A representation of the size in bytes of the [`IndexedMediaContent`] which +/// is suitable for use in an IndexedDB key +pub type IndexedMediaContentSize = usize; + +/// A representation of time in seconds since the [Unix +/// Epoch](std::time::UNIX_EPOCH) which is suitable for use in an IndexedDB key +pub type IndexedSecondsSinceUnixEpoch = u64; + +/// Represents the [`LEASES`][1] object store. +/// +/// [1]: crate::media_store::migrations::v1::create_lease_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedLease { + /// The primary key of the object store. + pub id: IndexedLeaseIdKey, + /// The (possibly encrypted) content - i.e., a [`Lease`]. 
+ pub content: IndexedLeaseContent, +} + +impl Indexed for Lease { + type IndexedType = IndexedLease; + + const OBJECT_STORE: &'static str = keys::LEASES; + + type Error = CryptoStoreError; + + fn to_indexed( + &self, + serializer: &IndexeddbSerializer, + ) -> Result { + Ok(IndexedLease { + id: >::encode(&self.key, serializer), + content: serializer.maybe_encrypt_value(self)?, + }) + } + + fn from_indexed( + indexed: Self::IndexedType, + serializer: &IndexeddbSerializer, + ) -> Result { + serializer.maybe_decrypt_value(indexed.content) + } +} + +/// The value associated with the [primary key](IndexedLease::id) of the +/// [`LEASES`][1] object store, which is constructed from the value in +/// [`Lease::key`]. This value may or may not be hashed depending on the +/// provided [`IndexeddbSerializer`]. +/// +/// [1]: crate::media_store::migrations::v1::create_lease_object_store +pub type IndexedLeaseIdKey = String; + +impl IndexedKey for IndexedLeaseIdKey { + type KeyComponents<'a> = &'a str; + + fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { + serializer.encode_key_as_string(keys::LEASES, components) + } +} + +impl IndexedKeyComponentBounds for IndexedLeaseIdKey { + fn lower_key_components() -> Self::KeyComponents<'static> { + INDEXED_KEY_LOWER_STRING.as_str() + } + + fn upper_key_components() -> Self::KeyComponents<'static> { + INDEXED_KEY_UPPER_STRING.as_str() + } +} + +/// Represents the [`MediaRetentionPolicy`] record in the [`CORE`][1] object +/// store. +/// +/// [1]: crate::media_store::migrations::v1::create_core_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedMediaRetentionPolicy { + /// The primary key of the object store. + pub id: IndexedCoreIdKey, + /// The (possibly) encrypted content - i.e., a [`MediaRetentionPolicy`]. 
+ pub content: IndexedMediaRetentionPolicyContent, +} + +impl Indexed for MediaRetentionPolicy { + const OBJECT_STORE: &'static str = keys::CORE; + + type IndexedType = IndexedMediaRetentionPolicy; + type Error = CryptoStoreError; + + fn to_indexed( + &self, + serializer: &IndexeddbSerializer, + ) -> Result { + Ok(Self::IndexedType { + id: >::encode((), serializer), + content: serializer.maybe_encrypt_value(self)?, + }) + } + + fn from_indexed( + indexed: Self::IndexedType, + serializer: &IndexeddbSerializer, + ) -> Result { + serializer.maybe_decrypt_value(indexed.content) + } +} + +impl IndexedKey for IndexedCoreIdKey { + type KeyComponents<'a> = (); + + fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { + serializer.encode_key_as_string(keys::CORE, keys::MEDIA_RETENTION_POLICY_KEY) + } +} + +/// Represents the [`MEDIA`][1] object store. +/// +/// [1]: crate::media_store::migrations::v1::create_media_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedMedia { + /// The primary key of the object store + pub id: IndexedMediaIdKey, + /// The (possibly) hashed source of the media derived from + /// [`MediaSource::unique_key`] + pub source: IndexedMediaSourceKey, + /// The size (in bytes) of the media content and whether to ignore the + /// [`MediaRetentionPolicy`] + pub content_size: IndexedMediaContentSizeKey, + /// The last time the media was accessed and whether to ignore the + /// [`MediaRetentionPolicy`] + pub last_access: IndexedMediaLastAccessKey, + /// The last the media was accessed, the size (in bytes) of the media + /// content, and whether to ignore the [`MediaRetentionPolicy`] + pub retention_metadata: IndexedMediaRetentionMetadataKey, + /// The (possibly) encrypted metadata - i.e., [`MediaMetadata`][1] + /// + /// [1]: crate::media_store::types::MediaMetadata + pub metadata: IndexedMediaMetadata, + /// The (possibly) encrypted content - i.e., [`Media::content`] + pub content: 
IndexedMediaContent, +} + +#[derive(Debug, Error)] +pub enum IndexedMediaError { + #[error("crypto store: {0}")] + CryptoStore(#[from] CryptoStoreError), + #[error("serialization: {0}")] + Serialization(#[from] rmp_serde::encode::Error), + #[error("deserialization: {0}")] + Deserialization(#[from] rmp_serde::decode::Error), +} + +impl Indexed for Media { + const OBJECT_STORE: &'static str = keys::MEDIA; + + type IndexedType = IndexedMedia; + type Error = IndexedMediaError; + + fn to_indexed( + &self, + serializer: &IndexeddbSerializer, + ) -> Result { + let content = rmp_serde::to_vec_named(&serializer.maybe_encrypt_value(&self.content)?)?; + Ok(Self::IndexedType { + id: >::encode( + &self.metadata.request_parameters, + serializer, + ), + source: >::encode( + &self.metadata.request_parameters.source, + serializer, + ), + content_size: IndexedMediaContentSizeKey::encode( + (self.metadata.ignore_policy, content.len()), + serializer, + ), + last_access: IndexedMediaLastAccessKey::encode( + (self.metadata.ignore_policy, self.metadata.last_access), + serializer, + ), + retention_metadata: IndexedMediaRetentionMetadataKey::encode( + (self.metadata.ignore_policy, self.metadata.last_access, content.len()), + serializer, + ), + metadata: serializer.maybe_encrypt_value(&self.metadata)?, + content, + }) + } + + fn from_indexed( + indexed: Self::IndexedType, + serializer: &IndexeddbSerializer, + ) -> Result { + Ok(Self { + metadata: serializer.maybe_decrypt_value(indexed.metadata)?, + content: serializer.maybe_decrypt_value(rmp_serde::from_slice(&indexed.content)?)?, + }) + } +} + +/// The primary key of the [`MEDIA`][1] object store, which is constructed from: +/// +/// - The (possibly) hashed value returned by +/// [`MediaRequestParameters::unique_key`] +/// +/// [1]: crate::media_store::migrations::v1::create_media_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedMediaIdKey(String); + +impl IndexedKey for IndexedMediaIdKey { + type KeyComponents<'a> 
= &'a MediaRequestParameters; + + fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { + Self(serializer.encode_key_as_string(keys::MEDIA, components.unique_key())) + } +} + +/// The value associated with the [`source`](IndexedMedia::source) index of the +/// [`MEDIA`][1] object store, which is constructed from: +/// +/// - The (possibly) hashed value returned by [`MediaSource::unique_key`] +/// +/// [1]: crate::media_store::migrations::v1::create_media_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedMediaSourceKey(String); + +impl IndexedKey for IndexedMediaSourceKey { + type KeyComponents<'a> = &'a MediaSource; + + fn encode(components: Self::KeyComponents<'_>, serializer: &IndexeddbSerializer) -> Self { + Self(serializer.encode_key_as_string(keys::MEDIA_SOURCE, components.unique_key())) + } +} + +/// The value associated with the [`content_size`](IndexedMedia::content_size) +/// index of the [`MEDIA`][1] object store, which is constructed from: +/// +/// - The value of [`IgnoreMediaRetentionPolicy`] +/// - The size in bytes of the associated [`IndexedMedia::content`] +/// +/// [1]: crate::media_store::migrations::v1::create_media_object_store +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexedMediaContentSizeKey( + #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy, + IndexedMediaContentSize, +); + +impl IndexedMediaContentSizeKey { + /// Returns whether the associated [`IndexedMedia`] record should ignore the + /// global [`MediaRetentionPolicy`] + pub fn ignore_policy(&self) -> bool { + self.0.is_yes() + } + + /// Returns the size in bytes of the associated [`IndexedMedia::content`] + pub fn content_size(&self) -> usize { + self.1 + } +} + +impl IndexedKey for IndexedMediaContentSizeKey { + type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, IndexedMediaContentSize); + + fn encode( + (ignore_policy, content_size): Self::KeyComponents<'_>, + _: 
&IndexeddbSerializer,
+    ) -> Self {
+        Self(ignore_policy, content_size)
+    }
+}
+
+impl IndexedKeyComponentBounds<Media> for IndexedMediaContentSizeKey {
+    fn lower_key_components() -> Self::KeyComponents<'static> {
+        Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No)
+    }
+
+    fn upper_key_components() -> Self::KeyComponents<'static> {
+        Self::upper_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes)
+    }
+}
+
+impl<'a> IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy>
+    for IndexedMediaContentSizeKey
+{
+    fn lower_key_components_with_prefix(
+        prefix: IgnoreMediaRetentionPolicy,
+    ) -> Self::KeyComponents<'a> {
+        (prefix, INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE)
+    }
+
+    fn upper_key_components_with_prefix(
+        prefix: IgnoreMediaRetentionPolicy,
+    ) -> Self::KeyComponents<'a> {
+        (prefix, INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE)
+    }
+}
+
+/// The value associated with the [`last_access`](IndexedMedia::last_access)
+/// index of the [`MEDIA`][1] object store, which is constructed from:
+///
+/// - The value of [`IgnoreMediaRetentionPolicy`]
+/// - The last time the associated [`IndexedMedia`] was accessed (in seconds
+///   since the Unix Epoch)
+///
+/// [1]: crate::media_store::migrations::v1::create_media_object_store
+#[derive(Debug, Serialize, Deserialize)]
+pub struct IndexedMediaLastAccessKey(
+    #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy,
+    IndexedSecondsSinceUnixEpoch,
+);
+
+impl IndexedKey<Media> for IndexedMediaLastAccessKey {
+    type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, Duration);
+
+    fn encode(
+        (ignore_policy, last_access): Self::KeyComponents<'_>,
+        _: &IndexeddbSerializer,
+    ) -> Self {
+        Self(ignore_policy, last_access.as_secs())
+    }
+}
+
+impl IndexedKeyComponentBounds<Media> for IndexedMediaLastAccessKey {
+    fn lower_key_components() -> Self::KeyComponents<'static> {
+        Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No)
+    }
+
+    fn upper_key_components() -> 
Self::KeyComponents<'static> {
+        Self::upper_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes)
+    }
+}
+
+impl<'a> IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy>
+    for IndexedMediaLastAccessKey
+{
+    fn lower_key_components_with_prefix(
+        prefix: IgnoreMediaRetentionPolicy,
+    ) -> Self::KeyComponents<'a> {
+        (prefix, INDEXED_KEY_LOWER_DURATION)
+    }
+
+    fn upper_key_components_with_prefix(
+        prefix: IgnoreMediaRetentionPolicy,
+    ) -> Self::KeyComponents<'a> {
+        (prefix, INDEXED_KEY_UPPER_DURATION_SECONDS)
+    }
+}
+
+/// The value associated with the
+/// [`retention_metadata`](IndexedMedia::retention_metadata) index of the
+/// [`MEDIA`][1] object store, which is constructed from:
+///
+/// - The value of [`IgnoreMediaRetentionPolicy`]
+/// - The last time the associated [`IndexedMedia`] was accessed (in seconds
+///   since the Unix Epoch)
+/// - The size in bytes of the associated [`IndexedMedia::content`]
+///
+/// [1]: crate::media_store::migrations::v1::create_media_object_store
+#[derive(Debug, Serialize, Deserialize)]
+pub struct IndexedMediaRetentionMetadataKey(
+    #[serde(with = "ignore_media_retention_policy")] IgnoreMediaRetentionPolicy,
+    IndexedSecondsSinceUnixEpoch,
+    IndexedMediaContentSize,
+);
+
+impl IndexedKey<Media> for IndexedMediaRetentionMetadataKey {
+    type KeyComponents<'a> = (IgnoreMediaRetentionPolicy, Duration, IndexedMediaContentSize);
+
+    fn encode(
+        (ignore_policy, last_access, content_size): Self::KeyComponents<'_>,
+        _: &IndexeddbSerializer,
+    ) -> Self {
+        Self(ignore_policy, last_access.as_secs(), content_size)
+    }
+}
+
+impl IndexedKeyComponentBounds<Media> for IndexedMediaRetentionMetadataKey {
+    fn lower_key_components() -> Self::KeyComponents<'static> {
+        Self::lower_key_components_with_prefix(IgnoreMediaRetentionPolicy::No)
+    }
+
+    fn upper_key_components() -> Self::KeyComponents<'static> {
+        Self::upper_key_components_with_prefix(IgnoreMediaRetentionPolicy::Yes)
+    }
+}
+
+impl<'a>
IndexedPrefixKeyComponentBounds<'a, Media, IgnoreMediaRetentionPolicy> + for IndexedMediaRetentionMetadataKey +{ + fn lower_key_components_with_prefix( + prefix: IgnoreMediaRetentionPolicy, + ) -> Self::KeyComponents<'a> { + (prefix, INDEXED_KEY_LOWER_DURATION, INDEXED_KEY_LOWER_MEDIA_CONTENT_SIZE) + } + + fn upper_key_components_with_prefix( + prefix: IgnoreMediaRetentionPolicy, + ) -> Self::KeyComponents<'a> { + (prefix, INDEXED_KEY_UPPER_DURATION_SECONDS, INDEXED_KEY_UPPER_MEDIA_CONTENT_SIZE) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs new file mode 100644 index 00000000000..a804552921f --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs @@ -0,0 +1,379 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License + +use indexed_db_futures::{prelude::IdbTransaction, IdbQuerySource}; +use matrix_sdk_base::{ + linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, RawChunk}, + media::store::{MediaRetentionPolicy, MediaStoreError}, +}; +use serde::{ + de::{DeserializeOwned, Error}, + Serialize, +}; +use thiserror::Error; +use web_sys::IdbCursorDirection; + +use crate::media_store::{ + error::{AsyncErrorDeps, IndexeddbMediaStoreError}, + migrations::v1::keys, + serializer::{ + traits::{ + Indexed, IndexedKey, IndexedKeyBounds, IndexedKeyComponentBounds, + IndexedPrefixKeyBounds, IndexedPrefixKeyComponentBounds, + }, + types::{IndexedCoreIdKey, IndexedKeyRange, IndexedLeaseIdKey}, + IndexeddbMediaStoreSerializer, + }, + types::Lease, +}; + +#[derive(Debug, Error)] +pub enum IndexeddbMediaStoreTransactionError { + #[error("DomException {name} ({code}): {message}")] + DomException { name: String, message: String, code: u16 }, + #[error("serialization: {0}")] + Serialization(Box), + #[error("item is not unique")] + ItemIsNotUnique, + #[error("item not found")] + ItemNotFound, +} + +impl From for IndexeddbMediaStoreTransactionError { + fn from(value: web_sys::DomException) -> Self { + Self::DomException { name: value.name(), message: value.message(), code: value.code() } + } +} + +impl From for IndexeddbMediaStoreTransactionError { + fn from(e: serde_wasm_bindgen::Error) -> Self { + Self::Serialization(Box::new(serde_json::Error::custom(e.to_string()))) + } +} + +impl From for MediaStoreError { + fn from(value: IndexeddbMediaStoreTransactionError) -> Self { + use IndexeddbMediaStoreTransactionError::*; + + match value { + DomException { .. 
} => Self::InvalidData { details: value.to_string() }, + Serialization(e) => Self::Serialization(serde_json::Error::custom(e.to_string())), + ItemIsNotUnique | ItemNotFound => Self::InvalidData { details: value.to_string() }, + } + } +} + +/// Represents an IndexedDB transaction, but provides a convenient interface for +/// performing operations relevant to the IndexedDB implementation of +/// [`MediaStore`](matrix_sdk_base::media::store::MediaStore). +pub struct IndexeddbMediaStoreTransaction<'a> { + transaction: IdbTransaction<'a>, + serializer: &'a IndexeddbMediaStoreSerializer, +} + +impl<'a> IndexeddbMediaStoreTransaction<'a> { + pub fn new( + transaction: IdbTransaction<'a>, + serializer: &'a IndexeddbMediaStoreSerializer, + ) -> Self { + Self { transaction, serializer } + } + + /// Returns the underlying IndexedDB transaction. + pub fn into_inner(self) -> IdbTransaction<'a> { + self.transaction + } + + /// Commit all operations tracked in this transaction to IndexedDB. + pub async fn commit(self) -> Result<(), IndexeddbMediaStoreTransactionError> { + self.transaction.await.into_result().map_err(Into::into) + } + + /// Query IndexedDB for items that match the given key range + pub async fn get_items_by_key( + &self, + range: impl Into>, + ) -> Result, IndexeddbMediaStoreTransactionError> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize, + { + let range = self.serializer.encode_key_range::(range)?; + let object_store = self.transaction.object_store(T::OBJECT_STORE)?; + let array = if let Some(index) = K::INDEX { + object_store.index(index)?.get_all_with_key(&range)?.await? + } else { + object_store.get_all_with_key(&range)?.await? 
+ }; + let mut items = Vec::with_capacity(array.length() as usize); + for value in array { + let item = self + .serializer + .deserialize(value) + .map_err(|e| IndexeddbMediaStoreTransactionError::Serialization(Box::new(e)))?; + items.push(item); + } + Ok(items) + } + + /// Query IndexedDB for items that match the given key component range + pub async fn get_items_by_key_components<'b, T, K>( + &self, + range: impl Into>>, + ) -> Result, IndexeddbMediaStoreTransactionError> + where + T: Indexed + 'b, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize + 'b, + { + let range: IndexedKeyRange = range.into().encoded(self.serializer.inner()); + self.get_items_by_key::(range).await + } + + /// Query IndexedDB for items that match the given key. If + /// more than one item is found, an error is returned. + pub async fn get_item_by_key( + &self, + key: K, + ) -> Result, IndexeddbMediaStoreTransactionError> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize, + { + let mut items = self.get_items_by_key::(key).await?; + if items.len() > 1 { + return Err(IndexeddbMediaStoreTransactionError::ItemIsNotUnique); + } + Ok(items.pop()) + } + + /// Query IndexedDB for items that match the given key components. If more + /// than one item is found, an error is returned. + pub async fn get_item_by_key_components<'b, T, K>( + &self, + components: K::KeyComponents<'b>, + ) -> Result, IndexeddbMediaStoreTransactionError> + where + T: Indexed + 'b, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize + 'b, + { + let mut items = self.get_items_by_key_components::(components).await?; + if items.len() > 1 { + return Err(IndexeddbMediaStoreTransactionError::ItemIsNotUnique); + } + Ok(items.pop()) + } + + /// Query IndexedDB for the number of items that match the given key range. 
+ pub async fn get_items_count_by_key( + &self, + range: impl Into>, + ) -> Result + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize, + { + let range = self.serializer.encode_key_range::(range)?; + let object_store = self.transaction.object_store(T::OBJECT_STORE)?; + let count = if let Some(index) = K::INDEX { + object_store.index(index)?.count_with_key(&range)?.await? + } else { + object_store.count_with_key(&range)?.await? + }; + Ok(count as usize) + } + + /// Query IndexedDB for the number of items that match the given key + /// components range. + pub async fn get_items_count_by_key_components<'b, T, K>( + &self, + range: impl Into>>, + ) -> Result + where + T: Indexed + 'b, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize + 'b, + { + let range: IndexedKeyRange = range.into().encoded(self.serializer.inner()); + self.get_items_count_by_key::(range).await + } + + /// Query IndexedDB for the item with the maximum key in the given range. + pub async fn get_max_item_by_key( + &self, + range: impl Into>, + ) -> Result, IndexeddbMediaStoreTransactionError> + where + T: Indexed, + T::IndexedType: DeserializeOwned, + T::Error: AsyncErrorDeps, + K: IndexedKey + Serialize, + { + let range = self.serializer.encode_key_range::(range)?; + let direction = IdbCursorDirection::Prev; + let object_store = self.transaction.object_store(T::OBJECT_STORE)?; + if let Some(index) = K::INDEX { + object_store + .index(index)? + .open_cursor_with_range_and_direction(&range, direction)? + .await? + .map(|cursor| self.serializer.deserialize(cursor.value())) + .transpose() + .map_err(|e| IndexeddbMediaStoreTransactionError::Serialization(Box::new(e))) + } else { + object_store + .open_cursor_with_range_and_direction(&range, direction)? + .await? 
+                .map(|cursor| self.serializer.deserialize(cursor.value()))
+                .transpose()
+                .map_err(|e| IndexeddbMediaStoreTransactionError::Serialization(Box::new(e)))
+        }
+    }
+
+    /// Adds an item to the corresponding IndexedDB object
+    /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already
+    /// exists, it will be rejected.
+    pub async fn add_item<T>(&self, item: &T) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed + Serialize,
+        T::IndexedType: Serialize,
+        T::Error: AsyncErrorDeps,
+    {
+        self.transaction
+            .object_store(T::OBJECT_STORE)?
+            .add_val_owned(
+                self.serializer
+                    .serialize(item)
+                    .map_err(|e| IndexeddbMediaStoreTransactionError::Serialization(Box::new(e)))?,
+            )?
+            .await
+            .map_err(Into::into)
+    }
+
+    /// Puts an item in the corresponding IndexedDB object
+    /// store, i.e., `T::OBJECT_STORE`. If an item with the same key already
+    /// exists, it will be overwritten.
+    pub async fn put_item<T>(&self, item: &T) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed + Serialize,
+        T::IndexedType: Serialize,
+        T::Error: AsyncErrorDeps,
+    {
+        self.transaction
+            .object_store(T::OBJECT_STORE)?
+            .put_val_owned(
+                self.serializer
+                    .serialize(item)
+                    .map_err(|e| IndexeddbMediaStoreTransactionError::Serialization(Box::new(e)))?,
+            )?
+            .await
+            .map_err(Into::into)
+    }
+
+    /// Delete items in given key range from IndexedDB
+    pub async fn delete_items_by_key<T, K>(
+        &self,
+        range: impl Into<IndexedKeyRange<K>>,
+    ) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed,
+        K: IndexedKey<T> + Serialize,
+    {
+        let range = self.serializer.encode_key_range::<T, K>(range)?;
+        let object_store = self.transaction.object_store(T::OBJECT_STORE)?;
+        if let Some(index) = K::INDEX {
+            let index = object_store.index(index)?;
+            if let Some(cursor) = index.open_cursor_with_range(&range)?.await?
+            {
+                while cursor.key().is_some() {
+                    cursor.delete()?.await?;
+                    cursor.continue_cursor()?.await?;
+                }
+            }
+        } else {
+            object_store.delete_owned(&range)?.await?;
+        }
+        Ok(())
+    }
+
+    /// Delete items in the given key component range from
+    /// IndexedDB
+    pub async fn delete_items_by_key_components<'b, T, K>(
+        &self,
+        range: impl Into<IndexedKeyRange<K::KeyComponents<'b>>>,
+    ) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed + 'b,
+        K: IndexedKey<T> + Serialize + 'b,
+    {
+        let range: IndexedKeyRange<K> = range.into().encoded(self.serializer.inner());
+        self.delete_items_by_key::<T, K>(range).await
+    }
+
+    /// Delete item that matches the given key components from
+    /// IndexedDB
+    pub async fn delete_item_by_key<'b, T, K>(
+        &self,
+        key: K::KeyComponents<'b>,
+    ) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed + 'b,
+        K: IndexedKey<T> + Serialize + 'b,
+    {
+        self.delete_items_by_key_components::<T, K>(key).await
+    }
+
+    /// Clear all items of type `T` from the associated object store
+    /// `T::OBJECT_STORE` from IndexedDB
+    pub async fn clear<T>(&self) -> Result<(), IndexeddbMediaStoreTransactionError>
+    where
+        T: Indexed,
+    {
+        self.transaction.object_store(T::OBJECT_STORE)?.clear()?.await.map_err(Into::into)
+    }
+
+    /// Query IndexedDB for the lease that matches the given key `id`. If more
+    /// than one lease is found, an error is returned.
+    pub async fn get_lease_by_id(
+        &self,
+        id: &str,
+    ) -> Result<Option<Lease>, IndexeddbMediaStoreTransactionError> {
+        self.get_item_by_key_components::<Lease, IndexedLeaseIdKey>(id).await
+    }
+
+    /// Puts a lease into IndexedDB. If a media with the same key already
+    /// exists, it will be overwritten.
+ pub async fn put_lease( + &self, + lease: &Lease, + ) -> Result<(), IndexeddbMediaStoreTransactionError> { + self.put_item(lease).await + } + + /// Query IndexedDB for the stored [`MediaRetentionPolicy`] + pub async fn get_media_retention_policy( + &self, + ) -> Result, IndexeddbMediaStoreTransactionError> { + self.get_item_by_key_components::(()).await + } +} diff --git a/crates/matrix-sdk-indexeddb/src/media_store/types.rs b/crates/matrix-sdk-indexeddb/src/media_store/types.rs new file mode 100644 index 00000000000..b123d7fa210 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/media_store/types.rs @@ -0,0 +1,63 @@ +// Copyright 2025 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+use std::time::Duration;
+
+use matrix_sdk_base::{
+    deserialized_responses::TimelineEvent,
+    linked_chunk::{ChunkIdentifier, LinkedChunkId, OwnedLinkedChunkId},
+    media::{store::IgnoreMediaRetentionPolicy, MediaRequestParameters},
+};
+use ruma::{OwnedEventId, OwnedRoomId, RoomId};
+use serde::{Deserialize, Serialize};
+
+/// Representation of a time-based lock on the entire
+/// [`IndexeddbMediaStore`](crate::media_store::IndexeddbMediaStore)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Lease {
+    pub key: String,
+    pub holder: String,
+    pub expiration: Duration,
+}
+
+impl Lease {
+    /// Determines whether the lease is expired at a given time `t`
+    pub fn has_expired(&self, t: Duration) -> bool {
+        self.expiration < t
+    }
+}
+
+/// A representation of media data which can be stored in IndexedDB.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Media {
+    /// The metadata associated with [`Media::content`]
+    pub metadata: MediaMetadata,
+    /// The content of the media
+    pub content: Vec<u8>,
+}
+
+/// A representation of media metadata which can be stored in IndexedDB.
+#[derive(Debug, Serialize, Deserialize)] +pub struct MediaMetadata { + /// The parameters specifying the type and source of the media contained in + /// [`Media::content`] + pub request_parameters: MediaRequestParameters, + /// The last time the media was accessed in IndexedDB + pub last_access: Duration, + /// Whether to ignore the [`MediaRetentionPolicy`][1] stored in IndexedDB + /// + /// [1]: matrix_sdk_base::media::store::MediaRetentionPolicy + #[serde(with = "crate::media_store::serializer::foreign::ignore_media_retention_policy")] + pub ignore_policy: IgnoreMediaRetentionPolicy, +} diff --git a/crates/matrix-sdk-sqlite/migrations/media_store/001_init.sql b/crates/matrix-sdk-sqlite/migrations/media_store/001_init.sql new file mode 100644 index 00000000000..473fe8376e8 --- /dev/null +++ b/crates/matrix-sdk-sqlite/migrations/media_store/001_init.sql @@ -0,0 +1,21 @@ +-- basic kv metadata like the database version and store cipher +CREATE TABLE "kv" ( + "key" TEXT PRIMARY KEY NOT NULL, + "value" BLOB NOT NULL +); + +CREATE TABLE "media" ( + "uri" BLOB NOT NULL, + "format" BLOB NOT NULL, + "data" BLOB NOT NULL, + "last_access" INTEGER NOT NULL, + "ignore_policy" BOOLEAN NOT NULL DEFAULT FALSE, + + PRIMARY KEY ("uri", "format") +); + +CREATE TABLE "lease_locks" ( + "key" TEXT PRIMARY KEY NOT NULL, + "holder" TEXT NOT NULL, + "expiration" REAL NOT NULL +); diff --git a/crates/matrix-sdk-sqlite/src/error.rs b/crates/matrix-sdk-sqlite/src/error.rs index 877031f7f7a..598c2344f38 100644 --- a/crates/matrix-sdk-sqlite/src/error.rs +++ b/crates/matrix-sdk-sqlite/src/error.rs @@ -15,6 +15,8 @@ use deadpool_sqlite::{CreatePoolError, PoolError}; #[cfg(feature = "event-cache")] use matrix_sdk_base::event_cache::store::EventCacheStoreError; +#[cfg(feature = "event-cache")] +use matrix_sdk_base::media::store::MediaStoreError; #[cfg(feature = "state-store")] use matrix_sdk_base::store::StoreError as StateStoreError; #[cfg(feature = "crypto-store")] @@ -169,4 +171,14 @@ impl 
From for EventCacheStoreError { } } +#[cfg(feature = "event-cache")] +impl From for MediaStoreError { + fn from(e: Error) -> Self { + match e { + Error::Encryption(e) => MediaStoreError::Encryption(e), + e => MediaStoreError::backend(e), + } + } +} + pub(crate) type Result = std::result::Result; diff --git a/crates/matrix-sdk-sqlite/src/event_cache_store.rs b/crates/matrix-sdk-sqlite/src/event_cache_store.rs index 39e1450264e..35327676b70 100644 --- a/crates/matrix-sdk-sqlite/src/event_cache_store.rs +++ b/crates/matrix-sdk-sqlite/src/event_cache_store.rs @@ -21,26 +21,18 @@ use deadpool_sqlite::{Object as SqliteAsyncConn, Pool as SqlitePool, Runtime}; use matrix_sdk_base::{ deserialized_responses::TimelineEvent, event_cache::{ - store::{ - compute_filters_string, extract_event_relation, - media::{ - IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStoreInner, - }, - EventCacheStore, - }, + store::{compute_filters_string, extract_event_relation, EventCacheStore}, Event, Gap, }, linked_chunk::{ ChunkContent, ChunkIdentifier, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, Position, RawChunk, Update, }, - media::{MediaRequestParameters, UniqueKey}, timer, }; use matrix_sdk_store_encryption::StoreCipher; use ruma::{ - events::relation::RelationType, time::SystemTime, EventId, MilliSecondsSinceUnixEpoch, MxcUri, - OwnedEventId, RoomId, + events::relation::RelationType, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, }; use rusqlite::{params_from_iter, OptionalExtension, ToSql, Transaction, TransactionBehavior}; use tokio::{ @@ -52,20 +44,15 @@ use tracing::{debug, error, instrument, trace}; use crate::{ error::{Error, Result}, utils::{ - repeat_vars, time_to_timestamp, EncryptableStore, Key, SqliteAsyncConnExt, - SqliteKeyValueStoreAsyncConnExt, SqliteKeyValueStoreConnExt, SqliteTransactionExt, + repeat_vars, EncryptableStore, Key, SqliteAsyncConnExt, SqliteKeyValueStoreAsyncConnExt, + SqliteKeyValueStoreConnExt, 
SqliteTransactionExt, }, OpenStoreError, SqliteStoreConfig, }; mod keys { - // Entries in Key-value store - pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; - pub const LAST_MEDIA_CLEANUP_TIME: &str = "last_media_cleanup_time"; - // Tables pub const LINKED_CHUNKS: &str = "linked_chunks"; - pub const MEDIA: &str = "media"; } /// The database name. @@ -98,8 +85,6 @@ pub struct SqliteEventCacheStore { /// operations. All other connections are used for read operations. The /// lock is used to ensure there is one owner at a time. write_connection: Arc>, - - media_service: MediaService, } #[cfg(not(tarpaulin_include))] @@ -163,17 +148,11 @@ impl SqliteEventCacheStore { None => None, }; - let media_service = MediaService::new(); - let media_retention_policy = conn.get_serialized_kv(keys::MEDIA_RETENTION_POLICY).await?; - let last_media_cleanup_time = conn.get_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME).await?; - media_service.restore(media_retention_policy, last_media_cleanup_time); - Ok(Self { store_cipher, pool, // Use `conn` as our selected write connections. 
write_connection: Arc::new(Mutex::new(conn)), - media_service, }) } @@ -1313,382 +1292,6 @@ impl EventCacheStore for SqliteEventCacheStore { }) .await } - - #[instrument(skip_all)] - async fn add_media_content( - &self, - request: &MediaRequestParameters, - content: Vec, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<()> { - let _timer = timer!("method"); - - self.media_service.add_media_content(self, request, content, ignore_policy).await - } - - #[instrument(skip_all)] - async fn replace_media_key( - &self, - from: &MediaRequestParameters, - to: &MediaRequestParameters, - ) -> Result<(), Self::Error> { - let _timer = timer!("method"); - - let prev_uri = self.encode_key(keys::MEDIA, from.source.unique_key()); - let prev_format = self.encode_key(keys::MEDIA, from.format.unique_key()); - - let new_uri = self.encode_key(keys::MEDIA, to.source.unique_key()); - let new_format = self.encode_key(keys::MEDIA, to.format.unique_key()); - - let conn = self.write().await?; - conn.execute( - r#"UPDATE media SET uri = ?, format = ? WHERE uri = ? AND format = ?"#, - (new_uri, new_format, prev_uri, prev_format), - ) - .await?; - - Ok(()) - } - - #[instrument(skip_all)] - async fn get_media_content(&self, request: &MediaRequestParameters) -> Result>> { - let _timer = timer!("method"); - - self.media_service.get_media_content(self, request).await - } - - #[instrument(skip_all)] - async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> { - let _timer = timer!("method"); - - let uri = self.encode_key(keys::MEDIA, request.source.unique_key()); - let format = self.encode_key(keys::MEDIA, request.format.unique_key()); - - let conn = self.write().await?; - conn.execute("DELETE FROM media WHERE uri = ? 
AND format = ?", (uri, format)).await?; - - Ok(()) - } - - #[instrument(skip(self))] - async fn get_media_content_for_uri( - &self, - uri: &MxcUri, - ) -> Result>, Self::Error> { - let _timer = timer!("method"); - - self.media_service.get_media_content_for_uri(self, uri).await - } - - #[instrument(skip(self))] - async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> { - let _timer = timer!("method"); - - let uri = self.encode_key(keys::MEDIA, uri); - - let conn = self.write().await?; - conn.execute("DELETE FROM media WHERE uri = ?", (uri,)).await?; - - Ok(()) - } - - #[instrument(skip_all)] - async fn set_media_retention_policy( - &self, - policy: MediaRetentionPolicy, - ) -> Result<(), Self::Error> { - let _timer = timer!("method"); - - self.media_service.set_media_retention_policy(self, policy).await - } - - #[instrument(skip_all)] - fn media_retention_policy(&self) -> MediaRetentionPolicy { - let _timer = timer!("method"); - - self.media_service.media_retention_policy() - } - - #[instrument(skip_all)] - async fn set_ignore_media_retention_policy( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - let _timer = timer!("method"); - - self.media_service.set_ignore_media_retention_policy(self, request, ignore_policy).await - } - - #[instrument(skip_all)] - async fn clean_up_media_cache(&self) -> Result<(), Self::Error> { - let _timer = timer!("method"); - - self.media_service.clean_up_media_cache(self).await - } -} - -#[cfg_attr(target_family = "wasm", async_trait(?Send))] -#[cfg_attr(not(target_family = "wasm"), async_trait)] -impl MediaStoreInner for SqliteEventCacheStore { - type Error = Error; - - async fn media_retention_policy_inner( - &self, - ) -> Result, Self::Error> { - let conn = self.read().await?; - conn.get_serialized_kv(keys::MEDIA_RETENTION_POLICY).await - } - - async fn set_media_retention_policy_inner( - &self, - policy: MediaRetentionPolicy, - ) -> 
Result<(), Self::Error> { - let conn = self.write().await?; - conn.set_serialized_kv(keys::MEDIA_RETENTION_POLICY, policy).await?; - Ok(()) - } - - async fn add_media_content_inner( - &self, - request: &MediaRequestParameters, - data: Vec, - last_access: SystemTime, - policy: MediaRetentionPolicy, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - let ignore_policy = ignore_policy.is_yes(); - let data = self.encode_value(data)?; - - if !ignore_policy && policy.exceeds_max_file_size(data.len() as u64) { - return Ok(()); - } - - let uri = self.encode_key(keys::MEDIA, request.source.unique_key()); - let format = self.encode_key(keys::MEDIA, request.format.unique_key()); - let timestamp = time_to_timestamp(last_access); - - let conn = self.write().await?; - conn.execute( - "INSERT OR REPLACE INTO media (uri, format, data, last_access, ignore_policy) VALUES (?, ?, ?, ?, ?)", - (uri, format, data, timestamp, ignore_policy), - ) - .await?; - - Ok(()) - } - - async fn set_ignore_media_retention_policy_inner( - &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, - ) -> Result<(), Self::Error> { - let uri = self.encode_key(keys::MEDIA, request.source.unique_key()); - let format = self.encode_key(keys::MEDIA, request.format.unique_key()); - let ignore_policy = ignore_policy.is_yes(); - - let conn = self.write().await?; - conn.execute( - r#"UPDATE media SET ignore_policy = ? WHERE uri = ? 
AND format = ?"#, - (ignore_policy, uri, format), - ) - .await?; - - Ok(()) - } - - async fn get_media_content_inner( - &self, - request: &MediaRequestParameters, - current_time: SystemTime, - ) -> Result>, Self::Error> { - let uri = self.encode_key(keys::MEDIA, request.source.unique_key()); - let format = self.encode_key(keys::MEDIA, request.format.unique_key()); - let timestamp = time_to_timestamp(current_time); - - let conn = self.write().await?; - let data = conn - .with_transaction::<_, rusqlite::Error, _>(move |txn| { - // Update the last access. - // We need to do this first so the transaction is in write mode right away. - // See: https://sqlite.org/lang_transaction.html#read_transactions_versus_write_transactions - txn.execute( - "UPDATE media SET last_access = ? WHERE uri = ? AND format = ?", - (timestamp, &uri, &format), - )?; - - txn.query_row::, _, _>( - "SELECT data FROM media WHERE uri = ? AND format = ?", - (&uri, &format), - |row| row.get(0), - ) - .optional() - }) - .await?; - - data.map(|v| self.decode_value(&v).map(Into::into)).transpose() - } - - async fn get_media_content_for_uri_inner( - &self, - uri: &MxcUri, - current_time: SystemTime, - ) -> Result>, Self::Error> { - let uri = self.encode_key(keys::MEDIA, uri); - let timestamp = time_to_timestamp(current_time); - - let conn = self.write().await?; - let data = conn - .with_transaction::<_, rusqlite::Error, _>(move |txn| { - // Update the last access. - // We need to do this first so the transaction is in write mode right away. - // See: https://sqlite.org/lang_transaction.html#read_transactions_versus_write_transactions - txn.execute("UPDATE media SET last_access = ? 
WHERE uri = ?", (timestamp, &uri))?; - - txn.query_row::, _, _>( - "SELECT data FROM media WHERE uri = ?", - (&uri,), - |row| row.get(0), - ) - .optional() - }) - .await?; - - data.map(|v| self.decode_value(&v).map(Into::into)).transpose() - } - - async fn clean_up_media_cache_inner( - &self, - policy: MediaRetentionPolicy, - current_time: SystemTime, - ) -> Result<(), Self::Error> { - if !policy.has_limitations() { - // We can safely skip all the checks. - return Ok(()); - } - - let conn = self.write().await?; - let removed = conn - .with_transaction::<_, Error, _>(move |txn| { - let mut removed = false; - - // First, check media content that exceed the max filesize. - if let Some(max_file_size) = policy.computed_max_file_size() { - let count = txn.execute( - "DELETE FROM media WHERE ignore_policy IS FALSE AND length(data) > ?", - (max_file_size,), - )?; - - if count > 0 { - removed = true; - } - } - - // Then, clean up expired media content. - if let Some(last_access_expiry) = policy.last_access_expiry { - let current_timestamp = time_to_timestamp(current_time); - let expiry_secs = last_access_expiry.as_secs(); - let count = txn.execute( - "DELETE FROM media WHERE ignore_policy IS FALSE AND (? - last_access) >= ?", - (current_timestamp, expiry_secs), - )?; - - if count > 0 { - removed = true; - } - } - - // Finally, if the cache size is too big, remove old items until it fits. - if let Some(max_cache_size) = policy.max_cache_size { - // i64 is the integer type used by SQLite, use it here to avoid usize overflow - // during the conversion of the result. - let cache_size = txn - .query_row( - "SELECT sum(length(data)) FROM media WHERE ignore_policy IS FALSE", - (), - |row| { - // `sum()` returns `NULL` if there are no rows. - row.get::<_, Option>(0) - }, - )? - .unwrap_or_default(); - - // If the cache size is overflowing or bigger than max cache size, clean up. - if cache_size > max_cache_size { - // Get the sizes of the media contents ordered by last access. 
- let mut cached_stmt = txn.prepare_cached( - "SELECT rowid, length(data) FROM media \ - WHERE ignore_policy IS FALSE ORDER BY last_access DESC", - )?; - let content_sizes = cached_stmt - .query(())? - .mapped(|row| Ok((row.get::<_, i64>(0)?, row.get::<_, u64>(1)?))); - - let mut accumulated_items_size = 0u64; - let mut limit_reached = false; - let mut rows_to_remove = Vec::new(); - - for result in content_sizes { - let (row_id, size) = match result { - Ok(content_size) => content_size, - Err(error) => { - return Err(error.into()); - } - }; - - if limit_reached { - rows_to_remove.push(row_id); - continue; - } - - match accumulated_items_size.checked_add(size) { - Some(acc) if acc > max_cache_size => { - // We can stop accumulating. - limit_reached = true; - rows_to_remove.push(row_id); - } - Some(acc) => accumulated_items_size = acc, - None => { - // The accumulated size is overflowing but the setting cannot be - // bigger than usize::MAX, we can stop accumulating. - limit_reached = true; - rows_to_remove.push(row_id); - } - } - } - - if !rows_to_remove.is_empty() { - removed = true; - } - - txn.chunk_large_query_over(rows_to_remove, None, |txn, row_ids| { - let sql_params = repeat_vars(row_ids.len()); - let query = format!("DELETE FROM media WHERE rowid IN ({sql_params})"); - txn.prepare(&query)?.execute(params_from_iter(row_ids))?; - Ok(Vec::<()>::new()) - })?; - } - } - - txn.set_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME, current_time)?; - - Ok(removed) - }) - .await?; - - // If we removed media, defragment the database and free space on the - // filesystem. 
- if removed { - conn.vacuum().await?; - } - - Ok(()) - } - - async fn last_media_cleanup_time_inner(&self) -> Result, Self::Error> { - let conn = self.read().await?; - conn.get_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME).await - } } fn find_event_relations_transaction( @@ -1864,7 +1467,6 @@ mod tests { use std::{ path::PathBuf, sync::atomic::{AtomicU32, Ordering::SeqCst}, - time::Duration, }; use assert_matches::assert_matches; @@ -1874,19 +1476,16 @@ mod tests { integration_tests::{ check_test_event, make_test_event, make_test_event_with_event_id, }, - media::IgnoreMediaRetentionPolicy, EventCacheStore, EventCacheStoreError, }, Gap, }, event_cache_store_integration_tests, event_cache_store_integration_tests_time, linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, Position, Update}, - media::{MediaFormat, MediaRequestParameters, MediaThumbnailSettings}, - media_store_inner_integration_tests, }; use matrix_sdk_test::{async_test, DEFAULT_TEST_ROOM_ID}; use once_cell::sync::Lazy; - use ruma::{event_id, events::room::MediaSource, media::Method, mxc_uri, room_id, uint}; + use ruma::{event_id, room_id}; use tempfile::{tempdir, TempDir}; use super::SqliteEventCacheStore; @@ -1914,19 +1513,6 @@ mod tests { event_cache_store_integration_tests!(); event_cache_store_integration_tests_time!(); - media_store_inner_integration_tests!(with_media_size_tests); - - async fn get_event_cache_store_content_sorted_by_last_access( - event_cache_store: &SqliteEventCacheStore, - ) -> Vec> { - let sqlite_db = event_cache_store.read().await.expect("accessing sqlite db failed"); - sqlite_db - .prepare("SELECT data FROM media ORDER BY last_access DESC", |mut stmt| { - stmt.query(())?.mapped(|row| row.get(0)).collect() - }) - .await - .expect("querying media cache content by last access failed") - } #[async_test] async fn test_pool_size() { @@ -1938,73 +1524,6 @@ mod tests { assert_eq!(store.pool.status().max_size, 42); } - #[async_test] - async fn test_last_access() { - let 
event_cache_store = get_event_cache_store().await.expect("creating media cache failed"); - let uri = mxc_uri!("mxc://localhost/media"); - let file_request = MediaRequestParameters { - source: MediaSource::Plain(uri.to_owned()), - format: MediaFormat::File, - }; - let thumbnail_request = MediaRequestParameters { - source: MediaSource::Plain(uri.to_owned()), - format: MediaFormat::Thumbnail(MediaThumbnailSettings::with_method( - Method::Crop, - uint!(100), - uint!(100), - )), - }; - - let content: Vec = "hello world".into(); - let thumbnail_content: Vec = "hello…".into(); - - // Add the media. - event_cache_store - .add_media_content(&file_request, content.clone(), IgnoreMediaRetentionPolicy::No) - .await - .expect("adding file failed"); - - // Since the precision of the timestamp is in seconds, wait so the timestamps - // differ. - tokio::time::sleep(Duration::from_secs(3)).await; - - event_cache_store - .add_media_content( - &thumbnail_request, - thumbnail_content.clone(), - IgnoreMediaRetentionPolicy::No, - ) - .await - .expect("adding thumbnail failed"); - - // File's last access is older than thumbnail. - let contents = - get_event_cache_store_content_sorted_by_last_access(&event_cache_store).await; - - assert_eq!(contents.len(), 2, "media cache contents length is wrong"); - assert_eq!(contents[0], thumbnail_content, "thumbnail is not last access"); - assert_eq!(contents[1], content, "file is not second-to-last access"); - - // Since the precision of the timestamp is in seconds, wait so the timestamps - // differ. - tokio::time::sleep(Duration::from_secs(3)).await; - - // Access the file so its last access is more recent. - let _ = event_cache_store - .get_media_content(&file_request) - .await - .expect("getting file failed") - .expect("file is missing"); - - // File's last access is more recent than thumbnail. 
- let contents = - get_event_cache_store_content_sorted_by_last_access(&event_cache_store).await; - - assert_eq!(contents.len(), 2, "media cache contents length is wrong"); - assert_eq!(contents[0], content, "file is not last access"); - assert_eq!(contents[1], thumbnail_content, "thumbnail is not second-to-last access"); - } - #[async_test] async fn test_linked_chunk_new_items_chunk() { let store = get_event_cache_store().await.expect("creating cache store failed"); @@ -2851,7 +2370,6 @@ mod encrypted_tests { use matrix_sdk_base::{ event_cache::store::{EventCacheStore, EventCacheStoreError}, event_cache_store_integration_tests, event_cache_store_integration_tests_time, - media_store_inner_integration_tests, }; use matrix_sdk_test::{async_test, event_factory::EventFactory}; use once_cell::sync::Lazy; @@ -2883,7 +2401,6 @@ mod encrypted_tests { event_cache_store_integration_tests!(); event_cache_store_integration_tests_time!(); - media_store_inner_integration_tests!(); #[async_test] async fn test_no_sqlite_injection_in_find_event_relations() { diff --git a/crates/matrix-sdk-sqlite/src/lib.rs b/crates/matrix-sdk-sqlite/src/lib.rs index 91d931271d4..4ab6b51820e 100644 --- a/crates/matrix-sdk-sqlite/src/lib.rs +++ b/crates/matrix-sdk-sqlite/src/lib.rs @@ -21,6 +21,8 @@ mod crypto_store; mod error; #[cfg(feature = "event-cache")] mod event_cache_store; +#[cfg(feature = "event-cache")] +mod media_store; #[cfg(feature = "state-store")] mod state_store; mod utils; @@ -37,6 +39,8 @@ pub use self::crypto_store::SqliteCryptoStore; pub use self::error::OpenStoreError; #[cfg(feature = "event-cache")] pub use self::event_cache_store::SqliteEventCacheStore; +#[cfg(feature = "event-cache")] +pub use self::media_store::SqliteMediaStore; #[cfg(feature = "state-store")] pub use self::state_store::{SqliteStateStore, DATABASE_NAME as STATE_STORE_DATABASE_NAME}; diff --git a/crates/matrix-sdk-sqlite/src/media_store.rs b/crates/matrix-sdk-sqlite/src/media_store.rs new file mode 100644 
index 00000000000..8c9c734d27d --- /dev/null +++ b/crates/matrix-sdk-sqlite/src/media_store.rs @@ -0,0 +1,800 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! An SQLite-based backend for the [`EventCacheStore`]. + +use std::{fmt, path::Path, sync::Arc}; + +use async_trait::async_trait; +use deadpool_sqlite::{Object as SqliteAsyncConn, Pool as SqlitePool, Runtime}; +use matrix_sdk_base::{ + media::{ + store::{ + IgnoreMediaRetentionPolicy, MediaRetentionPolicy, MediaService, MediaStore, + MediaStoreInner, + }, + MediaRequestParameters, UniqueKey, + }, + timer, +}; +use matrix_sdk_store_encryption::StoreCipher; +use ruma::{time::SystemTime, MilliSecondsSinceUnixEpoch, MxcUri}; +use rusqlite::{params_from_iter, OptionalExtension}; +use tokio::{ + fs, + sync::{Mutex, OwnedMutexGuard}, +}; +use tracing::{debug, instrument, trace}; + +use crate::{ + error::{Error, Result}, + utils::{ + repeat_vars, time_to_timestamp, EncryptableStore, SqliteAsyncConnExt, + SqliteKeyValueStoreAsyncConnExt, SqliteKeyValueStoreConnExt, SqliteTransactionExt, + }, + OpenStoreError, SqliteStoreConfig, +}; + +mod keys { + // Entries in Key-value store + pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; + pub const LAST_MEDIA_CLEANUP_TIME: &str = "last_media_cleanup_time"; + + // Tables + pub const MEDIA: &str = "media"; +} + +/// The database name. 
+const DATABASE_NAME: &str = "matrix-sdk-media.sqlite3";
+
+/// Identifier of the latest database version.
+///
+/// This is used to figure whether the SQLite database requires a migration.
+/// Every new SQL migration should imply a bump of this number, and changes in
+/// the [`run_migrations`] function.
+const DATABASE_VERSION: u8 = 1;
+
+/// An SQLite-based event cache store.
+#[derive(Clone)]
+pub struct SqliteMediaStore {
+    store_cipher: Option<Arc<StoreCipher>>,
+
+    /// The pool of connections.
+    pool: SqlitePool,
+
+    /// We make the difference between connections for read operations, and for
+    /// write operations. We keep a single connection apart from write
+    /// operations. All other connections are used for read operations. The
+    /// lock is used to ensure there is one owner at a time.
+    write_connection: Arc<Mutex<SqliteAsyncConn>>,
+
+    media_service: MediaService,
+}
+
+#[cfg(not(tarpaulin_include))]
+impl fmt::Debug for SqliteMediaStore {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("SqliteMediaStore").finish_non_exhaustive()
+    }
+}
+
+impl EncryptableStore for SqliteMediaStore {
+    fn get_cypher(&self) -> Option<&StoreCipher> {
+        self.store_cipher.as_deref()
+    }
+}
+
+impl SqliteMediaStore {
+    /// Open the SQLite-based event cache store at the given path using the
+    /// given passphrase to encrypt private data.
+    pub async fn open(
+        path: impl AsRef<Path>,
+        passphrase: Option<&str>,
+    ) -> Result<Self, OpenStoreError> {
+        Self::open_with_config(SqliteStoreConfig::new(path).passphrase(passphrase)).await
+    }
+
+    /// Open the SQLite-based event cache store with the config open config.
+ #[instrument(skip(config), fields(path = ?config.path))] + pub async fn open_with_config(config: SqliteStoreConfig) -> Result { + debug!(?config); + + let _timer = timer!("open_with_config"); + + let SqliteStoreConfig { path, passphrase, pool_config, runtime_config } = config; + + fs::create_dir_all(&path).await.map_err(OpenStoreError::CreateDir)?; + + let mut config = deadpool_sqlite::Config::new(path.join(DATABASE_NAME)); + config.pool = Some(pool_config); + + let pool = config.create_pool(Runtime::Tokio1)?; + + let this = Self::open_with_pool(pool, passphrase.as_deref()).await?; + this.write().await?.apply_runtime_config(runtime_config).await?; + + Ok(this) + } + + /// Open an SQLite-based event cache store using the given SQLite database + /// pool. The given passphrase will be used to encrypt private data. + async fn open_with_pool( + pool: SqlitePool, + passphrase: Option<&str>, + ) -> Result { + let conn = pool.get().await?; + + let version = conn.db_version().await?; + run_migrations(&conn, version).await?; + + let store_cipher = match passphrase { + Some(p) => Some(Arc::new(conn.get_or_create_store_cipher(p).await?)), + None => None, + }; + + let media_service = MediaService::new(); + let media_retention_policy = conn.get_serialized_kv(keys::MEDIA_RETENTION_POLICY).await?; + let last_media_cleanup_time = conn.get_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME).await?; + media_service.restore(media_retention_policy, last_media_cleanup_time); + + Ok(Self { + store_cipher, + pool, + // Use `conn` as our selected write connections. + write_connection: Arc::new(Mutex::new(conn)), + media_service, + }) + } + + // Acquire a connection for executing read operations. 
+    #[instrument(skip_all)]
+    async fn read(&self) -> Result<SqliteAsyncConn> {
+        trace!("Taking a `read` connection");
+        let _timer = timer!("connection");
+
+        let connection = self.pool.get().await?;
+
+        // Per https://www.sqlite.org/foreignkeys.html#fk_enable, foreign key
+        // support must be enabled on a per-connection basis. Execute it every
+        // time we try to get a connection, since we can't guarantee a previous
+        // connection did enable it before.
+        connection.execute_batch("PRAGMA foreign_keys = ON;").await?;
+
+        Ok(connection)
+    }
+
+    // Acquire a connection for executing write operations.
+    #[instrument(skip_all)]
+    async fn write(&self) -> Result<OwnedMutexGuard<SqliteAsyncConn>> {
+        trace!("Taking a `write` connection");
+        let _timer = timer!("connection");
+
+        let connection = self.write_connection.clone().lock_owned().await;
+
+        // Per https://www.sqlite.org/foreignkeys.html#fk_enable, foreign key
+        // support must be enabled on a per-connection basis. Execute it every
+        // time we try to get a connection, since we can't guarantee a previous
+        // connection did enable it before.
+        connection.execute_batch("PRAGMA foreign_keys = ON;").await?;
+
+        Ok(connection)
+    }
+}
+
+/// Run migrations for the given version of the database.
+async fn run_migrations(conn: &SqliteAsyncConn, version: u8) -> Result<()> {
+    if version == 0 {
+        debug!("Creating database");
+    } else if version < DATABASE_VERSION {
+        debug!(version, new_version = DATABASE_VERSION, "Upgrading database");
+    } else {
+        return Ok(());
+    }
+
+    // Always enable foreign keys for the current connection.
+    conn.execute_batch("PRAGMA foreign_keys = ON;").await?;
+
+    if version < 1 {
+        // First turn on WAL mode, this can't be done in the transaction, it fails with
+        // the error message: "cannot change into wal mode from within a transaction".
+        conn.execute_batch("PRAGMA journal_mode = wal;").await?;
+        conn.with_transaction(|txn| {
+            txn.execute_batch(include_str!("../migrations/media_store/001_init.sql"))?;
+            txn.set_db_version(1)
+        })
+        .await?;
+    }
+
+    Ok(())
+}
+
+#[async_trait]
+impl MediaStore for SqliteMediaStore {
+    type Error = Error;
+
+    #[instrument(skip(self))]
+    async fn try_take_leased_lock(
+        &self,
+        lease_duration_ms: u32,
+        key: &str,
+        holder: &str,
+    ) -> Result<bool> {
+        let _timer = timer!("method");
+
+        let key = key.to_owned();
+        let holder = holder.to_owned();
+
+        let now: u64 = MilliSecondsSinceUnixEpoch::now().get().into();
+        let expiration = now + lease_duration_ms as u64;
+
+        let num_touched = self
+            .write()
+            .await?
+            .with_transaction(move |txn| {
+                txn.execute(
+                    "INSERT INTO lease_locks (key, holder, expiration)
+                    VALUES (?1, ?2, ?3)
+                    ON CONFLICT (key)
+                    DO
+                        UPDATE SET holder = ?2, expiration = ?3
+                        WHERE holder = ?2
+                        OR expiration < ?4
+                ",
+                    (key, holder, expiration, now),
+                )
+            })
+            .await?;
+
+        Ok(num_touched == 1)
+    }
+
+    async fn add_media_content(
+        &self,
+        request: &MediaRequestParameters,
+        content: Vec<u8>,
+        ignore_policy: IgnoreMediaRetentionPolicy,
+    ) -> Result<()> {
+        let _timer = timer!("method");
+
+        self.media_service.add_media_content(self, request, content, ignore_policy).await
+    }
+
+    #[instrument(skip_all)]
+    async fn replace_media_key(
+        &self,
+        from: &MediaRequestParameters,
+        to: &MediaRequestParameters,
+    ) -> Result<(), Self::Error> {
+        let _timer = timer!("method");
+
+        let prev_uri = self.encode_key(keys::MEDIA, from.source.unique_key());
+        let prev_format = self.encode_key(keys::MEDIA, from.format.unique_key());
+
+        let new_uri = self.encode_key(keys::MEDIA, to.source.unique_key());
+        let new_format = self.encode_key(keys::MEDIA, to.format.unique_key());
+
+        let conn = self.write().await?;
+        conn.execute(
+            r#"UPDATE media SET uri = ?, format = ? WHERE uri = ?
AND format = ?"#,
+            (new_uri, new_format, prev_uri, prev_format),
+        )
+        .await?;
+
+        Ok(())
+    }
+
+    #[instrument(skip_all)]
+    async fn get_media_content(&self, request: &MediaRequestParameters) -> Result<Option<Vec<u8>>> {
+        let _timer = timer!("method");
+
+        self.media_service.get_media_content(self, request).await
+    }
+
+    #[instrument(skip_all)]
+    async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> {
+        let _timer = timer!("method");
+
+        let uri = self.encode_key(keys::MEDIA, request.source.unique_key());
+        let format = self.encode_key(keys::MEDIA, request.format.unique_key());
+
+        let conn = self.write().await?;
+        conn.execute("DELETE FROM media WHERE uri = ? AND format = ?", (uri, format)).await?;
+
+        Ok(())
+    }
+
+    #[instrument(skip(self))]
+    async fn get_media_content_for_uri(
+        &self,
+        uri: &MxcUri,
+    ) -> Result<Option<Vec<u8>>, Self::Error> {
+        let _timer = timer!("method");
+
+        self.media_service.get_media_content_for_uri(self, uri).await
+    }
+
+    #[instrument(skip(self))]
+    async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> {
+        let _timer = timer!("method");
+
+        let uri = self.encode_key(keys::MEDIA, uri);
+
+        let conn = self.write().await?;
+        conn.execute("DELETE FROM media WHERE uri = ?", (uri,)).await?;
+
+        Ok(())
+    }
+
+    #[instrument(skip_all)]
+    async fn set_media_retention_policy(
+        &self,
+        policy: MediaRetentionPolicy,
+    ) -> Result<(), Self::Error> {
+        let _timer = timer!("method");
+
+        self.media_service.set_media_retention_policy(self, policy).await
+    }
+
+    #[instrument(skip_all)]
+    fn media_retention_policy(&self) -> MediaRetentionPolicy {
+        let _timer = timer!("method");
+
+        self.media_service.media_retention_policy()
+    }
+
+    #[instrument(skip_all)]
+    async fn set_ignore_media_retention_policy(
+        &self,
+        request: &MediaRequestParameters,
+        ignore_policy: IgnoreMediaRetentionPolicy,
+    ) -> Result<(), Self::Error> {
+        let _timer = timer!("method");
+
+        self.media_service.set_ignore_media_retention_policy(self, request,
ignore_policy).await
+    }
+
+    #[instrument(skip_all)]
+    async fn clean_up_media_cache(&self) -> Result<(), Self::Error> {
+        let _timer = timer!("method");
+
+        self.media_service.clean_up_media_cache(self).await
+    }
+}
+
+#[cfg_attr(target_family = "wasm", async_trait(?Send))]
+#[cfg_attr(not(target_family = "wasm"), async_trait)]
+impl MediaStoreInner for SqliteMediaStore {
+    type Error = Error;
+
+    async fn media_retention_policy_inner(
+        &self,
+    ) -> Result<Option<MediaRetentionPolicy>, Self::Error> {
+        let conn = self.read().await?;
+        conn.get_serialized_kv(keys::MEDIA_RETENTION_POLICY).await
+    }
+
+    async fn set_media_retention_policy_inner(
+        &self,
+        policy: MediaRetentionPolicy,
+    ) -> Result<(), Self::Error> {
+        let conn = self.write().await?;
+        conn.set_serialized_kv(keys::MEDIA_RETENTION_POLICY, policy).await?;
+        Ok(())
+    }
+
+    async fn add_media_content_inner(
+        &self,
+        request: &MediaRequestParameters,
+        data: Vec<u8>,
+        last_access: SystemTime,
+        policy: MediaRetentionPolicy,
+        ignore_policy: IgnoreMediaRetentionPolicy,
+    ) -> Result<(), Self::Error> {
+        let ignore_policy = ignore_policy.is_yes();
+        let data = self.encode_value(data)?;
+
+        if !ignore_policy && policy.exceeds_max_file_size(data.len() as u64) {
+            return Ok(());
+        }
+
+        let uri = self.encode_key(keys::MEDIA, request.source.unique_key());
+        let format = self.encode_key(keys::MEDIA, request.format.unique_key());
+        let timestamp = time_to_timestamp(last_access);
+
+        let conn = self.write().await?;
+        conn.execute(
+            "INSERT OR REPLACE INTO media (uri, format, data, last_access, ignore_policy) VALUES (?, ?, ?, ?, ?)",
+            (uri, format, data, timestamp, ignore_policy),
+        )
+        .await?;
+
+        Ok(())
+    }
+
+    async fn set_ignore_media_retention_policy_inner(
+        &self,
+        request: &MediaRequestParameters,
+        ignore_policy: IgnoreMediaRetentionPolicy,
+    ) -> Result<(), Self::Error> {
+        let uri = self.encode_key(keys::MEDIA, request.source.unique_key());
+        let format = self.encode_key(keys::MEDIA, request.format.unique_key());
+        let
ignore_policy = ignore_policy.is_yes();
+
+        let conn = self.write().await?;
+        conn.execute(
+            r#"UPDATE media SET ignore_policy = ? WHERE uri = ? AND format = ?"#,
+            (ignore_policy, uri, format),
+        )
+        .await?;
+
+        Ok(())
+    }
+
+    async fn get_media_content_inner(
+        &self,
+        request: &MediaRequestParameters,
+        current_time: SystemTime,
+    ) -> Result<Option<Vec<u8>>, Self::Error> {
+        let uri = self.encode_key(keys::MEDIA, request.source.unique_key());
+        let format = self.encode_key(keys::MEDIA, request.format.unique_key());
+        let timestamp = time_to_timestamp(current_time);
+
+        let conn = self.write().await?;
+        let data = conn
+            .with_transaction::<_, rusqlite::Error, _>(move |txn| {
+                // Update the last access.
+                // We need to do this first so the transaction is in write mode right away.
+                // See: https://sqlite.org/lang_transaction.html#read_transactions_versus_write_transactions
+                txn.execute(
+                    "UPDATE media SET last_access = ? WHERE uri = ? AND format = ?",
+                    (timestamp, &uri, &format),
+                )?;
+
+                txn.query_row::<Vec<u8>, _, _>(
+                    "SELECT data FROM media WHERE uri = ? AND format = ?",
+                    (&uri, &format),
+                    |row| row.get(0),
+                )
+                .optional()
+            })
+            .await?;
+
+        data.map(|v| self.decode_value(&v).map(Into::into)).transpose()
+    }
+
+    async fn get_media_content_for_uri_inner(
+        &self,
+        uri: &MxcUri,
+        current_time: SystemTime,
+    ) -> Result<Option<Vec<u8>>, Self::Error> {
+        let uri = self.encode_key(keys::MEDIA, uri);
+        let timestamp = time_to_timestamp(current_time);
+
+        let conn = self.write().await?;
+        let data = conn
+            .with_transaction::<_, rusqlite::Error, _>(move |txn| {
+                // Update the last access.
+                // We need to do this first so the transaction is in write mode right away.
+                // See: https://sqlite.org/lang_transaction.html#read_transactions_versus_write_transactions
+                txn.execute("UPDATE media SET last_access = ?
WHERE uri = ?", (timestamp, &uri))?;
+
+                txn.query_row::<Vec<u8>, _, _>(
+                    "SELECT data FROM media WHERE uri = ?",
+                    (&uri,),
+                    |row| row.get(0),
+                )
+                .optional()
+            })
+            .await?;
+
+        data.map(|v| self.decode_value(&v).map(Into::into)).transpose()
+    }
+
+    async fn clean_up_media_cache_inner(
+        &self,
+        policy: MediaRetentionPolicy,
+        current_time: SystemTime,
+    ) -> Result<(), Self::Error> {
+        if !policy.has_limitations() {
+            // We can safely skip all the checks.
+            return Ok(());
+        }
+
+        let conn = self.write().await?;
+        let removed = conn
+            .with_transaction::<_, Error, _>(move |txn| {
+                let mut removed = false;
+
+                // First, check media content that exceed the max filesize.
+                if let Some(max_file_size) = policy.computed_max_file_size() {
+                    let count = txn.execute(
+                        "DELETE FROM media WHERE ignore_policy IS FALSE AND length(data) > ?",
+                        (max_file_size,),
+                    )?;
+
+                    if count > 0 {
+                        removed = true;
+                    }
+                }
+
+                // Then, clean up expired media content.
+                if let Some(last_access_expiry) = policy.last_access_expiry {
+                    let current_timestamp = time_to_timestamp(current_time);
+                    let expiry_secs = last_access_expiry.as_secs();
+                    let count = txn.execute(
+                        "DELETE FROM media WHERE ignore_policy IS FALSE AND (? - last_access) >= ?",
+                        (current_timestamp, expiry_secs),
+                    )?;
+
+                    if count > 0 {
+                        removed = true;
+                    }
+                }
+
+                // Finally, if the cache size is too big, remove old items until it fits.
+                if let Some(max_cache_size) = policy.max_cache_size {
+                    // i64 is the integer type used by SQLite, use it here to avoid usize overflow
+                    // during the conversion of the result.
+                    let cache_size = txn
+                        .query_row(
+                            "SELECT sum(length(data)) FROM media WHERE ignore_policy IS FALSE",
+                            (),
+                            |row| {
+                                // `sum()` returns `NULL` if there are no rows.
+                                row.get::<_, Option<u64>>(0)
+                            },
+                        )?
+                        .unwrap_or_default();
+
+                    // If the cache size is overflowing or bigger than max cache size, clean up.
+                    if cache_size > max_cache_size {
+                        // Get the sizes of the media contents ordered by last access.
+ let mut cached_stmt = txn.prepare_cached( + "SELECT rowid, length(data) FROM media \ + WHERE ignore_policy IS FALSE ORDER BY last_access DESC", + )?; + let content_sizes = cached_stmt + .query(())? + .mapped(|row| Ok((row.get::<_, i64>(0)?, row.get::<_, u64>(1)?))); + + let mut accumulated_items_size = 0u64; + let mut limit_reached = false; + let mut rows_to_remove = Vec::new(); + + for result in content_sizes { + let (row_id, size) = match result { + Ok(content_size) => content_size, + Err(error) => { + return Err(error.into()); + } + }; + + if limit_reached { + rows_to_remove.push(row_id); + continue; + } + + match accumulated_items_size.checked_add(size) { + Some(acc) if acc > max_cache_size => { + // We can stop accumulating. + limit_reached = true; + rows_to_remove.push(row_id); + } + Some(acc) => accumulated_items_size = acc, + None => { + // The accumulated size is overflowing but the setting cannot be + // bigger than usize::MAX, we can stop accumulating. + limit_reached = true; + rows_to_remove.push(row_id); + } + } + } + + if !rows_to_remove.is_empty() { + removed = true; + } + + txn.chunk_large_query_over(rows_to_remove, None, |txn, row_ids| { + let sql_params = repeat_vars(row_ids.len()); + let query = format!("DELETE FROM media WHERE rowid IN ({sql_params})"); + txn.prepare(&query)?.execute(params_from_iter(row_ids))?; + Ok(Vec::<()>::new()) + })?; + } + } + + txn.set_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME, current_time)?; + + Ok(removed) + }) + .await?; + + // If we removed media, defragment the database and free space on the + // filesystem. 
+        if removed {
+            conn.vacuum().await?;
+        }
+
+        Ok(())
+    }
+
+    async fn last_media_cleanup_time_inner(&self) -> Result<Option<SystemTime>, Self::Error> {
+        let conn = self.read().await?;
+        conn.get_serialized_kv(keys::LAST_MEDIA_CLEANUP_TIME).await
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{
+        path::PathBuf,
+        sync::atomic::{AtomicU32, Ordering::SeqCst},
+        time::Duration,
+    };
+
+    use matrix_sdk_base::{
+        media::{
+            store::{IgnoreMediaRetentionPolicy, MediaStore, MediaStoreError},
+            MediaFormat, MediaRequestParameters, MediaThumbnailSettings,
+        },
+        media_store_inner_integration_tests, media_store_integration_tests,
+        media_store_integration_tests_time,
+    };
+    use matrix_sdk_test::async_test;
+    use once_cell::sync::Lazy;
+    use ruma::{events::room::MediaSource, media::Method, mxc_uri, uint};
+    use tempfile::{tempdir, TempDir};
+
+    use super::SqliteMediaStore;
+    use crate::{utils::SqliteAsyncConnExt, SqliteStoreConfig};
+
+    static TMP_DIR: Lazy<TempDir> = Lazy::new(|| tempdir().unwrap());
+    static NUM: AtomicU32 = AtomicU32::new(0);
+
+    fn new_media_store_workspace() -> PathBuf {
+        let name = NUM.fetch_add(1, SeqCst).to_string();
+        TMP_DIR.path().join(name)
+    }
+
+    async fn get_media_store() -> Result<SqliteMediaStore, MediaStoreError> {
+        let tmpdir_path = new_media_store_workspace();
+
+        tracing::info!("using event cache store @ {}", tmpdir_path.to_str().unwrap());
+
+        Ok(SqliteMediaStore::open(tmpdir_path.to_str().unwrap(), None).await.unwrap())
+    }
+
+    media_store_integration_tests!();
+    media_store_integration_tests_time!();
+    media_store_inner_integration_tests!();
+
+    async fn get_media_store_content_sorted_by_last_access(
+        media_store: &SqliteMediaStore,
+    ) -> Vec<Vec<u8>> {
+        let sqlite_db = media_store.read().await.expect("accessing sqlite db failed");
+        sqlite_db
+            .prepare("SELECT data FROM media ORDER BY last_access DESC", |mut stmt| {
+                stmt.query(())?.mapped(|row| row.get(0)).collect()
+            })
+            .await
+            .expect("querying media cache content by last access failed")
+    }
+
+    #[async_test]
+    async fn test_pool_size() {
+
        let tmpdir_path = new_media_store_workspace();
+        let store_open_config = SqliteStoreConfig::new(tmpdir_path).pool_max_size(42);
+
+        let store = SqliteMediaStore::open_with_config(store_open_config).await.unwrap();
+
+        assert_eq!(store.pool.status().max_size, 42);
+    }
+
+    #[async_test]
+    async fn test_last_access() {
+        let media_store = get_media_store().await.expect("creating media cache failed");
+        let uri = mxc_uri!("mxc://localhost/media");
+        let file_request = MediaRequestParameters {
+            source: MediaSource::Plain(uri.to_owned()),
+            format: MediaFormat::File,
+        };
+        let thumbnail_request = MediaRequestParameters {
+            source: MediaSource::Plain(uri.to_owned()),
+            format: MediaFormat::Thumbnail(MediaThumbnailSettings::with_method(
+                Method::Crop,
+                uint!(100),
+                uint!(100),
+            )),
+        };
+
+        let content: Vec<u8> = "hello world".into();
+        let thumbnail_content: Vec<u8> = "hello…".into();
+
+        // Add the media.
+        media_store
+            .add_media_content(&file_request, content.clone(), IgnoreMediaRetentionPolicy::No)
+            .await
+            .expect("adding file failed");
+
+        // Since the precision of the timestamp is in seconds, wait so the timestamps
+        // differ.
+        tokio::time::sleep(Duration::from_secs(3)).await;
+
+        media_store
+            .add_media_content(
+                &thumbnail_request,
+                thumbnail_content.clone(),
+                IgnoreMediaRetentionPolicy::No,
+            )
+            .await
+            .expect("adding thumbnail failed");
+
+        // File's last access is older than thumbnail.
+        let contents = get_media_store_content_sorted_by_last_access(&media_store).await;
+
+        assert_eq!(contents.len(), 2, "media cache contents length is wrong");
+        assert_eq!(contents[0], thumbnail_content, "thumbnail is not last access");
+        assert_eq!(contents[1], content, "file is not second-to-last access");
+
+        // Since the precision of the timestamp is in seconds, wait so the timestamps
+        // differ.
+        tokio::time::sleep(Duration::from_secs(3)).await;
+
+        // Access the file so its last access is more recent.
+        let _ = media_store
+            .get_media_content(&file_request)
+            .await
+            .expect("getting file failed")
+            .expect("file is missing");
+
+        // File's last access is more recent than thumbnail.
+        let contents = get_media_store_content_sorted_by_last_access(&media_store).await;
+
+        assert_eq!(contents.len(), 2, "media cache contents length is wrong");
+        assert_eq!(contents[0], content, "file is not last access");
+        assert_eq!(contents[1], thumbnail_content, "thumbnail is not second-to-last access");
+    }
+}
+
+#[cfg(test)]
+mod encrypted_tests {
+    use std::sync::atomic::{AtomicU32, Ordering::SeqCst};
+
+    use matrix_sdk_base::{
+        media::store::MediaStoreError, media_store_inner_integration_tests,
+        media_store_integration_tests, media_store_integration_tests_time,
+    };
+    use once_cell::sync::Lazy;
+    use tempfile::{tempdir, TempDir};
+
+    use super::SqliteMediaStore;
+
+    static TMP_DIR: Lazy<TempDir> = Lazy::new(|| tempdir().unwrap());
+    static NUM: AtomicU32 = AtomicU32::new(0);
+
+    async fn get_media_store() -> Result<SqliteMediaStore, MediaStoreError> {
+        let name = NUM.fetch_add(1, SeqCst).to_string();
+        let tmpdir_path = TMP_DIR.path().join(name);
+
+        tracing::info!("using event cache store @ {}", tmpdir_path.to_str().unwrap());
+
+        Ok(SqliteMediaStore::open(tmpdir_path.to_str().unwrap(), Some("default_test_password"))
+            .await
+            .unwrap())
+    }
+
+    media_store_integration_tests!();
+    media_store_integration_tests_time!();
+    media_store_inner_integration_tests!();
+}
diff --git a/crates/matrix-sdk/src/client/builder/mod.rs b/crates/matrix-sdk/src/client/builder/mod.rs
index abeb36c150b..409b5f5c54d 100644
--- a/crates/matrix-sdk/src/client/builder/mod.rs
+++ b/crates/matrix-sdk/src/client/builder/mod.rs
@@ -668,11 +668,20 @@ async fn build_store_config(
         .event_cache_store({
             let mut config = config.clone();
 
-            if let Some(cache_path) = cache_path {
+            if let Some(ref cache_path) = cache_path {
                 config = config.path(cache_path);
             }
 
             matrix_sdk_sqlite::SqliteEventCacheStore::open_with_config(config).await?
+            })
+            .media_store({
+                let mut config = config.clone();
+
+                if let Some(ref cache_path) = cache_path {
+                    config = config.path(cache_path);
+                }
+
+                matrix_sdk_sqlite::SqliteMediaStore::open_with_config(config).await?
             });
 
     #[cfg(feature = "e2e-encryption")]
diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs
index e7c82f821a6..5e333b21687 100644
--- a/crates/matrix-sdk/src/client/mod.rs
+++ b/crates/matrix-sdk/src/client/mod.rs
@@ -32,6 +32,7 @@ use futures_util::StreamExt;
 use matrix_sdk_base::crypto::{store::LockableCryptoStore, DecryptionSettings};
 use matrix_sdk_base::{
     event_cache::store::EventCacheStoreLock,
+    media::store::MediaStoreLock,
     store::{DynStateStore, RoomLoadSettings, ServerInfo, WellKnownResponse},
     sync::{Notification, RoomUpdates},
     BaseClient, RoomInfoNotableUpdate, RoomState, RoomStateFilter, SendOutsideWasm, SessionMeta,
@@ -741,6 +742,11 @@ impl Client {
         self.base_client().event_cache_store()
     }
 
+    /// Get a reference to the media store.
+    pub fn media_store(&self) -> &MediaStoreLock {
+        self.base_client().media_store()
+    }
+
     /// Access the native Matrix authentication API with this client.
     pub fn matrix_auth(&self) -> MatrixAuth {
         MatrixAuth::new(self.clone())
diff --git a/crates/matrix-sdk/src/error.rs b/crates/matrix-sdk/src/error.rs
index 34a05236bfc..3a9f9185023 100644
--- a/crates/matrix-sdk/src/error.rs
+++ b/crates/matrix-sdk/src/error.rs
@@ -25,8 +25,8 @@ use matrix_sdk_base::crypto::{
     CryptoStoreError, DecryptorError, KeyExportError, MegolmError, OlmError,
 };
 use matrix_sdk_base::{
-    event_cache::store::EventCacheStoreError, Error as SdkBaseError, QueueWedgeError, RoomState,
-    StoreError,
+    event_cache::store::EventCacheStoreError, media::store::MediaStoreError, Error as SdkBaseError,
+    QueueWedgeError, RoomState, StoreError,
 };
 use reqwest::Error as ReqwestError;
 use ruma::{
@@ -340,6 +340,10 @@ pub enum Error {
     #[error(transparent)]
     EventCacheStore(Box<EventCacheStoreError>),
 
+    /// An error occurred in the media store.
+    #[error(transparent)]
+    MediaStore(Box<MediaStoreError>),
+
     /// An error encountered when trying to parse an identifier.
     #[error(transparent)]
     Identifier(#[from] IdParseError),
@@ -507,6 +511,12 @@ impl From<EventCacheStoreError> for Error {
     }
 }
 
+impl From<MediaStoreError> for Error {
+    fn from(error: MediaStoreError) -> Self {
+        Error::MediaStore(Box::new(error))
+    }
+}
+
 #[cfg(feature = "qrcode")]
 impl From<ScanError> for Error {
     fn from(error: ScanError) -> Self {
diff --git a/crates/matrix-sdk/src/lib.rs b/crates/matrix-sdk/src/lib.rs
index 65dc912defd..aa01fdbc534 100644
--- a/crates/matrix-sdk/src/lib.rs
+++ b/crates/matrix-sdk/src/lib.rs
@@ -82,7 +82,8 @@ pub use http_client::TransmissionProgress;
 pub use matrix_sdk_sqlite::SqliteCryptoStore;
 #[cfg(feature = "sqlite")]
 pub use matrix_sdk_sqlite::{
-    SqliteEventCacheStore, SqliteStateStore, SqliteStoreConfig, STATE_STORE_DATABASE_NAME,
+    SqliteEventCacheStore, SqliteMediaStore, SqliteStateStore, SqliteStoreConfig,
+    STATE_STORE_DATABASE_NAME,
 };
 pub use media::Media;
 pub use pusher::Pusher;
diff --git a/crates/matrix-sdk/src/media.rs b/crates/matrix-sdk/src/media.rs
index 6bb41831bb6..b4016e3ed49 100644
--- a/crates/matrix-sdk/src/media.rs
+++ b/crates/matrix-sdk/src/media.rs
@@ -23,8 +23,8 @@ use std::{fmt, fs::File, path::Path};
 use eyeball::SharedObservable;
 use futures_util::future::try_join;
-use matrix_sdk_base::event_cache::store::media::IgnoreMediaRetentionPolicy;
-pub use matrix_sdk_base::{event_cache::store::media::MediaRetentionPolicy, media::*};
+use matrix_sdk_base::media::store::IgnoreMediaRetentionPolicy;
+pub use matrix_sdk_base::media::{store::MediaRetentionPolicy, *};
 use mime::Mime;
 use ruma::{
     api::{
@@ -428,7 +428,7 @@ impl Media {
         // Read from the cache.
         if use_cache {
             if let Some(content) =
-                self.client.event_cache_store().lock().await?.get_media_content(request).await?
+                self.client.media_store().lock().await?.get_media_content(request).await?
 {
                 return Ok(content);
             }
@@ -520,7 +520,7 @@ impl Media {
 
         if use_cache {
             self.client
-                .event_cache_store()
+                .media_store()
                 .lock()
                 .await?
                 .add_media_content(request, content.clone(), IgnoreMediaRetentionPolicy::No)
@@ -538,7 +538,7 @@ async fn get_local_media_content(&self, uri: &MxcUri) -> Result<Vec<u8>> {
         // Read from the cache.
         self.client
-            .event_cache_store()
+            .media_store()
            .lock()
            .await?
            .get_media_content_for_uri(uri)
@@ -552,7 +552,7 @@
     ///
     /// * `request` - The `MediaRequest` of the content.
     pub async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> {
-        Ok(self.client.event_cache_store().lock().await?.remove_media_content(request).await?)
+        Ok(self.client.media_store().lock().await?.remove_media_content(request).await?)
     }
 
     /// Delete all the media content corresponding to the given
@@ -562,7 +562,7 @@
     ///
     /// * `uri` - The `MxcUri` of the files.
     pub async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> {
-        Ok(self.client.event_cache_store().lock().await?.remove_media_content_for_uri(uri).await?)
+        Ok(self.client.media_store().lock().await?.remove_media_content_for_uri(uri).await?)
     }
 
     /// Get the file of the given media event content.
@@ -697,20 +697,20 @@
     ///
     /// * `policy` - The `MediaRetentionPolicy` to use.
     pub async fn set_media_retention_policy(&self, policy: MediaRetentionPolicy) -> Result<()> {
-        self.client.event_cache_store().lock().await?.set_media_retention_policy(policy).await?;
+        self.client.media_store().lock().await?.set_media_retention_policy(policy).await?;
         Ok(())
     }
 
     /// Get the current `MediaRetentionPolicy`.
     pub async fn media_retention_policy(&self) -> Result<MediaRetentionPolicy> {
-        Ok(self.client.event_cache_store().lock().await?.media_retention_policy())
+        Ok(self.client.media_store().lock().await?.media_retention_policy())
     }
 
     /// Clean up the media cache with the current [`MediaRetentionPolicy`].
     ///
     /// If there is already an ongoing cleanup, this is a noop.
pub async fn clean_up_media_cache(&self) -> Result<()> { - self.client.event_cache_store().lock().await?.clean_up_media_cache().await?; + self.client.media_store().lock().await?.clean_up_media_cache().await?; Ok(()) } diff --git a/crates/matrix-sdk/src/room/mod.rs b/crates/matrix-sdk/src/room/mod.rs index b04edce7341..9e82d33c008 100644 --- a/crates/matrix-sdk/src/room/mod.rs +++ b/crates/matrix-sdk/src/room/mod.rs @@ -43,8 +43,7 @@ use matrix_sdk_base::{ deserialized_responses::{ RawAnySyncOrStrippedState, RawSyncOrStrippedState, SyncOrStrippedState, }, - event_cache::store::media::IgnoreMediaRetentionPolicy, - media::MediaThumbnailSettings, + media::{store::IgnoreMediaRetentionPolicy, MediaThumbnailSettings}, store::{StateStoreExt, ThreadSubscriptionStatus}, ComposerDraft, EncryptionState, RoomInfoNotableUpdateReasons, RoomMemberships, SendOutsideWasm, StateChanges, StateStoreDataKey, StateStoreDataValue, @@ -2582,7 +2581,7 @@ impl Room { .await?; if store_in_cache { - let cache_store_lock_guard = self.client.event_cache_store().lock().await?; + let media_store_lock_guard = self.client.media_store().lock().await?; // A failure to cache shouldn't prevent the whole upload from finishing // properly, so only log errors during caching. 
@@ -2591,7 +2590,7 @@
             let request =
                 MediaRequestParameters { source: media_source.clone(), format: MediaFormat::File };
 
-            if let Err(err) = cache_store_lock_guard
+            if let Err(err) = media_store_lock_guard
                 .add_media_content(&request, data, IgnoreMediaRetentionPolicy::No)
                 .await
             {
@@ -2608,7 +2607,7 @@
                 format: MediaFormat::Thumbnail(MediaThumbnailSettings::new(width, height)),
             };
 
-            if let Err(err) = cache_store_lock_guard
+            if let Err(err) = media_store_lock_guard
                 .add_media_content(&request, data, IgnoreMediaRetentionPolicy::No)
                 .await
             {
diff --git a/crates/matrix-sdk/src/send_queue/mod.rs b/crates/matrix-sdk/src/send_queue/mod.rs
index d7146031041..03eef775eb8 100644
--- a/crates/matrix-sdk/src/send_queue/mod.rs
+++ b/crates/matrix-sdk/src/send_queue/mod.rs
@@ -142,7 +142,7 @@ use eyeball::SharedObservable;
 use matrix_sdk_base::store::FinishGalleryItemInfo;
 use matrix_sdk_base::{
     event_cache::store::EventCacheStoreError,
-    media::MediaRequestParameters,
+    media::{store::MediaStoreError, MediaRequestParameters},
     store::{
         ChildTransactionId, DependentQueuedRequest, DependentQueuedRequestKind, DynStateStore,
         FinishUploadThumbnailInfo, QueueWedgeError, QueuedRequest, QueuedRequestKind,
@@ -842,7 +842,7 @@ impl RoomSendQueue {
             let fut = async move {
                 let data = room
                     .client()
-                    .event_cache_store()
+                    .media_store()
                     .lock()
                     .await?
                     .get_media_content(&cache_key)
@@ -2348,6 +2348,10 @@ pub enum RoomSendQueueStorageError {
     #[error(transparent)]
     EventCacheStoreError(#[from] EventCacheStoreError),
 
+    /// Error caused by the media store.
+    #[error(transparent)]
+    MediaStoreError(#[from] MediaStoreError),
+
     /// Error caused when attempting to get a handle on the event cache store.
#[error(transparent)] LockError(#[from] LockStoreError), diff --git a/crates/matrix-sdk/src/send_queue/progress.rs b/crates/matrix-sdk/src/send_queue/progress.rs index 72fa1b900bc..11a0de2fab0 100644 --- a/crates/matrix-sdk/src/send_queue/progress.rs +++ b/crates/matrix-sdk/src/send_queue/progress.rs @@ -94,17 +94,17 @@ impl RoomSendQueue { }; // Get the size of the file being uploaded from the event cache. - let bytes = match room.client().event_cache_store().lock().await { + let bytes = match room.client().media_store().lock().await { Ok(cache) => match cache.get_media_content(cache_key).await { Ok(Some(content)) => content.len(), Ok(None) => 0, Err(err) => { - warn!("error when reading media content from cache store: {err}"); + warn!("error when reading media content from media store: {err}"); 0 } }, Err(err) => { - warn!("couldn't acquire cache store lock: {err}"); + warn!("couldn't acquire media store lock: {err}"); 0 } }; @@ -195,9 +195,9 @@ impl RoomSendQueue { return Ok(None); } - let cache_store_guard = client.event_cache_store().lock().await?; + let media_store_guard = client.media_store().lock().await?; - let maybe_content = cache_store_guard.get_media_content(&cache_key).await?; + let maybe_content = media_store_guard.get_media_content(&cache_key).await?; Ok(maybe_content.map(|c| c.len())) } diff --git a/crates/matrix-sdk/src/send_queue/upload.rs b/crates/matrix-sdk/src/send_queue/upload.rs index c12b5d496bb..30ec4ca0099 100644 --- a/crates/matrix-sdk/src/send_queue/upload.rs +++ b/crates/matrix-sdk/src/send_queue/upload.rs @@ -17,20 +17,19 @@ #[cfg(feature = "unstable-msc4274")] use std::{collections::HashMap, iter::zip}; +#[cfg(feature = "unstable-msc4274")] +use matrix_sdk_base::{ + media::UniqueKey, + store::{AccumulatedSentMediaInfo, FinishGalleryItemInfo}, +}; use matrix_sdk_base::{ - event_cache::store::media::IgnoreMediaRetentionPolicy, - media::{MediaFormat, MediaRequestParameters}, + media::{store::IgnoreMediaRetentionPolicy, MediaFormat, 
MediaRequestParameters},
     store::{
         ChildTransactionId, DependentQueuedRequestKind, FinishUploadThumbnailInfo,
         QueuedRequestKind, SentMediaInfo, SentRequestKey, SerializableEventContent,
     },
     RoomState,
 };
-#[cfg(feature = "unstable-msc4274")]
-use matrix_sdk_base::{
-    media::UniqueKey,
-    store::{AccumulatedSentMediaInfo, FinishGalleryItemInfo},
-};
 use mime::Mime;
 #[cfg(feature = "unstable-msc4274")]
 use ruma::events::room::message::{GalleryItemType, GalleryMessageEventContent};
@@ -420,14 +419,11 @@ impl RoomSendQueue {
         file_media_request: &MediaRequestParameters,
     ) -> Result<MediaCacheResult, RoomSendQueueStorageError> {
         let client = room.client();
-        let cache_store = client
-            .event_cache_store()
-            .lock()
-            .await
-            .map_err(RoomSendQueueStorageError::LockError)?;
+        let media_store =
+            client.media_store().lock().await.map_err(RoomSendQueueStorageError::LockError)?;
 
         // Cache the file itself in the cache store.
-        cache_store
+        media_store
             .add_media_content(
                 file_media_request,
                 data,
@@ -435,7 +431,7 @@
                 IgnoreMediaRetentionPolicy::Yes,
             )
             .await
-            .map_err(RoomSendQueueStorageError::EventCacheStoreError)?;
+            .map_err(RoomSendQueueStorageError::MediaStoreError)?;
 
         // Process the thumbnail, if it's been provided.
         if let Some(thumbnail) = thumbnail {
@@ -449,7 +445,7 @@
             // Cache thumbnail in the cache store.
             let thumbnail_media_request = Media::make_local_file_media_request(&txn);
 
-            cache_store
+            media_store
                 .add_media_content(
                     &thumbnail_media_request,
                     data,
@@ -457,7 +453,7 @@
                     IgnoreMediaRetentionPolicy::Yes,
                 )
                 .await
-                .map_err(RoomSendQueueStorageError::EventCacheStoreError)?;
+                .map_err(RoomSendQueueStorageError::MediaStoreError)?;
 
             Ok(MediaCacheResult {
                 upload_thumbnail_txn: Some(txn.clone()),
@@ -793,12 +789,12 @@ impl QueueStorage {
         // At this point, all the requests and dependent requests have been cleaned up.
         // Perform the final step: empty the cache from the local items.
{ - let event_cache = client.event_cache_store().lock().await?; - event_cache + let media_store = client.media_store().lock().await?; + media_store .remove_media_content_for_uri(&Media::make_local_uri(&handles.upload_file_txn)) .await?; if let Some(txn) = &handles.upload_thumbnail_txn { - event_cache.remove_media_content_for_uri(&Media::make_local_uri(txn)).await?; + media_store.remove_media_content_for_uri(&Media::make_local_uri(txn)).await?; } } @@ -938,22 +934,22 @@ async fn update_media_cache_keys_after_upload( let from_req = Media::make_local_file_media_request(file_upload_txn); trace!(from = ?from_req.source, to = ?sent_media.file, "renaming media file key in cache store"); - let cache_store = - client.event_cache_store().lock().await.map_err(RoomSendQueueStorageError::LockError)?; + let media_store = + client.media_store().lock().await.map_err(RoomSendQueueStorageError::LockError)?; // The media can now be removed during cleanups. - cache_store + media_store .set_ignore_media_retention_policy(&from_req, IgnoreMediaRetentionPolicy::No) .await - .map_err(RoomSendQueueStorageError::EventCacheStoreError)?; + .map_err(RoomSendQueueStorageError::MediaStoreError)?; - cache_store + media_store .replace_media_key( &from_req, &MediaRequestParameters { source: sent_media.file.clone(), format: MediaFormat::File }, ) .await - .map_err(RoomSendQueueStorageError::EventCacheStoreError)?; + .map_err(RoomSendQueueStorageError::MediaStoreError)?; // Rename the thumbnail too, if needs be. if let Some((info, new_source)) = thumbnail_info.as_ref().zip(sent_media.thumbnail.clone()) { @@ -968,18 +964,18 @@ async fn update_media_cache_keys_after_upload( trace!(from = ?from_req.source, to = ?new_source, "renaming thumbnail file key in cache store"); // The media can now be removed during cleanups. 
- cache_store + media_store .set_ignore_media_retention_policy(&from_req, IgnoreMediaRetentionPolicy::No) .await - .map_err(RoomSendQueueStorageError::EventCacheStoreError)?; + .map_err(RoomSendQueueStorageError::MediaStoreError)?; - cache_store + media_store .replace_media_key( &from_req, &MediaRequestParameters { source: new_source, format: MediaFormat::File }, ) .await - .map_err(RoomSendQueueStorageError::EventCacheStoreError)?; + .map_err(RoomSendQueueStorageError::MediaStoreError)?; } Ok(())