diff --git a/Cargo.lock b/Cargo.lock
index 209543318..37c9969e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1559,7 +1559,6 @@ dependencies = [
  "prometheus",
  "r2d2",
  "r2d2_postgres",
- "r2d2_sqlite",
  "rand 0.8.5",
  "rayon",
  "regex",
@@ -4406,17 +4405,6 @@ dependencies = [
  "r2d2",
 ]
 
-[[package]]
-name = "r2d2_sqlite"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99f31323d6161385f385046738df520e0e8694fa74852d35891fc0be08348ddc"
-dependencies = [
- "r2d2",
- "rusqlite",
- "uuid",
-]
-
 [[package]]
 name = "rand"
 version = "0.7.3"
@@ -6355,7 +6343,6 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
 dependencies = [
  "getrandom 0.2.10",
- "rand 0.8.5",
  "serde",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index a01b4a720..3de9acf1d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -40,7 +40,6 @@ semver = { version = "1.0.4", features = ["serde"] }
 slug = "0.1.1"
 r2d2 = "0.8"
 r2d2_postgres = "0.18"
-r2d2_sqlite = "0.22.0"
 sqlx = { version = "0.7", features = [ "runtime-tokio", "postgres", "chrono" ] }
 url = { version = "2.1.1", features = ["serde"] }
 docsrs-metadata = { path = "crates/metadata" }
diff --git a/src/config.rs b/src/config.rs
index b0e63b63a..a773a8302 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -15,9 +15,6 @@ pub struct Config {
     pub(crate) max_pool_size: u32,
     pub(crate) min_pool_idle: u32,
 
-    // local pool for sqlite connections
-    pub(crate) max_sqlite_pool_size: u64,
-
     // Storage params
     pub(crate) storage_backend: StorageKind,
 
@@ -143,7 +140,6 @@ impl Config {
             database_url: require_env("DOCSRS_DATABASE_URL")?,
 
             max_pool_size: env("DOCSRS_MAX_POOL_SIZE", 90)?,
-            max_sqlite_pool_size: env("DOCSRS_MAX_SQLITE_POOL_SIZE", 500)?,
             min_pool_idle: env("DOCSRS_MIN_POOL_IDLE", 10)?,
 
             storage_backend: env("DOCSRS_STORAGE_BACKEND", StorageKind::Database)?,
diff --git a/src/storage/archive_index.rs b/src/storage/archive_index.rs
index 9cd2ee96f..cb4b7dc91 100644
--- a/src/storage/archive_index.rs
+++ b/src/storage/archive_index.rs
@@ -1,11 +1,9 @@
 use crate::error::Result;
 use crate::storage::{compression::CompressionAlgorithm, FileRange};
 use anyhow::{bail, Context as _};
-use rusqlite::{Connection, OptionalExtension};
+use rusqlite::{Connection, OpenFlags, OptionalExtension};
 use std::{fs, io, path::Path};
 
-use super::sqlite_pool::SqliteConnectionPool;
-
 #[derive(PartialEq, Eq, Debug)]
 pub(crate) struct FileInfo {
     range: FileRange,
@@ -77,8 +75,8 @@ pub(crate) fn create<R: io::Read + io::Seek, P: AsRef<Path>>(
 fn find_in_sqlite_index(conn: &Connection, search_for: &str) -> Result<Option<FileInfo>> {
     let mut stmt = conn.prepare(
         "
-        SELECT start, end, compression 
-        FROM files 
+        SELECT start, end, compression
+        FROM files
         WHERE path = ?
", )?; @@ -104,11 +102,12 @@ fn find_in_sqlite_index(conn: &Connection, search_for: &str) -> Result>( archive_index_path: P, search_for: &str, - pool: &SqliteConnectionPool, ) -> Result> { - pool.with_connection(archive_index_path, |connection| { - find_in_sqlite_index(connection, search_for) - }) + let connection = Connection::open_with_flags( + archive_index_path, + OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX, + )?; + find_in_sqlite_index(&connection, search_for) } #[cfg(test)] @@ -141,19 +140,13 @@ mod tests { let tempfile = tempfile::NamedTempFile::new().unwrap().into_temp_path(); create(&mut tf, &tempfile).unwrap(); - let fi = find_in_file(&tempfile, "testfile1", &SqliteConnectionPool::default()) - .unwrap() - .unwrap(); + let fi = find_in_file(&tempfile, "testfile1").unwrap().unwrap(); assert_eq!(fi.range, FileRange::new(39, 459)); assert_eq!(fi.compression, CompressionAlgorithm::Bzip2); - assert!(find_in_file( - &tempfile, - "some_other_file", - &SqliteConnectionPool::default(), - ) - .unwrap() - .is_none()); + assert!(find_in_file(&tempfile, "some_other_file",) + .unwrap() + .is_none()); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 4da93cfdb..0a833450f 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -2,12 +2,10 @@ mod archive_index; mod compression; mod database; mod s3; -mod sqlite_pool; pub use self::compression::{compress, decompress, CompressionAlgorithm, CompressionAlgorithms}; use self::database::DatabaseBackend; use self::s3::S3Backend; -use self::sqlite_pool::SqliteConnectionPool; use crate::error::Result; use crate::web::metrics::RenderingTimesRecorder; use crate::{db::Pool, utils::spawn_blocking, Config, InstanceMetrics}; @@ -16,7 +14,6 @@ use chrono::{DateTime, Utc}; use fn_error_context::context; use path_slash::PathExt; use std::io::BufReader; -use std::num::NonZeroU64; use std::{ collections::{HashMap, HashSet}, ffi::OsStr, @@ -114,7 +111,6 @@ enum StorageBackend { pub struct AsyncStorage { backend: StorageBackend, config: Arc, - sqlite_pool: Arc, } impl AsyncStorage { @@ -124,10 +120,6 @@ impl AsyncStorage { config: Arc, ) -> Result { Ok(Self { - sqlite_pool: Arc::new(SqliteConnectionPool::new( - NonZeroU64::new(config.max_sqlite_pool_size) - .ok_or_else(|| anyhow!("invalid sqlite pool size"))?, - )), config: config.clone(), backend: match config.storage_backend { StorageKind::Database => { @@ -239,10 +231,9 @@ impl AsyncStorage { pub(crate) async fn exists_in_archive(&self, archive_path: &str, path: &str) -> Result { match self.get_index_filename(archive_path).await { Ok(index_filename) => Ok({ - let sqlite_pool = self.sqlite_pool.clone(); let path = path.to_owned(); spawn_blocking(move || { - Ok(archive_index::find_in_file(index_filename, &path, &sqlite_pool)?.is_some()) + Ok(archive_index::find_in_file(index_filename, &path)?.is_some()) }) .await? }), @@ -333,10 +324,8 @@ impl AsyncStorage { } let index_filename = self.get_index_filename(archive_path).await?; let info = { - let sqlite_pool = self.sqlite_pool.clone(); let path = path.to_owned(); - spawn_blocking(move || archive_index::find_in_file(index_filename, &path, &sqlite_pool)) - .await + spawn_blocking(move || archive_index::find_in_file(index_filename, &path)).await }? 
         .ok_or(PathNotFoundError)?;
 
diff --git a/src/storage/sqlite_pool.rs b/src/storage/sqlite_pool.rs
deleted file mode 100644
index e0e4c3104..000000000
--- a/src/storage/sqlite_pool.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use anyhow::Result;
-use moka::sync::Cache;
-use r2d2_sqlite::SqliteConnectionManager;
-use rusqlite::{Connection, OpenFlags};
-use std::{
-    num::NonZeroU64,
-    path::{Path, PathBuf},
-    time::Duration,
-};
-
-static MAX_IDLE_TIME: Duration = Duration::from_secs(10 * 60);
-static MAX_LIFE_TIME: Duration = Duration::from_secs(60 * 60);
-
-/// SQLite connection pool.
-///
-/// Typical connection pools handle many connections to a single database,
-/// while this one handles some connections to many databases.
-///
-/// The more connections we keep alive, the more open files we have,
-/// so you might need to tweak this limit based on the max open files
-/// on your system.
-///
-/// We open the databases in readonly mode.
-/// We are using an additional connection pool per database so parallel requests
-/// can be efficiently answered. Because of this the actual max connection count
-/// might be higher than the given max_connections.
-///
-/// We keep a minimum of one connection per database, for one hour.
-/// Any additional connections will be dropped after 10 minutes of inactivity.
-///
-/// * `max_databases` is the maximum amount of databases in the pool.
-/// * for each of the databases, we manage a pool of 1-10 connections
-#[derive(Clone)]
-pub(crate) struct SqliteConnectionPool {
-    pools: Cache<PathBuf, r2d2::Pool<SqliteConnectionManager>>,
-}
-
-impl Default for SqliteConnectionPool {
-    fn default() -> Self {
-        Self::new(NonZeroU64::new(10).unwrap())
-    }
-}
-
-impl SqliteConnectionPool {
-    pub(crate) fn new(max_databases: NonZeroU64) -> Self {
-        Self {
-            pools: Cache::builder()
-                .max_capacity(max_databases.get())
-                .time_to_idle(MAX_LIFE_TIME)
-                .build(),
-        }
-    }
-
-    pub(crate) fn with_connection<R, P: AsRef<Path>, F: Fn(&Connection) -> Result<R>>(
-        &self,
-        path: P,
-        f: F,
-    ) -> Result<R> {
-        let path = path.as_ref().to_owned();
-
-        let pool = self
-            .pools
-            .entry(path.clone())
-            .or_insert_with(|| {
-                let manager = SqliteConnectionManager::file(path)
-                    .with_flags(OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX);
-                r2d2::Pool::builder()
-                    .min_idle(Some(1))
-                    .max_lifetime(Some(MAX_LIFE_TIME))
-                    .idle_timeout(Some(MAX_IDLE_TIME))
-                    .max_size(10)
-                    .build_unchecked(manager)
-            })
-            .into_value();
-
-        let conn = pool.get()?;
-        f(&conn)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_simple_connection() {
-        let filename = tempfile::NamedTempFile::new().unwrap().into_temp_path();
-        rusqlite::Connection::open(&filename).unwrap();
-
-        let pool = SqliteConnectionPool::new(NonZeroU64::new(1).unwrap());
-
-        pool.with_connection(&filename, |conn| {
-            conn.query_row("SELECT 1", [], |row| {
-                assert_eq!(row.get::<_, i32>(0).unwrap(), 1);
-                Ok(())
-            })
-            .unwrap();
-            Ok(())
-        })
-        .unwrap();
-    }
-}
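
For reference, after this change each archive-index lookup opens its own short-lived, read-only SQLite connection instead of borrowing one from the deleted `SqliteConnectionPool`. Below is a minimal standalone sketch of that access pattern, not part of the patch: the `index.sqlite` path and the query values are illustrative only, while the open flags and the `files` table come from the diff (assumes the `rusqlite` and `anyhow` crates).

```rust
use rusqlite::{Connection, OpenFlags, OptionalExtension};

fn main() -> anyhow::Result<()> {
    // SQLITE_OPEN_READ_ONLY fails on a missing file instead of creating it;
    // SQLITE_OPEN_NO_MUTEX drops SQLite's per-connection serialization, which
    // is safe here because the connection never leaves this thread.
    let conn = Connection::open_with_flags(
        "index.sqlite", // hypothetical path to an existing archive index
        OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )?;

    // Same query shape as find_in_sqlite_index: one lookup, then the
    // connection is dropped (and closed) at the end of the scope.
    let start: Option<i64> = conn
        .query_row(
            "SELECT start FROM files WHERE path = ?",
            ["some/doc/page.html"], // hypothetical index entry
            |row| row.get(0),
        )
        .optional()?;

    println!("start offset: {start:?}");
    Ok(())
}
```

The call sites still wrap `find_in_file` in `spawn_blocking`, so the open-per-lookup cost this introduces stays off the async executor threads.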