Skip to content

Commit

Permalink
remove archive index sqlite connection pool for simplicity
Browse files Browse the repository at this point in the history
  • Loading branch information
syphar committed Oct 14, 2023
1 parent 3f50d6b commit 21968db
Show file tree
Hide file tree
Showing 6 changed files with 14 additions and 152 deletions.
13 changes: 0 additions & 13 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ semver = { version = "1.0.4", features = ["serde"] }
slug = "0.1.1"
r2d2 = "0.8"
r2d2_postgres = "0.18"
r2d2_sqlite = "0.22.0"
sqlx = { version = "0.7", features = [ "runtime-tokio", "postgres", "chrono" ] }
url = { version = "2.1.1", features = ["serde"] }
docsrs-metadata = { path = "crates/metadata" }
Expand Down
4 changes: 0 additions & 4 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,6 @@ pub struct Config {
pub(crate) max_pool_size: u32,
pub(crate) min_pool_idle: u32,

// local pool for sqlite connections
pub(crate) max_sqlite_pool_size: u64,

// Storage params
pub(crate) storage_backend: StorageKind,

Expand Down Expand Up @@ -143,7 +140,6 @@ impl Config {

database_url: require_env("DOCSRS_DATABASE_URL")?,
max_pool_size: env("DOCSRS_MAX_POOL_SIZE", 90)?,
max_sqlite_pool_size: env("DOCSRS_MAX_SQLITE_POOL_SIZE", 500)?,
min_pool_idle: env("DOCSRS_MIN_POOL_IDLE", 10)?,

storage_backend: env("DOCSRS_STORAGE_BACKEND", StorageKind::Database)?,
Expand Down
31 changes: 12 additions & 19 deletions src/storage/archive_index.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
use crate::error::Result;
use crate::storage::{compression::CompressionAlgorithm, FileRange};
use anyhow::{bail, Context as _};
use rusqlite::{Connection, OptionalExtension};
use rusqlite::{Connection, OpenFlags, OptionalExtension};
use std::{fs, io, path::Path};

use super::sqlite_pool::SqliteConnectionPool;

#[derive(PartialEq, Eq, Debug)]
pub(crate) struct FileInfo {
range: FileRange,
Expand Down Expand Up @@ -77,8 +75,8 @@ pub(crate) fn create<R: io::Read + io::Seek, P: AsRef<Path>>(
fn find_in_sqlite_index(conn: &Connection, search_for: &str) -> Result<Option<FileInfo>> {
let mut stmt = conn.prepare(
"
SELECT start, end, compression
FROM files
SELECT start, end, compression
FROM files
WHERE path = ?
",
)?;
Expand All @@ -104,11 +102,12 @@ fn find_in_sqlite_index(conn: &Connection, search_for: &str) -> Result<Option<Fi
/// Look up a single path in an archive's SQLite index file.
///
/// Opens the index at `archive_index_path` as a fresh, read-only
/// connection for the duration of this one call — per this commit,
/// the previous per-process connection pool was removed for
/// simplicity, so each lookup pays the (cheap) open cost instead.
///
/// Returns `Ok(Some(FileInfo))` when `search_for` exists in the
/// index, `Ok(None)` when it does not, and `Err` if the database
/// cannot be opened or queried.
pub(crate) fn find_in_file<P: AsRef<Path>>(
    archive_index_path: P,
    search_for: &str,
) -> Result<Option<FileInfo>> {
    // NO_MUTEX skips SQLite's per-connection locking; that is sound here
    // because the connection is a local that never leaves this call, so
    // it is only ever touched by the current thread.
    let connection = Connection::open_with_flags(
        archive_index_path,
        OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )?;
    find_in_sqlite_index(&connection, search_for)
}

#[cfg(test)]
Expand Down Expand Up @@ -141,19 +140,13 @@ mod tests {
let tempfile = tempfile::NamedTempFile::new().unwrap().into_temp_path();
create(&mut tf, &tempfile).unwrap();

let fi = find_in_file(&tempfile, "testfile1", &SqliteConnectionPool::default())
.unwrap()
.unwrap();
let fi = find_in_file(&tempfile, "testfile1").unwrap().unwrap();

assert_eq!(fi.range, FileRange::new(39, 459));
assert_eq!(fi.compression, CompressionAlgorithm::Bzip2);

assert!(find_in_file(
&tempfile,
"some_other_file",
&SqliteConnectionPool::default(),
)
.unwrap()
.is_none());
assert!(find_in_file(&tempfile, "some_other_file",)
.unwrap()
.is_none());
}
}
15 changes: 2 additions & 13 deletions src/storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,10 @@ mod archive_index;
mod compression;
mod database;
mod s3;
mod sqlite_pool;

pub use self::compression::{compress, decompress, CompressionAlgorithm, CompressionAlgorithms};
use self::database::DatabaseBackend;
use self::s3::S3Backend;
use self::sqlite_pool::SqliteConnectionPool;
use crate::error::Result;
use crate::web::metrics::RenderingTimesRecorder;
use crate::{db::Pool, utils::spawn_blocking, Config, InstanceMetrics};
Expand All @@ -16,7 +14,6 @@ use chrono::{DateTime, Utc};
use fn_error_context::context;
use path_slash::PathExt;
use std::io::BufReader;
use std::num::NonZeroU64;
use std::{
collections::{HashMap, HashSet},
ffi::OsStr,
Expand Down Expand Up @@ -114,7 +111,6 @@ enum StorageBackend {
pub struct AsyncStorage {
backend: StorageBackend,
config: Arc<Config>,
sqlite_pool: Arc<SqliteConnectionPool>,
}

impl AsyncStorage {
Expand All @@ -124,10 +120,6 @@ impl AsyncStorage {
config: Arc<Config>,
) -> Result<Self> {
Ok(Self {
sqlite_pool: Arc::new(SqliteConnectionPool::new(
NonZeroU64::new(config.max_sqlite_pool_size)
.ok_or_else(|| anyhow!("invalid sqlite pool size"))?,
)),
config: config.clone(),
backend: match config.storage_backend {
StorageKind::Database => {
Expand Down Expand Up @@ -239,10 +231,9 @@ impl AsyncStorage {
pub(crate) async fn exists_in_archive(&self, archive_path: &str, path: &str) -> Result<bool> {
match self.get_index_filename(archive_path).await {
Ok(index_filename) => Ok({
let sqlite_pool = self.sqlite_pool.clone();
let path = path.to_owned();
spawn_blocking(move || {
Ok(archive_index::find_in_file(index_filename, &path, &sqlite_pool)?.is_some())
Ok(archive_index::find_in_file(index_filename, &path)?.is_some())
})
.await?
}),
Expand Down Expand Up @@ -333,10 +324,8 @@ impl AsyncStorage {
}
let index_filename = self.get_index_filename(archive_path).await?;
let info = {
let sqlite_pool = self.sqlite_pool.clone();
let path = path.to_owned();
spawn_blocking(move || archive_index::find_in_file(index_filename, &path, &sqlite_pool))
.await
spawn_blocking(move || archive_index::find_in_file(index_filename, &path)).await
}?
.ok_or(PathNotFoundError)?;

Expand Down
102 changes: 0 additions & 102 deletions src/storage/sqlite_pool.rs

This file was deleted.

0 comments on commit 21968db

Please sign in to comment.