diff --git a/.sqlx/query-96b68919f9016705a1a36ef11a5a659e7fb431beb0017fbcfd21132f105ce722.json b/.sqlx/query-96b68919f9016705a1a36ef11a5a659e7fb431beb0017fbcfd21132f105ce722.json
new file mode 100644
index 000000000..984eff3ac
--- /dev/null
+++ b/.sqlx/query-96b68919f9016705a1a36ef11a5a659e7fb431beb0017fbcfd21132f105ce722.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT relname\n FROM pg_class\n INNER JOIN pg_namespace ON\n pg_class.relnamespace = pg_namespace.oid\n WHERE pg_class.relkind = 'S'\n AND pg_namespace.nspname = $1\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "relname",
+        "type_info": "Name"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Name"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "96b68919f9016705a1a36ef11a5a659e7fb431beb0017fbcfd21132f105ce722"
+}
diff --git a/.sqlx/query-ce93aecd02be2e662c7a384174afc309d10ce84e09a46fcae768d324efa5c822.json b/.sqlx/query-ce93aecd02be2e662c7a384174afc309d10ce84e09a46fcae768d324efa5c822.json
deleted file mode 100644
index 26082e60a..000000000
--- a/.sqlx/query-ce93aecd02be2e662c7a384174afc309d10ce84e09a46fcae768d324efa5c822.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "SELECT relname\n FROM pg_class\n INNER JOIN pg_namespace ON\n pg_class.relnamespace = pg_namespace.oid\n WHERE pg_class.relkind = 'S'\n AND pg_namespace.nspname = $1\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "relname",
-        "type_info": "Name"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Name"
-      ]
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "ce93aecd02be2e662c7a384174afc309d10ce84e09a46fcae768d324efa5c822"
-}
diff --git a/Cargo.lock b/Cargo.lock
index 52bf8bfe3..9d0ea4dd4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1761,14 +1761,38 @@ dependencies = [
  "cfg-if",
 ]
 
+[[package]]
+name = "darling"
+version = "0.20.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
+dependencies = [
+ "darling_core 0.20.11",
+ "darling_macro 0.20.11",
+]
+
 [[package]]
 name = "darling"
 version = "0.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
 dependencies = [
- "darling_core",
- "darling_macro",
+ "darling_core 0.21.3",
+ "darling_macro 0.21.3",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.20.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn 2.0.108",
 ]
 
 [[package]]
@@ -1785,13 +1809,24 @@ dependencies = [
  "syn 2.0.108",
 ]
 
+[[package]]
+name = "darling_macro"
+version = "0.20.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
+dependencies = [
+ "darling_core 0.20.11",
+ "quote",
+ "syn 2.0.108",
+]
+
 [[package]]
 name = "darling_macro"
 version = "0.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
 dependencies = [
- "darling_core",
+ "darling_core 0.21.3",
  "quote",
  "syn 2.0.108",
 ]
@@ -1868,6 +1903,37 @@ dependencies = [
  "syn 2.0.108",
 ]
 
+[[package]]
+name = "derive_builder"
+version = "0.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
"507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.108", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.108", +] + [[package]] name = "derive_more" version = "0.99.20" @@ -1966,6 +2032,7 @@ dependencies = [ "crates-index-diff", "criterion", "dashmap", + "derive_builder", "derive_more 2.0.1", "docsrs-metadata", "flate2", @@ -6465,7 +6532,7 @@ version = "3.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" dependencies = [ - "darling", + "darling 0.21.3", "proc-macro2", "quote", "syn 2.0.108", diff --git a/Cargo.toml b/Cargo.toml index 21db9b415..ece661ea7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ itertools = { version = "0.14.0" } hex = "0.4.3" derive_more = { version = "2.0.0", features = ["display"] } sysinfo = { version = "0.37.2", default-features = false, features = ["system"] } +derive_builder = "0.20.2" # Async tokio = { version = "1.0", features = ["rt-multi-thread", "signal", "macros"] } diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index 5e674b912..61c4dc1dd 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -1,31 +1,21 @@ -use std::env; -use std::fmt::Write; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; - -use anyhow::{Context as _, Error, Result, anyhow}; +use anyhow::{Context as _, Result, anyhow}; use clap::{Parser, Subcommand, ValueEnum}; -use docs_rs::cdn::CdnBackend; -use docs_rs::db::{self, CrateId, Overrides, Pool, add_path_into_database}; -use docs_rs::repositories::RepositoryStatsUpdater; -use docs_rs::utils::{ - ConfigName, get_config, get_crate_pattern_and_priority, list_crate_priorities, queue_builder, - remove_crate_priority, set_config, set_crate_priority, -}; use docs_rs::{ - AsyncBuildQueue, AsyncStorage, BuildQueue, Config, Context, Index, InstanceMetrics, - PackageKind, RegistryApi, RustwideBuilder, ServiceMetrics, Storage, + Config, Context, PackageKind, RustwideBuilder, + db::{self, CrateId, Overrides, add_path_into_database}, start_background_metrics_webserver, start_web_server, + utils::{ + ConfigName, get_config, get_crate_pattern_and_priority, list_crate_priorities, + queue_builder, remove_crate_priority, set_config, set_crate_priority, + }, }; use futures_util::StreamExt; -use once_cell::sync::OnceCell; use sentry::{ TransactionContext, integrations::panic as sentry_panic, integrations::tracing as sentry_tracing, }; -use tokio::runtime::{Builder, Runtime}; +use std::{env, fmt::Write, net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc}; +use tokio::runtime; use tracing_log::LogTracer; use tracing_subscriber::{EnvFilter, filter::Directive, prelude::*}; @@ -187,7 +177,9 @@ enum CommandLine { impl CommandLine { fn handle_args(self) -> Result<()> { - let ctx = BinContext::new(); + let config = Config::from_env()?.build()?; + let runtime = Arc::new(runtime::Builder::new_multi_thread().enable_all().build()?); 
+        let ctx = runtime.block_on(Context::from_config(config))?;
 
         match self {
             Self::Build { subcommand } => subcommand.handle_args(ctx)?,
@@ -209,13 +201,9 @@
 
                 start_background_metrics_webserver(Some(metric_server_socket_addr), &ctx)?;
 
-                ctx.runtime()?.block_on(async {
-                    docs_rs::utils::watch_registry(
-                        ctx.async_build_queue().await?,
-                        ctx.config()?,
-                        ctx.index()?,
-                    )
-                    .await
+                ctx.runtime.block_on(async move {
+                    docs_rs::utils::watch_registry(&ctx.async_build_queue, &ctx.config, ctx.index)
+                        .await
                 })?;
             }
             Self::StartBuildServer {
@@ -223,10 +211,7 @@
             } => {
                 start_background_metrics_webserver(Some(metric_server_socket_addr), &ctx)?;
 
-                let build_queue = ctx.build_queue()?;
-                let config = ctx.config()?;
-                let rustwide_builder = RustwideBuilder::init(&ctx)?;
-                queue_builder(&ctx, rustwide_builder, build_queue, config)?;
+                queue_builder(&ctx, RustwideBuilder::init(&ctx)?)?;
             }
             Self::StartWebServer { socket_addr } => {
                 // Blocks indefinitely
@@ -287,22 +272,21 @@ enum QueueSubcommand {
 }
 
 impl QueueSubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
-        let build_queue = ctx.build_queue()?;
+    fn handle_args(self, ctx: Context) -> Result<()> {
         match self {
             Self::Add {
                 crate_name,
                 crate_version,
                 build_priority,
-            } => build_queue.add_crate(
+            } => ctx.build_queue.add_crate(
                 &crate_name,
                 &crate_version,
                 build_priority,
-                ctx.config()?.registry_url.as_deref(),
+                ctx.config.registry_url.as_deref(),
             )?,
 
             Self::GetLastSeenReference => {
-                if let Some(reference) = build_queue.last_seen_reference()? {
+                if let Some(reference) = ctx.build_queue.last_seen_reference()? {
                     println!("Last seen reference: {reference}");
                 } else {
                     println!("No last seen reference available");
@@ -314,13 +298,13 @@ impl QueueSubcommand {
                     (Some(reference), false) => reference,
                     (None, true) => {
                         println!("Fetching changes to set reference to HEAD");
-                        let (_, oid) = ctx.index()?.diff()?.peek_changes()?;
+                        let (_, oid) = ctx.index.diff()?.peek_changes()?;
                         oid
                     }
                     (_, _) => unreachable!(),
                 };
 
-                build_queue.set_last_seen_reference(reference)?;
+                ctx.build_queue.set_last_seen_reference(reference)?;
                 println!("Set last seen reference: {reference}");
             }
 
@@ -359,9 +343,9 @@ enum PrioritySubcommand {
 }
 
 impl PrioritySubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
-        ctx.runtime()?.block_on(async move {
-            let mut conn = ctx.pool()?.get_async().await?;
+    fn handle_args(self, ctx: Context) -> Result<()> {
+        ctx.runtime.block_on(async move {
+            let mut conn = ctx.pool.get_async().await?;
             match self {
                 Self::List => {
                     for (pattern, priority) in list_crate_priorities(&mut conn).await? {
@@ -441,8 +425,7 @@ enum BuildSubcommand {
 }
 
 impl BuildSubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
-        let build_queue = ctx.build_queue()?;
+    fn handle_args(self, ctx: Context) -> Result<()> {
         let rustwide_builder = || -> Result<RustwideBuilder> { RustwideBuilder::init(&ctx) };
 
         match self {
@@ -458,7 +441,7 @@
                         .build_local_package(&path)
                         .context("Building documentation failed")?;
                 } else {
-                    let registry_url = ctx.config()?.registry_url.clone();
+                    let registry_url = ctx.config.registry_url.as_ref();
                     builder
                         .build_package(
                             &crate_name
@@ -466,7 +449,6 @@
                             &crate_version
                                 .with_context(|| anyhow!("must specify version if not local"))?,
                             registry_url
-                                .as_ref()
                                 .map(|s| PackageKind::Registry(s.as_str()))
                                 .unwrap_or(PackageKind::CratesIo),
                             true,
@@ -476,8 +458,8 @@
             }
 
             Self::UpdateToolchain { only_first_time } => {
-                let rustc_version = ctx.runtime()?.block_on({
-                    let pool = ctx.pool()?;
+                let rustc_version = ctx.runtime.block_on({
+                    let pool = ctx.pool.clone();
                     async move {
                         let mut conn = pool
                             .get_async()
@@ -512,9 +494,9 @@
             }
 
             Self::SetToolchain { toolchain_name } => {
-                ctx.runtime()?.block_on(async move {
+                ctx.runtime.block_on(async move {
                     let mut conn = ctx
-                        .pool()?
+                        .pool
                         .get_async()
                         .await
                         .context("failed to get a database connection")?;
@@ -524,8 +506,8 @@
                 })?;
             }
 
-            Self::Lock => build_queue.lock().context("Failed to lock")?,
-            Self::Unlock => build_queue.unlock().context("Failed to unlock")?,
+            Self::Lock => ctx.build_queue.lock().context("Failed to lock")?,
+            Self::Unlock => ctx.build_queue.unlock().context("Failed to unlock")?,
         }
 
         Ok(())
@@ -589,98 +571,82 @@ enum DatabaseSubcommand {
 }
 
 impl DatabaseSubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
+    fn handle_args(self, ctx: Context) -> Result<()> {
         match self {
-            Self::Migrate { version } => {
-                let pool = ctx.pool()?;
-                ctx.runtime()?
-                    .block_on(async {
-                        let mut conn = pool.get_async().await?;
-                        db::migrate(&mut conn, version).await
-                    })
-                    .context("Failed to run database migrations")?
-            }
+            Self::Migrate { version } => ctx
+                .runtime
+                .block_on(async {
+                    let mut conn = ctx.pool.get_async().await?;
+                    db::migrate(&mut conn, version).await
+                })
+                .context("Failed to run database migrations")?,
 
-            Self::UpdateLatestVersionId => {
-                let pool = ctx.pool()?;
-                ctx.runtime()?
-                    .block_on(async {
-                        let mut list_conn = pool.get_async().await?;
-                        let mut update_conn = pool.get_async().await?;
+            Self::UpdateLatestVersionId => ctx
+                .runtime
+                .block_on(async {
+                    let mut list_conn = ctx.pool.get_async().await?;
+                    let mut update_conn = ctx.pool.get_async().await?;
 
-                        let mut result_stream = sqlx::query!(
-                            r#"SELECT id as "id: CrateId", name FROM crates ORDER BY name"#
-                        )
-                        .fetch(&mut *list_conn);
+                    let mut result_stream = sqlx::query!(
+                        r#"SELECT id as "id: CrateId", name FROM crates ORDER BY name"#
+                    )
+                    .fetch(&mut *list_conn);
 
-                        while let Some(row) = result_stream.next().await {
-                            let row = row?;
+                    while let Some(row) = result_stream.next().await {
+                        let row = row?;
 
-                            println!("handling crate {}", row.name);
+                        println!("handling crate {}", row.name);
 
-                            db::update_latest_version_id(&mut update_conn, row.id).await?;
-                        }
+                        db::update_latest_version_id(&mut update_conn, row.id).await?;
+                    }
 
-                        Ok::<(), anyhow::Error>(())
-                    })
-                    .context("Failed to update latest version id")?
-            }
+                    Ok::<(), anyhow::Error>(())
+                })
+                .context("Failed to update latest version id")?,
 
             Self::UpdateRepositoryFields => {
-                ctx.runtime()?
-                    .block_on(ctx.repository_stats_updater()?.update_all_crates())?;
+                ctx.runtime
+                    .block_on(ctx.repository_stats_updater.update_all_crates())?;
             }
 
             Self::BackfillRepositoryStats => {
-                ctx.runtime()?
-                    .block_on(ctx.repository_stats_updater()?.backfill_repositories())?;
+                ctx.runtime
+                    .block_on(ctx.repository_stats_updater.backfill_repositories())?;
             }
 
-            Self::UpdateCrateRegistryFields { name } => ctx.runtime()?.block_on(async move {
-                let mut conn = ctx.pool()?.get_async().await?;
-                let registry_data = ctx.registry_api()?.get_crate_data(&name).await?;
+            Self::UpdateCrateRegistryFields { name } => ctx.runtime.block_on(async move {
+                let mut conn = ctx.pool.get_async().await?;
+                let registry_data = ctx.registry_api.get_crate_data(&name).await?;
                 db::update_crate_data_in_database(&mut conn, &name, &registry_data).await
             })?,
 
             Self::AddDirectory { directory } => {
-                ctx.runtime()?
-                    .block_on(async {
-                        let storage = ctx.async_storage().await?;
-
-                        add_path_into_database(&storage, &ctx.config()?.prefix, directory).await
-                    })
+                ctx.runtime
+                    .block_on(add_path_into_database(
+                        &ctx.async_storage,
+                        &ctx.config.prefix,
+                        directory,
+                    ))
                     .context("Failed to add directory into database")?;
             }
 
             Self::Delete {
                 command: DeleteSubcommand::Version { name, version },
             } => ctx
-                .runtime()?
+                .runtime
                 .block_on(async move {
-                    let mut conn = ctx.pool()?.get_async().await?;
-                    db::delete_version(
-                        &mut conn,
-                        &*ctx.async_storage().await?,
-                        &*ctx.config()?,
-                        &name,
-                        &version,
-                    )
-                    .await
+                    let mut conn = ctx.pool.get_async().await?;
+                    db::delete_version(&mut conn, &ctx.async_storage, &ctx.config, &name, &version)
+                        .await
                 })
                 .context("failed to delete the version")?,
             Self::Delete {
                 command: DeleteSubcommand::Crate { name },
             } => ctx
-                .runtime()?
+                .runtime
                 .block_on(async move {
-                    let mut conn = ctx.pool()?.get_async().await?;
-                    db::delete_crate(
-                        &mut conn,
-                        &*ctx.async_storage().await?,
-                        &*ctx.config()?,
-                        &name,
-                    )
-                    .await
+                    let mut conn = ctx.pool.get_async().await?;
+                    db::delete_crate(&mut conn, &ctx.async_storage, &ctx.config, &name).await
                 })
                 .context("failed to delete the crate")?,
             Self::Blacklist { command } => command.handle_args(ctx)?,
@@ -688,7 +654,7 @@ impl DatabaseSubcommand {
             Self::Limits { command } => command.handle_args(ctx)?,
 
             Self::Synchronize { dry_run } => {
-                ctx.runtime()?
+                ctx.runtime
                     .block_on(docs_rs::utils::consistency::run_check(&ctx, dry_run))?;
             }
         }
         Ok(())
     }
 }
@@ -720,10 +686,9 @@ enum LimitsSubcommand {
 }
 
 impl LimitsSubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
-        let pool = ctx.pool()?;
-        ctx.runtime()?.block_on(async move {
-            let mut conn = pool.get_async().await?;
+    fn handle_args(self, ctx: Context) -> Result<()> {
+        ctx.runtime.block_on(async move {
+            let mut conn = ctx.pool.get_async().await?;
 
             match self {
                 Self::Get { crate_name } => {
@@ -788,9 +753,9 @@ enum BlacklistSubcommand {
 }
 
 impl BlacklistSubcommand {
-    fn handle_args(self, ctx: BinContext) -> Result<()> {
-        ctx.runtime()?.block_on(async {
-            let conn = &mut *ctx.pool()?.get_async().await?;
+    fn handle_args(self, ctx: Context) -> Result<()> {
+        ctx.runtime.block_on(async move {
+            let conn = &mut ctx.pool.get_async().await?;
             match self {
                 Self::List => {
                     let crates = db::blacklist::list_crates(conn)
@@ -832,142 +797,3 @@ enum DeleteSubcommand {
         version: String,
     },
 }
-
-struct BinContext {
-    build_queue: OnceCell<Arc<BuildQueue>>,
-    async_build_queue: tokio::sync::OnceCell<Arc<AsyncBuildQueue>>,
-    storage: OnceCell<Arc<Storage>>,
-    cdn: tokio::sync::OnceCell<Arc<CdnBackend>>,
-    config: OnceCell<Arc<Config>>,
-    pool: OnceCell<Pool>,
-    service_metrics: OnceCell<Arc<ServiceMetrics>>,
-    instance_metrics: OnceCell<Arc<InstanceMetrics>>,
-    index: OnceCell<Arc<Index>>,
-    registry_api: OnceCell<Arc<RegistryApi>>,
-    repository_stats_updater: OnceCell<Arc<RepositoryStatsUpdater>>,
-    runtime: OnceCell<Arc<Runtime>>,
-}
-
-impl BinContext {
-    fn new() -> Self {
-        Self {
-            build_queue: OnceCell::new(),
-            async_build_queue: tokio::sync::OnceCell::new(),
-            storage: OnceCell::new(),
-            cdn: tokio::sync::OnceCell::new(),
-            config: OnceCell::new(),
-            pool: OnceCell::new(),
-            service_metrics: OnceCell::new(),
-            instance_metrics: OnceCell::new(),
-            index: OnceCell::new(),
-            registry_api: OnceCell::new(),
-            repository_stats_updater: OnceCell::new(),
-            runtime: OnceCell::new(),
-        }
-    }
-}
-
-macro_rules! lazy {
-    ( $(fn $name:ident($self:ident) -> $type:ty = $init:expr);+ $(;)? ) => {
-        $(fn $name(&$self) -> Result<Arc<$type>> {
-            Ok($self
-                .$name
-                .get_or_try_init::<_, Error>(|| Ok(Arc::new($init)))?
-                .clone())
-        })*
-    }
-}
-
-impl Context for BinContext {
-    lazy! {
-        fn build_queue(self) -> BuildQueue = {
-            let runtime = self.runtime()?;
-            BuildQueue::new(
-                runtime.clone(),
-                runtime.block_on(self.async_build_queue())?
-            )
-        };
-        fn storage(self) -> Storage = {
-            let runtime = self.runtime()?;
-            Storage::new(
-                runtime.block_on(self.async_storage())?,
-                runtime
-            )
-        };
-        fn config(self) -> Config = Config::from_env()?;
-        fn service_metrics(self) -> ServiceMetrics = {
-            ServiceMetrics::new()?
-        };
-        fn instance_metrics(self) -> InstanceMetrics = InstanceMetrics::new()?;
-        fn runtime(self) -> Runtime = {
-            Builder::new_multi_thread()
-                .enable_all()
-                .build()?
-        };
-        fn index(self) -> Index = {
-            let config = self.config()?;
-            let path = config.registry_index_path.clone();
-            if let Some(registry_url) = config.registry_url.clone() {
-                Index::from_url(path, registry_url)
-            } else {
-                Index::new(path)
-            }?
-        };
-        fn registry_api(self) -> RegistryApi = {
-            let config = self.config()?;
-            RegistryApi::new(config.registry_api_host.clone(), config.crates_io_api_call_retries)?
-        };
-        fn repository_stats_updater(self) -> RepositoryStatsUpdater = {
-            let config = self.config()?;
-            let pool = self.pool()?;
-            RepositoryStatsUpdater::new(&config, pool)
-        };
-    }
-
-    async fn async_pool(&self) -> Result<Pool> {
-        self.pool()
-    }
-
-    fn pool(&self) -> Result<Pool> {
-        Ok(self
-            .pool
-            .get_or_try_init::<_, Error>(|| {
-                Ok(Pool::new(
-                    &*self.config()?,
-                    self.runtime()?,
-                    self.instance_metrics()?,
-                )?)
-            })?
-            .clone())
-    }
-
-    async fn async_storage(&self) -> Result<Arc<AsyncStorage>> {
-        Ok(Arc::new(
-            AsyncStorage::new(self.pool()?, self.instance_metrics()?, self.config()?).await?,
-        ))
-    }
-
-    async fn async_build_queue(&self) -> Result<Arc<AsyncBuildQueue>> {
-        Ok(self
-            .async_build_queue
-            .get_or_try_init(|| async {
-                Ok::<_, Error>(Arc::new(AsyncBuildQueue::new(
-                    self.pool()?,
-                    self.instance_metrics()?,
-                    self.config()?,
-                    self.async_storage().await?,
-                )))
-            })
-            .await?
-            .clone())
-    }
-
-    async fn cdn(&self) -> Result<Arc<CdnBackend>> {
-        let config = self.config()?;
-        Ok(self
-            .cdn
-            .get_or_init(|| async { Arc::new(CdnBackend::new(&config).await) })
-            .await
-            .clone())
-    }
-}
diff --git a/src/build_queue.rs b/src/build_queue.rs
index 2c4b9e7dd..ec7855846 100644
--- a/src/build_queue.rs
+++ b/src/build_queue.rs
@@ -12,7 +12,7 @@ use futures_util::{StreamExt, stream::TryStreamExt};
 use sqlx::Connection as _;
 use std::collections::HashMap;
 use std::sync::Arc;
-use tokio::runtime::Runtime;
+use tokio::runtime;
 use tracing::{debug, error, info, instrument};
 
 /// The static priority for background rebuilds.
@@ -415,7 +415,7 @@ impl AsyncBuildQueue {
 
 #[derive(Debug)]
 pub struct BuildQueue {
-    runtime: Arc<Runtime>,
+    runtime: runtime::Handle,
     inner: Arc<AsyncBuildQueue>,
 }
 
@@ -476,7 +476,7 @@ impl BuildQueue {
 }
 
 impl BuildQueue {
-    pub fn new(runtime: Arc<Runtime>, inner: Arc<AsyncBuildQueue>) -> Self {
+    pub fn new(runtime: runtime::Handle, inner: Arc<AsyncBuildQueue>) -> Self {
         Self { runtime, inner }
     }
 
@@ -609,9 +609,9 @@ impl BuildQueue {
     /// Builds the top package from the queue. Returns whether there was a package in the queue.
     ///
     /// Note that this will return `Ok(true)` even if the package failed to build.
-    pub(crate) fn build_next_queue_package<C: Context>(
+    pub(crate) fn build_next_queue_package(
         &self,
-        context: &C,
+        context: &Context,
         builder: &mut RustwideBuilder,
     ) -> Result<bool> {
         let mut processed = false;
@@ -730,662 +730,665 @@ pub async fn queue_rebuilds(
 
 #[cfg(test)]
 mod tests {
-    use crate::test::FakeBuild;
+    use crate::test::{FakeBuild, TestEnvironment};
 
     use super::*;
     use chrono::Utc;
     use std::time::Duration;
 
-    #[test]
-    fn test_rebuild_when_old() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.max_queued_rebuilds = Some(100);
-            });
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_rebuild_when_old() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .max_queued_rebuilds(Some(100))
+                .build()?,
+        )
+        .await?;
 
-            env.fake_release()
-                .await
-                .name("foo")
-                .version("0.1.0")
-                .builds(vec![
-                    FakeBuild::default()
-                        .rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
-                ])
-                .create()
-                .await?;
+        env.fake_release()
+            .await
+            .name("foo")
+            .version("0.1.0")
+            .builds(vec![
+                FakeBuild::default().rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
+            ])
+            .create()
+            .await?;
 
-            let build_queue = env.async_build_queue().await;
-            assert!(build_queue.queued_crates().await?.is_empty());
+        let build_queue = env.async_build_queue();
+        assert!(build_queue.queued_crates().await?.is_empty());
 
-            let mut conn = env.async_db().await.async_conn().await;
-            queue_rebuilds(&mut conn, &env.config(), &build_queue).await?;
+        let mut conn = env.async_db().async_conn().await;
+        queue_rebuilds(&mut conn, env.config(), build_queue).await?;
 
-            let queue = build_queue.queued_crates().await?;
-            assert_eq!(queue.len(), 1);
-            assert_eq!(queue[0].name, "foo");
-            assert_eq!(queue[0].version, "0.1.0");
-            assert_eq!(queue[0].priority, REBUILD_PRIORITY);
+        let queue = build_queue.queued_crates().await?;
+        assert_eq!(queue.len(), 1);
+        assert_eq!(queue[0].name, "foo");
+        assert_eq!(queue[0].version, "0.1.0");
+        assert_eq!(queue[0].priority, REBUILD_PRIORITY);
 
-            Ok(())
-        })
+        Ok(())
     }
 
-    #[test]
-    fn test_still_rebuild_when_full_with_failed() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.max_queued_rebuilds = Some(1);
-            });
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_still_rebuild_when_full_with_failed() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .max_queued_rebuilds(Some(1))
+                .build()?,
+        )
+        .await?;
 
-            let build_queue = env.async_build_queue().await;
-            build_queue
-                .add_crate("foo1", "0.1.0", REBUILD_PRIORITY, None)
-                .await?;
-            build_queue
-                .add_crate("foo2", "0.1.0", REBUILD_PRIORITY, None)
-                .await?;
+        let build_queue = env.async_build_queue();
+        build_queue
+            .add_crate("foo1", "0.1.0", REBUILD_PRIORITY, None)
+            .await?;
+        build_queue
+            .add_crate("foo2", "0.1.0", REBUILD_PRIORITY, None)
+            .await?;
 
-            let mut conn = env.async_db().await.async_conn().await;
-            sqlx::query!("UPDATE queue SET attempt = 99")
-                .execute(&mut *conn)
-                .await?;
+        let mut conn = env.async_db().async_conn().await;
+        sqlx::query!("UPDATE queue SET attempt = 99")
+            .execute(&mut *conn)
+            .await?;
 
-            assert_eq!(build_queue.queued_crates().await?.len(), 0);
+        assert_eq!(build_queue.queued_crates().await?.len(), 0);
 
-            env.fake_release()
-                .await
-                .name("foo")
-                .version("0.1.0")
-                .builds(vec![
-                    FakeBuild::default()
-                        .rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
-                ])
-                .create()
-                .await?;
+        env.fake_release()
+            .await
+            .name("foo")
+            .version("0.1.0")
+            .builds(vec![
+                FakeBuild::default().rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
+            ])
+            .create()
+            .await?;
 
-            let build_queue = env.async_build_queue().await;
-            queue_rebuilds(&mut conn, &env.config(), &build_queue).await?;
+        let build_queue = env.async_build_queue();
+        queue_rebuilds(&mut conn, env.config(), build_queue).await?;
 
-            assert_eq!(build_queue.queued_crates().await?.len(), 1);
+        assert_eq!(build_queue.queued_crates().await?.len(), 1);
 
-            Ok(())
-        })
+        Ok(())
     }
 
-    #[test]
-    fn test_dont_rebuild_when_full() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.max_queued_rebuilds = Some(1);
-            });
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_dont_rebuild_when_full() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .max_queued_rebuilds(Some(1))
+                .build()?,
+        )
+        .await?;
 
-            let build_queue = env.async_build_queue().await;
-            build_queue
-                .add_crate("foo1", "0.1.0", REBUILD_PRIORITY, None)
-                .await?;
-            build_queue
-                .add_crate("foo2", "0.1.0", REBUILD_PRIORITY, None)
-                .await?;
+        let build_queue = env.async_build_queue();
+        build_queue
+            .add_crate("foo1", "0.1.0", REBUILD_PRIORITY, None)
+            .await?;
+        build_queue
+            .add_crate("foo2", "0.1.0", REBUILD_PRIORITY, None)
+            .await?;
 
-            env.fake_release()
-                .await
-                .name("foo")
-                .version("0.1.0")
-                .builds(vec![
-                    FakeBuild::default()
-                        .rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
-                ])
-                .create()
-                .await?;
+        env.fake_release()
+            .await
+            .name("foo")
+            .version("0.1.0")
+            .builds(vec![
+                FakeBuild::default().rustc_version("rustc 1.84.0-nightly (e7c0d2750 2020-10-15)"),
+            ])
+            .create()
+            .await?;
 
-            let build_queue = env.async_build_queue().await;
-            assert_eq!(build_queue.queued_crates().await?.len(), 2);
+        let build_queue = env.async_build_queue();
+        assert_eq!(build_queue.queued_crates().await?.len(), 2);
 
-            let mut conn = env.async_db().await.async_conn().await;
-            queue_rebuilds(&mut conn, &env.config(), &build_queue).await?;
+        let mut conn = env.async_db().async_conn().await;
+        queue_rebuilds(&mut conn, env.config(), build_queue).await?;
 
-            assert_eq!(build_queue.queued_crates().await?.len(), 2);
+        assert_eq!(build_queue.queued_crates().await?.len(), 2);
 
-            Ok(())
-        })
+        Ok(())
     }
 
-    #[test]
-    fn test_add_duplicate_doesnt_fail_last_priority_wins() {
-        crate::test::async_wrapper(|env| async move {
-            let queue = env.async_build_queue().await;
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_add_duplicate_doesnt_fail_last_priority_wins() -> Result<()> {
+        let env = TestEnvironment::new().await?;
 
-            queue.add_crate("some_crate", "0.1.1", 0, None).await?;
-            queue.add_crate("some_crate", "0.1.1", 9, None).await?;
+        let queue = env.async_build_queue();
 
-            let queued_crates = queue.queued_crates().await?;
-            assert_eq!(queued_crates.len(), 1);
-            assert_eq!(queued_crates[0].priority, 9);
+        queue.add_crate("some_crate", "0.1.1", 0, None).await?;
+        queue.add_crate("some_crate", "0.1.1", 9, None).await?;
 
-            Ok(())
-        })
+        let queued_crates = queue.queued_crates().await?;
+        assert_eq!(queued_crates.len(), 1);
+        assert_eq!(queued_crates[0].priority, 9);
+
+        Ok(())
     }
 
-    #[test]
-    fn test_add_duplicate_resets_attempts_and_priority() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.build_attempts = 5;
-            });
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_add_duplicate_resets_attempts_and_priority() -> Result<()> {
+        let env =
+            TestEnvironment::with_config(TestEnvironment::base_config().build_attempts(5).build()?)
+                .await?;
 
-            let queue = env.async_build_queue().await;
+        let queue = env.async_build_queue();
 
-            let mut conn = env.async_db().await.async_conn().await;
-            sqlx::query!(
-                "
+        let mut conn = env.async_db().async_conn().await;
+        sqlx::query!(
+            "
             INSERT INTO queue (name, version, priority, attempt, last_attempt )
             VALUES ('failed_crate', '0.1.1', 0, 99, NOW())",
-            )
-            .execute(&mut *conn)
-            .await?;
+        )
+        .execute(&mut *conn)
+        .await?;
 
-            assert_eq!(queue.pending_count().await?, 0);
+        assert_eq!(queue.pending_count().await?, 0);
 
-            queue.add_crate("failed_crate", "0.1.1", 9, None).await?;
+        queue.add_crate("failed_crate", "0.1.1", 9, None).await?;
 
-            assert_eq!(queue.pending_count().await?, 1);
+        assert_eq!(queue.pending_count().await?, 1);
 
-            let row = sqlx::query!(
-                "SELECT priority, attempt, last_attempt
+        let row = sqlx::query!(
+            "SELECT priority, attempt, last_attempt
              FROM queue
              WHERE name = $1 AND version = $2",
-                "failed_crate",
-                "0.1.1",
-            )
-            .fetch_one(&mut *conn)
-            .await?;
+            "failed_crate",
+            "0.1.1",
+        )
+        .fetch_one(&mut *conn)
+        .await?;
 
-            assert_eq!(row.priority, 9);
-            assert_eq!(row.attempt, 0);
-            assert!(row.last_attempt.is_none());
-            Ok(())
-        })
+        assert_eq!(row.priority, 9);
+        assert_eq!(row.attempt, 0);
+        assert!(row.last_attempt.is_none());
+        Ok(())
     }
 
-    #[test]
-    fn test_has_build_queued() {
-        crate::test::async_wrapper(|env| async move {
-            let queue = env.async_build_queue().await;
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_has_build_queued() -> Result<()> {
+        let env = TestEnvironment::new().await?;
 
-            queue.add_crate("dummy", "0.1.1", 0, None).await?;
+        let queue = env.async_build_queue();
 
-            let mut conn = env.async_db().await.async_conn().await;
-            assert!(queue.has_build_queued("dummy", "0.1.1").await.unwrap());
+        queue.add_crate("dummy", "0.1.1", 0, None).await?;
 
-            sqlx::query!("UPDATE queue SET attempt = 6")
-                .execute(&mut *conn)
-                .await
-                .unwrap();
+        let mut conn = env.async_db().async_conn().await;
+        assert!(queue.has_build_queued("dummy", "0.1.1").await.unwrap());
 
-            assert!(!queue.has_build_queued("dummy", "0.1.1").await.unwrap());
+        sqlx::query!("UPDATE queue SET attempt = 6")
+            .execute(&mut *conn)
+            .await
+            .unwrap();
 
-            Ok(())
-        })
+        assert!(!queue.has_build_queued("dummy", "0.1.1").await.unwrap());
+
+        Ok(())
     }
 
     #[test]
-    fn test_wait_between_build_attempts() {
-        crate::test::wrapper(|env| {
-            env.override_config(|config| {
-                config.build_attempts = 99;
-                config.delay_between_build_attempts = Duration::from_secs(1);
-            });
+    fn test_wait_between_build_attempts() -> Result<()> {
+        let env = TestEnvironment::with_config_and_runtime(
+            TestEnvironment::base_config()
+                .build_attempts(99)
+                .delay_between_build_attempts(Duration::from_secs(1))
+                .build()?,
+        )?;
 
-            let runtime = env.runtime();
+        let runtime = env.runtime();
 
-            let queue = env.build_queue();
+        let queue = env.build_queue();
 
-            queue.add_crate("krate", "1.0.0", 0, None)?;
+        queue.add_crate("krate", "1.0.0", 0, None)?;
 
-            // first let it fail
-            queue.process_next_crate(|krate| {
-                assert_eq!(krate.name, "krate");
-                anyhow::bail!("simulate a failure");
-            })?;
+        // first let it fail
+        queue.process_next_crate(|krate| {
+            assert_eq!(krate.name, "krate");
+            anyhow::bail!("simulate a failure");
+        })?;
 
-            queue.process_next_crate(|_| {
-                // this can't happen since we didn't wait between attempts
-                unreachable!();
-            })?;
+        queue.process_next_crate(|_| {
+            // this can't happen since we didn't wait between attempts
+            unreachable!();
+        })?;
 
-            runtime.block_on(async {
-                // fake the build-attempt timestamp so it's older
-                let mut conn = env.async_db().await.async_conn().await;
-                sqlx::query!(
-                    "UPDATE queue SET last_attempt = $1",
-                    Utc::now() - chrono::Duration::try_seconds(60).unwrap()
-                )
-                .execute(&mut *conn)
-                .await
-            })?;
+        runtime.block_on(async {
+            // fake the build-attempt timestamp so it's older
+            let mut conn = env.async_db().async_conn().await;
+            sqlx::query!(
+                "UPDATE queue SET last_attempt = $1",
+                Utc::now() - chrono::Duration::try_seconds(60).unwrap()
+            )
+            .execute(&mut *conn)
+            .await
+        })?;
 
-            let mut handled = false;
-            // now we can process it again
-            queue.process_next_crate(|krate| {
-                assert_eq!(krate.name, "krate");
-                handled = true;
-                Ok(BuildPackageSummary::default())
-            })?;
+        let mut handled = false;
+        // now we can process it again
+        queue.process_next_crate(|krate| {
+            assert_eq!(krate.name, "krate");
+            handled = true;
+            Ok(BuildPackageSummary::default())
+        })?;
 
-            assert!(handled);
+        assert!(handled);
 
-            Ok(())
-        })
+        Ok(())
     }
 
     #[test]
-    fn test_add_and_process_crates() {
+    fn test_add_and_process_crates() -> Result<()> {
         const MAX_ATTEMPTS: u16 = 3;
+        let env = TestEnvironment::with_config_and_runtime(
+            TestEnvironment::base_config()
+                .build_attempts(MAX_ATTEMPTS)
+                .delay_between_build_attempts(Duration::ZERO)
+                .build()?,
+        )?;
 
-        crate::test::wrapper(|env| {
-            env.override_config(|config| {
-                config.build_attempts = MAX_ATTEMPTS;
-                config.delay_between_build_attempts = Duration::ZERO;
-            });
-
-            let queue = env.build_queue();
-
-            let test_crates = [
-                ("low-priority", "1.0.0", 1000),
-                ("high-priority-foo", "1.0.0", -1000),
-                ("medium-priority", "1.0.0", -10),
-                ("high-priority-bar", "1.0.0", -1000),
-                ("standard-priority", "1.0.0", 0),
-                ("high-priority-baz", "1.0.0", -1000),
-            ];
-            for krate in &test_crates {
-                queue.add_crate(krate.0, krate.1, krate.2, None)?;
-            }
-
-            let assert_next = |name| -> Result<()> {
-                queue.process_next_crate(|krate| {
-                    assert_eq!(name, krate.name);
-                    Ok(BuildPackageSummary::default())
-                })?;
-                Ok(())
-            };
-            let assert_next_and_fail = |name| -> Result<()> {
-                queue.process_next_crate(|krate| {
-                    assert_eq!(name, krate.name);
-                    anyhow::bail!("simulate a failure");
-                })?;
-                Ok(())
-            };
-
-            // The first processed item is the one with the highest priority added first.
-            assert_next("high-priority-foo")?;
-
-            // Simulate a failure in high-priority-bar.
-            assert_next_and_fail("high-priority-bar")?;
-
-            // Continue with the next high priority crate.
-            assert_next("high-priority-baz")?;
-
-            // After all the crates with the max priority are processed, before starting to process
-            // crates with a lower priority the failed crates with the max priority will be tried
-            // again.
-            assert_next("high-priority-bar")?;
-
-            // Continue processing according to the priority.
-            assert_next("medium-priority")?;
-            assert_next("standard-priority")?;
-
-            // Simulate the crate failing many times.
-            for _ in 0..MAX_ATTEMPTS {
-                assert_next_and_fail("low-priority")?;
-            }
-
-            // Since low-priority failed many times it will be removed from the queue. Because of
-            // that the queue should now be empty.
-            let mut called = false;
-            queue.process_next_crate(|_| {
-                called = true;
-                Ok(BuildPackageSummary::default())
-            })?;
-            assert!(!called, "there were still items in the queue");
-
-            // Ensure metrics were recorded correctly
-            let metrics = env.instance_metrics();
-            assert_eq!(metrics.total_builds.get(), 9);
-            assert_eq!(metrics.failed_builds.get(), 1);
-            assert_eq!(metrics.build_time.get_sample_count(), 9);
-
-            // no invalidations were run since we don't have a distribution id configured
-            assert!(
-                env.runtime()
-                    .block_on(async {
-                        cdn::queued_or_active_crate_invalidations(
-                            &mut *env.async_db().await.async_conn().await,
-                        )
-                        .await
-                    })?
-                    .is_empty()
-            );
-            Ok(())
-        })
+        let queue = env.build_queue();
+
+        let test_crates = [
+            ("low-priority", "1.0.0", 1000),
+            ("high-priority-foo", "1.0.0", -1000),
+            ("medium-priority", "1.0.0", -10),
+            ("high-priority-bar", "1.0.0", -1000),
+            ("standard-priority", "1.0.0", 0),
+            ("high-priority-baz", "1.0.0", -1000),
+        ];
+        for krate in &test_crates {
+            queue.add_crate(krate.0, krate.1, krate.2, None)?;
+        }
+
+        let assert_next = |name| -> Result<()> {
+            queue.process_next_crate(|krate| {
+                assert_eq!(name, krate.name);
+                Ok(BuildPackageSummary::default())
+            })?;
+            Ok(())
+        };
+        let assert_next_and_fail = |name| -> Result<()> {
+            queue.process_next_crate(|krate| {
+                assert_eq!(name, krate.name);
+                anyhow::bail!("simulate a failure");
+            })?;
+            Ok(())
+        };
+
+        // The first processed item is the one with the highest priority added first.
+        assert_next("high-priority-foo")?;
+
+        // Simulate a failure in high-priority-bar.
+        assert_next_and_fail("high-priority-bar")?;
+
+        // Continue with the next high priority crate.
+        assert_next("high-priority-baz")?;
+
+        // After all the crates with the max priority are processed, before starting to process
+        // crates with a lower priority the failed crates with the max priority will be tried
+        // again.
+        assert_next("high-priority-bar")?;
+
+        // Continue processing according to the priority.
+        assert_next("medium-priority")?;
+        assert_next("standard-priority")?;
+
+        // Simulate the crate failing many times.
+        for _ in 0..MAX_ATTEMPTS {
+            assert_next_and_fail("low-priority")?;
+        }
+
+        // Since low-priority failed many times it will be removed from the queue. Because of
+        // that the queue should now be empty.
+        let mut called = false;
+        queue.process_next_crate(|_| {
+            called = true;
+            Ok(BuildPackageSummary::default())
+        })?;
+        assert!(!called, "there were still items in the queue");
+
+        // Ensure metrics were recorded correctly
+        let metrics = env.instance_metrics();
+        assert_eq!(metrics.total_builds.get(), 9);
+        assert_eq!(metrics.failed_builds.get(), 1);
+        assert_eq!(metrics.build_time.get_sample_count(), 9);
+
+        // no invalidations were run since we don't have a distribution id configured
+        assert!(
+            env.runtime()
+                .block_on(async {
+                    cdn::queued_or_active_crate_invalidations(
+                        &mut *env.async_db().async_conn().await,
+                    )
+                    .await
+                })?
+                .is_empty()
+        );
+
+        Ok(())
     }
 
     #[test]
-    fn test_invalidate_cdn_after_build_and_error() {
-        crate::test::wrapper(|env| {
-            env.override_config(|config| {
-                config.cloudfront_distribution_id_web = Some("distribution_id_web".into());
-                config.cloudfront_distribution_id_static = Some("distribution_id_static".into());
-            });
+    fn test_invalidate_cdn_after_build_and_error() -> Result<()> {
+        let env = TestEnvironment::with_config_and_runtime(
+            TestEnvironment::base_config()
+                .cloudfront_distribution_id_web(Some("distribution_id_web".into()))
+                .cloudfront_distribution_id_static(Some("distribution_id_static".into()))
+                .build()?,
+        )?;
 
-            let queue = env.build_queue();
+        let queue = env.build_queue();
 
-            queue.add_crate("will_succeed", "1.0.0", -1, None)?;
-            queue.add_crate("will_fail", "1.0.0", 0, None)?;
+        queue.add_crate("will_succeed", "1.0.0", -1, None)?;
+        queue.add_crate("will_fail", "1.0.0", 0, None)?;
 
-            let fetch_invalidations = || {
-                env.runtime()
-                    .block_on(async {
-                        let mut conn = env.async_db().await.async_conn().await;
-                        cdn::queued_or_active_crate_invalidations(&mut conn).await
-                    })
-                    .unwrap()
-            };
+        let fetch_invalidations = || {
+            env.runtime()
+                .block_on(async {
+                    let mut conn = env.async_db().async_conn().await;
+                    cdn::queued_or_active_crate_invalidations(&mut conn).await
+                })
+                .unwrap()
+        };
 
-            assert!(fetch_invalidations().is_empty());
+        assert!(fetch_invalidations().is_empty());
 
-            queue.process_next_crate(|krate| {
-                assert_eq!("will_succeed", krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
+        queue.process_next_crate(|krate| {
+            assert_eq!("will_succeed", krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
 
-            let queued_invalidations = fetch_invalidations();
-            assert_eq!(queued_invalidations.len(), 3);
-            assert!(
-                queued_invalidations
-                    .iter()
-                    .all(|i| i.krate == "will_succeed")
-            );
+        let queued_invalidations = fetch_invalidations();
+        assert_eq!(queued_invalidations.len(), 3);
+        assert!(
+            queued_invalidations
+                .iter()
+                .all(|i| i.krate == "will_succeed")
+        );
 
-            queue.process_next_crate(|krate| {
-                assert_eq!("will_fail", krate.name);
-                anyhow::bail!("simulate a failure");
-            })?;
+        queue.process_next_crate(|krate| {
+            assert_eq!("will_fail", krate.name);
+            anyhow::bail!("simulate a failure");
+        })?;
 
-            let queued_invalidations = fetch_invalidations();
-            assert_eq!(queued_invalidations.len(), 6);
-            assert!(
-                queued_invalidations
-                    .iter()
-                    .skip(3)
-                    .all(|i| i.krate == "will_fail")
-            );
+        let queued_invalidations = fetch_invalidations();
+        assert_eq!(queued_invalidations.len(), 6);
+        assert!(
+            queued_invalidations
+                .iter()
+                .skip(3)
+                .all(|i| i.krate == "will_fail")
+        );
 
-            Ok(())
-        })
+        Ok(())
     }
 
     #[test]
-    fn test_pending_count() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
+    fn test_pending_count() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
 
-            assert_eq!(queue.pending_count()?, 0);
-            queue.add_crate("foo", "1.0.0", 0, None)?;
-            assert_eq!(queue.pending_count()?, 1);
-            queue.add_crate("bar", "1.0.0", 0, None)?;
-            assert_eq!(queue.pending_count()?, 2);
+        let queue = env.build_queue();
 
-            queue.process_next_crate(|krate| {
-                assert_eq!("foo", krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
-            assert_eq!(queue.pending_count()?, 1);
+        assert_eq!(queue.pending_count()?, 0);
+        queue.add_crate("foo", "1.0.0", 0, None)?;
+        assert_eq!(queue.pending_count()?, 1);
+        queue.add_crate("bar", "1.0.0", 0, None)?;
+        assert_eq!(queue.pending_count()?, 2);
 
-            Ok(())
-        });
+        queue.process_next_crate(|krate| {
+            assert_eq!("foo", krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
+        assert_eq!(queue.pending_count()?, 1);
+
+        drop(env);
+
+        Ok(())
     }
 
     #[test]
-    fn test_prioritized_count() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
-
-            assert_eq!(queue.prioritized_count()?, 0);
-            queue.add_crate("foo", "1.0.0", 0, None)?;
-            assert_eq!(queue.prioritized_count()?, 1);
-            queue.add_crate("bar", "1.0.0", -100, None)?;
-            assert_eq!(queue.prioritized_count()?, 2);
-            queue.add_crate("baz", "1.0.0", 100, None)?;
-            assert_eq!(queue.prioritized_count()?, 2);
-
-            queue.process_next_crate(|krate| {
-                assert_eq!("bar", krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
-            assert_eq!(queue.prioritized_count()?, 1);
+    fn test_prioritized_count() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let queue = env.build_queue();
+
+        assert_eq!(queue.prioritized_count()?, 0);
+        queue.add_crate("foo", "1.0.0", 0, None)?;
+        assert_eq!(queue.prioritized_count()?, 1);
+        queue.add_crate("bar", "1.0.0", -100, None)?;
+        assert_eq!(queue.prioritized_count()?, 2);
+        queue.add_crate("baz", "1.0.0", 100, None)?;
+        assert_eq!(queue.prioritized_count()?, 2);
+
+        queue.process_next_crate(|krate| {
+            assert_eq!("bar", krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
+        assert_eq!(queue.prioritized_count()?, 1);
 
-            Ok(())
-        });
+        Ok(())
     }
 
     #[test]
-    fn test_count_by_priority() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
-
-            assert!(queue.pending_count_by_priority()?.is_empty());
-
-            queue.add_crate("one", "1.0.0", 1, None)?;
-            queue.add_crate("two", "2.0.0", 2, None)?;
-            queue.add_crate("two_more", "2.0.0", 2, None)?;
-
-            assert_eq!(
-                queue.pending_count_by_priority()?,
-                HashMap::from_iter(vec![(1, 1), (2, 2)])
-            );
-
-            while queue.pending_count()? > 0 {
-                queue.process_next_crate(|_| Ok(BuildPackageSummary::default()))?;
-            }
-            assert!(queue.pending_count_by_priority()?.is_empty());
-
-            Ok(())
-        });
+    fn test_count_by_priority() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let queue = env.build_queue();
+
+        assert!(queue.pending_count_by_priority()?.is_empty());
+
+        queue.add_crate("one", "1.0.0", 1, None)?;
+        queue.add_crate("two", "2.0.0", 2, None)?;
+        queue.add_crate("two_more", "2.0.0", 2, None)?;
+
+        assert_eq!(
+            queue.pending_count_by_priority()?,
+            HashMap::from_iter(vec![(1, 1), (2, 2)])
+        );
+
+        while queue.pending_count()? > 0 {
+            queue.process_next_crate(|_| Ok(BuildPackageSummary::default()))?;
+        }
+        assert!(queue.pending_count_by_priority()?.is_empty());
+
+        Ok(())
     }
 
     #[test]
-    fn test_failed_count_for_reattempts() {
-        const MAX_ATTEMPTS: u16 = 3;
-        crate::test::wrapper(|env| {
-            env.override_config(|config| {
-                config.build_attempts = MAX_ATTEMPTS;
-                config.delay_between_build_attempts = Duration::ZERO;
-            });
-            let queue = env.build_queue();
-            assert_eq!(queue.failed_count()?, 0);
-            queue.add_crate("foo", "1.0.0", -100, None)?;
-            assert_eq!(queue.failed_count()?, 0);
-            queue.add_crate("bar", "1.0.0", 0, None)?;
-
-            for _ in 0..MAX_ATTEMPTS {
-                assert_eq!(queue.failed_count()?, 0);
-                queue.process_next_crate(|krate| {
-                    assert_eq!("foo", krate.name);
-                    Ok(BuildPackageSummary {
-                        should_reattempt: true,
-                        ..Default::default()
-                    })
-                })?;
-            }
-            assert_eq!(queue.failed_count()?, 1);
-
-            queue.process_next_crate(|krate| {
-                assert_eq!("bar", krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
-            assert_eq!(queue.failed_count()?, 1);
-
-            Ok(())
-        });
+    fn test_failed_count_for_reattempts() -> Result<()> {
+        let env = TestEnvironment::with_config_and_runtime(
+            TestEnvironment::base_config()
+                .build_attempts(MAX_ATTEMPTS)
+                .delay_between_build_attempts(Duration::ZERO)
+                .build()?,
+        )?;
+
+        const MAX_ATTEMPTS: u16 = 3;
+
+        let queue = env.build_queue();
+
+        assert_eq!(queue.failed_count()?, 0);
+        queue.add_crate("foo", "1.0.0", -100, None)?;
+        assert_eq!(queue.failed_count()?, 0);
+        queue.add_crate("bar", "1.0.0", 0, None)?;
+
+        for _ in 0..MAX_ATTEMPTS {
+            assert_eq!(queue.failed_count()?, 0);
+            queue.process_next_crate(|krate| {
+                assert_eq!("foo", krate.name);
+                Ok(BuildPackageSummary {
+                    should_reattempt: true,
+                    ..Default::default()
+                })
+            })?;
+        }
+        assert_eq!(queue.failed_count()?, 1);
+
+        queue.process_next_crate(|krate| {
+            assert_eq!("bar", krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
+        assert_eq!(queue.failed_count()?, 1);
+
+        Ok(())
     }
 
     #[test]
-    fn test_failed_count_after_error() {
-        const MAX_ATTEMPTS: u16 = 3;
-        crate::test::wrapper(|env| {
-            env.override_config(|config| {
-                config.build_attempts = MAX_ATTEMPTS;
-                config.delay_between_build_attempts = Duration::ZERO;
-            });
-            let queue = env.build_queue();
-            assert_eq!(queue.failed_count()?, 0);
-            queue.add_crate("foo", "1.0.0", -100, None)?;
-            assert_eq!(queue.failed_count()?, 0);
-            queue.add_crate("bar", "1.0.0", 0, None)?;
-
-            for _ in 0..MAX_ATTEMPTS {
-                assert_eq!(queue.failed_count()?, 0);
-                queue.process_next_crate(|krate| {
-                    assert_eq!("foo", krate.name);
-                    anyhow::bail!("this failed");
-                })?;
-            }
-            assert_eq!(queue.failed_count()?, 1);
-
-            queue.process_next_crate(|krate| {
-                assert_eq!("bar", krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
-            assert_eq!(queue.failed_count()?, 1);
-
-            Ok(())
-        });
+    fn test_failed_count_after_error() -> Result<()> {
+        let env = TestEnvironment::with_config_and_runtime(
+            TestEnvironment::base_config()
+                .build_attempts(MAX_ATTEMPTS)
+                .delay_between_build_attempts(Duration::ZERO)
+                .build()?,
+        )?;
+
+        const MAX_ATTEMPTS: u16 = 3;
+
+        let queue = env.build_queue();
+
+        assert_eq!(queue.failed_count()?, 0);
+        queue.add_crate("foo", "1.0.0", -100, None)?;
+        assert_eq!(queue.failed_count()?, 0);
+        queue.add_crate("bar", "1.0.0", 0, None)?;
+
+        for _ in 0..MAX_ATTEMPTS {
+            assert_eq!(queue.failed_count()?, 0);
+            queue.process_next_crate(|krate| {
+                assert_eq!("foo", krate.name);
+                anyhow::bail!("this failed");
+            })?;
+        }
+        assert_eq!(queue.failed_count()?, 1);
+
+        queue.process_next_crate(|krate| {
+            assert_eq!("bar", krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
+        assert_eq!(queue.failed_count()?, 1);
+
+        Ok(())
     }
 
     #[test]
-    fn test_queued_crates() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
-
-            let test_crates = [
-                ("bar", "1.0.0", 0),
-                ("foo", "1.0.0", -10),
-                ("baz", "1.0.0", 10),
-            ];
-            for krate in &test_crates {
-                queue.add_crate(krate.0, krate.1, krate.2, None)?;
-            }
-
-            assert_eq!(
-                vec![
-                    ("foo", "1.0.0", -10),
-                    ("bar", "1.0.0", 0),
-                    ("baz", "1.0.0", 10),
-                ],
-                queue
-                    .queued_crates()?
-                    .iter()
-                    .map(|c| (c.name.as_str(), c.version.as_str(), c.priority))
-                    .collect::<Vec<_>>()
-            );
-
-            Ok(())
-        });
+    fn test_queued_crates() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let queue = env.build_queue();
+
+        let test_crates = [
+            ("bar", "1.0.0", 0),
+            ("foo", "1.0.0", -10),
+            ("baz", "1.0.0", 10),
+        ];
+        for krate in &test_crates {
+            queue.add_crate(krate.0, krate.1, krate.2, None)?;
+        }
+
+        assert_eq!(
+            vec![
+                ("foo", "1.0.0", -10),
+                ("bar", "1.0.0", 0),
+                ("baz", "1.0.0", 10),
+            ],
+            queue
+                .queued_crates()?
+                .iter()
+                .map(|c| (c.name.as_str(), c.version.as_str(), c.priority))
+                .collect::<Vec<_>>()
+        );
+
+        Ok(())
     }
 
     #[test]
-    fn test_last_seen_reference_in_db() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
-            queue.unlock()?;
-            assert!(!queue.is_locked()?);
-            // initial db ref is empty
-            assert_eq!(queue.last_seen_reference()?, None);
-            assert!(!queue.is_locked()?);
-
-            let oid = crates_index_diff::gix::ObjectId::from_hex(
-                b"ffffffffffffffffffffffffffffffffffffffff",
-            )?;
-            queue.set_last_seen_reference(oid)?;
-
-            assert_eq!(queue.last_seen_reference()?, Some(oid));
-            assert!(!queue.is_locked()?);
-
-            Ok(())
-        });
+    fn test_last_seen_reference_in_db() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let queue = env.build_queue();
+        queue.unlock()?;
+        assert!(!queue.is_locked()?);
+        // initial db ref is empty
+        assert_eq!(queue.last_seen_reference()?, None);
+        assert!(!queue.is_locked()?);
+
+        let oid = crates_index_diff::gix::ObjectId::from_hex(
+            b"ffffffffffffffffffffffffffffffffffffffff",
+        )?;
+        queue.set_last_seen_reference(oid)?;
+
+        assert_eq!(queue.last_seen_reference()?, Some(oid));
+        assert!(!queue.is_locked()?);
+
+        Ok(())
     }
 
     #[test]
-    fn test_broken_db_reference_breaks() {
-        crate::test::wrapper(|env| {
-            env.runtime().block_on(async {
-                let mut conn = env.async_db().await.async_conn().await;
-                set_config(&mut conn, ConfigName::LastSeenIndexReference, "invalid")
-                    .await
-                    .unwrap();
-            });
-
-            let queue = env.build_queue();
-            assert!(queue.last_seen_reference().is_err());
-
-            Ok(())
-        });
+    fn test_broken_db_reference_breaks() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        env.runtime().block_on(async {
+            let mut conn = env.async_db().async_conn().await;
+            set_config(&mut conn, ConfigName::LastSeenIndexReference, "invalid")
+                .await
+                .unwrap();
+        });
+
+        let queue = env.build_queue();
+        assert!(queue.last_seen_reference().is_err());
+
+        Ok(())
     }
 
     #[test]
-    fn test_queue_lock() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
-            // unlocked without config
-            assert!(!queue.is_locked()?);
-
-            queue.lock()?;
-            assert!(queue.is_locked()?);
-
-            queue.unlock()?;
-            assert!(!queue.is_locked()?);
-
-            Ok(())
-        });
+    fn test_queue_lock() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let queue = env.build_queue();
+        // unlocked without config
+        assert!(!queue.is_locked()?);
+
+        queue.lock()?;
+        assert!(queue.is_locked()?);
+
+        queue.unlock()?;
+        assert!(!queue.is_locked()?);
+
+        Ok(())
     }
 
     #[test]
-    fn test_add_long_name() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
+    fn test_add_long_name() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
 
-            let name: String = "krate".repeat(100);
+        let queue = env.build_queue();
 
-            queue.add_crate(&name, "0.0.1", 0, None)?;
+        let name: String = "krate".repeat(100);
 
-            queue.process_next_crate(|krate| {
-                assert_eq!(name, krate.name);
-                Ok(BuildPackageSummary::default())
-            })?;
+        queue.add_crate(&name, "0.0.1", 0, None)?;
 
-            Ok(())
-        })
+        queue.process_next_crate(|krate| {
+            assert_eq!(name, krate.name);
+            Ok(BuildPackageSummary::default())
+        })?;
+
+        Ok(())
     }
 
     #[test]
-    fn test_add_long_version() {
-        crate::test::wrapper(|env| {
-            let queue = env.build_queue();
+    fn test_add_long_version() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
 
-            let version: String = "version".repeat(100);
+        let queue = env.build_queue();
 
-            queue.add_crate("krate", &version, 0, None)?;
+        let version: String = "version".repeat(100);
 
-            queue.process_next_crate(|krate| {
-                assert_eq!(version, krate.version);
-                Ok(BuildPackageSummary::default())
-            })?;
+        queue.add_crate("krate", &version, 0, None)?;
 
-            Ok(())
-        })
+        queue.process_next_crate(|krate| {
+            assert_eq!(version, krate.version);
+            Ok(BuildPackageSummary::default())
+        })?;
+
+        Ok(())
     }
 }
diff --git a/src/cdn.rs b/src/cdn.rs
index 5dae03b92..a745107d9 100644
--- a/src/cdn.rs
+++ b/src/cdn.rs
@@ -25,7 +25,7 @@ use uuid::Uuid;
 const MAX_CLOUDFRONT_WILDCARD_INVALIDATIONS: i32 = 13;
 
 #[derive(Debug, EnumString)]
-pub(crate) enum CdnKind {
+pub enum CdnKind {
     #[strum(ascii_case_insensitive)]
     Dummy,
 
@@ -51,7 +51,7 @@ pub enum CdnBackend {
 }
 
 impl CdnBackend {
-    pub async fn new(config: &Arc<Config>) -> CdnBackend {
+    pub async fn new(config: &Config) -> CdnBackend {
         match config.cdn_backend {
             CdnKind::CloudFront => {
                 let shared_config = aws_config::load_defaults(BehaviorVersion::latest()).await;
@@ -642,12 +642,21 @@ mod tests {
     use std::time::Duration;
 
     use super::*;
-    use crate::test::async_wrapper;
+    use crate::test::TestEnvironment;
     use aws_sdk_cloudfront::{Config, config::Credentials};
     use aws_smithy_runtime::client::http::test_util::{ReplayEvent, StaticReplayClient};
     use aws_smithy_types::body::SdkBody;
 
+    const DISTRIBUTION_ID_WEB: &str = "distribution_id_web";
+    const DISTRIBUTION_ID_STATIC: &str = "distribution_id_static";
+
+    fn config_with_cdn() -> crate::config::ConfigBuilder {
+        TestEnvironment::base_config()
+            .cloudfront_distribution_id_web(Some(DISTRIBUTION_ID_WEB.into()))
+            .cloudfront_distribution_id_static(Some(DISTRIBUTION_ID_STATIC.into()))
+    }
+
     fn active_invalidations(cdn: &CdnBackend, distribution_id: &str) -> Vec<CdnInvalidation> {
         let CdnBackend::Dummy {
             invalidation_requests,
             ..
         } = cdn
@@ -692,513 +701,485 @@ mod tests {
         Ok(())
     }
 
-    #[test]
-    fn create_cloudfront() {
-        async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.cdn_backend = CdnKind::CloudFront;
-            });
-
-            assert!(matches!(*env.cdn().await, CdnBackend::CloudFront { .. }));
-            assert!(matches!(
-                CdnBackend::new(&env.config()).await,
-                CdnBackend::CloudFront { .. }
-            ));
-
-            Ok(())
-        })
+    #[tokio::test(flavor = "multi_thread")]
+    async fn create_cloudfront() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .cdn_backend(CdnKind::CloudFront)
+                .build()?,
+        )
+        .await?;
+
+        assert!(matches!(*env.cdn(), CdnBackend::CloudFront { .. }));
+        assert!(matches!(
+            CdnBackend::new(env.config()).await,
+            CdnBackend::CloudFront { .. }
+        ));
+        Ok(())
     }
 
-    #[test]
-    fn create_dummy() {
-        async_wrapper(|env| async move {
-            assert!(matches!(*env.cdn().await, CdnBackend::Dummy { .. }));
-            assert!(matches!(
-                CdnBackend::new(&env.config()).await,
-                CdnBackend::Dummy { .. }
-            ));
-
-            Ok(())
-        })
+    #[tokio::test(flavor = "multi_thread")]
+    async fn create_dummy() -> Result<()> {
+        let env = TestEnvironment::new().await?;
+
+        assert!(matches!(*env.cdn(), CdnBackend::Dummy { .. }));
+        assert!(matches!(
+            CdnBackend::new(env.config()).await,
+            CdnBackend::Dummy { .. }
+        ));
+        Ok(())
     }
 
-    #[test]
-    fn invalidation_counts_are_zero_with_empty_queue() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.cloudfront_distribution_id_web = Some("distribution_id_web".into());
-                config.cloudfront_distribution_id_static = Some("distribution_id_static".into());
-            });
-
-            let config = env.config();
-            let mut conn = env.async_db().await.async_conn().await;
-            assert!(
-                queued_or_active_crate_invalidations(&mut conn)
-                    .await?
-                    .is_empty()
-            );
+    #[tokio::test(flavor = "multi_thread")]
+    async fn invalidation_counts_are_zero_with_empty_queue() -> Result<()> {
+        let env = TestEnvironment::with_config(config_with_cdn().build()?).await?;
 
-            let counts =
-                queued_or_active_crate_invalidation_count_by_distribution(&mut conn, &config)
-                    .await?;
-            assert_eq!(counts.len(), 2);
-            assert_eq!(*counts.get("distribution_id_web").unwrap(), 0);
-            assert_eq!(*counts.get("distribution_id_static").unwrap(), 0);
-            Ok(())
-        })
+        let config = env.config();
+        let mut conn = env.async_db().async_conn().await;
+        assert!(
+            queued_or_active_crate_invalidations(&mut conn)
+                .await?
+                .is_empty()
+        );
+
+        let counts =
+            queued_or_active_crate_invalidation_count_by_distribution(&mut conn, config).await?;
+        assert_eq!(counts.len(), 2);
+        assert_eq!(*counts.get(DISTRIBUTION_ID_WEB).unwrap(), 0);
+        assert_eq!(*counts.get(DISTRIBUTION_ID_STATIC).unwrap(), 0);
+        Ok(())
     }
 
-    #[test]
-    fn escalate_to_full_invalidation() {
-        crate::test::async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.cloudfront_distribution_id_web = Some("distribution_id_web".into());
-                config.cloudfront_distribution_id_static = Some("distribution_id_static".into());
-                config.cdn_max_queued_age = Duration::from_secs(0);
-            });
-
-            let cdn = env.cdn().await;
-            let config = env.config();
-            let mut conn = env.async_db().await.async_conn().await;
-            assert!(
-                queued_or_active_crate_invalidations(&mut conn)
-                    .await?
-                    .is_empty()
-            );
+    #[tokio::test(flavor = "multi_thread")]
+    async fn escalate_to_full_invalidation() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            config_with_cdn()
+                .cdn_max_queued_age(Duration::from_secs(0))
+                .build()?,
+        )
+        .await?;
+
+        let cdn = env.cdn();
+        let config = env.config();
+        let mut conn = env.async_db().async_conn().await;
+        assert!(
+            queued_or_active_crate_invalidations(&mut conn)
+                .await?
+ .is_empty() + ); - let counts = - queued_or_active_crate_invalidation_count_by_distribution(&mut conn, &config) - .await?; - assert_eq!(counts.len(), 2); - assert_eq!(*counts.get("distribution_id_web").unwrap(), 2); - assert_eq!(*counts.get("distribution_id_static").unwrap(), 1); - - // queueing the invalidation doesn't create it in the CDN - assert!(active_invalidations(&cdn, "distribution_id_web").is_empty()); - assert!(active_invalidations(&cdn, "distribution_id_static").is_empty()); - - let cdn = env.cdn().await; - let config = env.config(); - - // now handle the queued invalidations - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", - ) - .await?; - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_static", - ) - .await?; + queue_crate_invalidation(&mut conn, env.config(), "krate").await?; - // which creates them in the CDN - { - let ir_web = active_invalidations(&cdn, "distribution_id_web"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_web[0].path_patterns, vec!["/*"]); + // invalidation paths are queued. + assert_eq!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .into_iter() + .map(|i| ( + i.cdn_distribution_id, + i.krate, + i.path_pattern, + i.cdn_reference + )) + .collect::>(), + vec![ + ( + DISTRIBUTION_ID_WEB.into(), + "krate".into(), + "/krate*".into(), + None + ), + ( + DISTRIBUTION_ID_WEB.into(), + "krate".into(), + "/crate/krate*".into(), + None + ), + ( + DISTRIBUTION_ID_STATIC.into(), + "krate".into(), + "/rustdoc/krate*".into(), + None + ), + ] + ); - let ir_static = active_invalidations(&cdn, "distribution_id_static"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_static[0].path_patterns, vec!["/*"]); - } + let counts = + queued_or_active_crate_invalidation_count_by_distribution(&mut conn, config).await?; + assert_eq!(counts.len(), 2); + assert_eq!(*counts.get(DISTRIBUTION_ID_WEB).unwrap(), 2); + assert_eq!(*counts.get(DISTRIBUTION_ID_STATIC).unwrap(), 1); + + // queueing the invalidation doesn't create it in the CDN + assert!(active_invalidations(cdn, DISTRIBUTION_ID_WEB).is_empty()); + assert!(active_invalidations(cdn, DISTRIBUTION_ID_STATIC).is_empty()); + + let cdn = env.cdn(); + let config = env.config(); + + // now handle the queued invalidations + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_STATIC, + ) + .await?; - // the queued entries got a CDN reference attached - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? 
- .iter() - .all(|i| i.cdn_reference.is_some() && i.created_in_cdn.is_some()) - ); + // which creates them in the CDN + { + let ir_web = active_invalidations(cdn, DISTRIBUTION_ID_WEB); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_web[0].path_patterns, vec!["/*"]); - Ok(()) - }); - } + let ir_static = active_invalidations(cdn, DISTRIBUTION_ID_STATIC); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_static[0].path_patterns, vec!["/*"]); + } - #[test] - fn invalidate_a_crate() { - crate::test::async_wrapper(|env| async move { - env.override_config(|config| { - config.cloudfront_distribution_id_web = Some("distribution_id_web".into()); - config.cloudfront_distribution_id_static = Some("distribution_id_static".into()); - }); - - let cdn = env.cdn().await; - let config = env.config(); - let mut conn = env.async_db().await.async_conn().await; - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .is_empty() - ); + // the queued entries got a CDN reference attached + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .iter() + .all(|i| i.cdn_reference.is_some() && i.created_in_cdn.is_some()) + ); - queue_crate_invalidation(&mut conn, &env.config(), "krate").await?; - - // invalidation paths are queued. - assert_eq!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .into_iter() - .map(|i| ( - i.cdn_distribution_id, - i.krate, - i.path_pattern, - i.cdn_reference - )) - .collect::>(), - vec![ - ( - "distribution_id_web".into(), - "krate".into(), - "/krate*".into(), - None - ), - ( - "distribution_id_web".into(), - "krate".into(), - "/crate/krate*".into(), - None - ), - ( - "distribution_id_static".into(), - "krate".into(), - "/rustdoc/krate*".into(), - None - ), - ] - ); + Ok(()) + } - let counts = - queued_or_active_crate_invalidation_count_by_distribution(&mut conn, &config) - .await?; - assert_eq!(counts.len(), 2); - assert_eq!(*counts.get("distribution_id_web").unwrap(), 2); - assert_eq!(*counts.get("distribution_id_static").unwrap(), 1); - - // queueing the invalidation doesn't create it in the CDN - assert!(active_invalidations(&cdn, "distribution_id_web").is_empty()); - assert!(active_invalidations(&cdn, "distribution_id_static").is_empty()); - - let cdn = env.cdn().await; - let config = env.config(); - - // now handle the queued invalidations - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", - ) - .await?; - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_static", - ) - .await?; + #[tokio::test(flavor = "multi_thread")] + async fn invalidate_a_crate() -> Result<()> { + let env = TestEnvironment::with_config(config_with_cdn().build()?).await?; - // which creates them in the CDN - { - let ir_web = active_invalidations(&cdn, "distribution_id_web"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_web[0].path_patterns, vec!["/krate*", "/crate/krate*"]); + let cdn = env.cdn(); + let config = env.config(); + let mut conn = env.async_db().async_conn().await; + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .is_empty() + ); - let ir_static = active_invalidations(&cdn, "distribution_id_static"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_static[0].path_patterns, vec!["/rustdoc/krate*"]); - } + queue_crate_invalidation(&mut conn, env.config(), "krate").await?; - // the queued entries got a CDN reference attached - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? 
- .iter() - .all(|i| i.cdn_reference.is_some() && i.created_in_cdn.is_some()) - ); + // invalidation paths are queued. + assert_eq!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .into_iter() + .map(|i| ( + i.cdn_distribution_id, + i.krate, + i.path_pattern, + i.cdn_reference + )) + .collect::>(), + vec![ + ( + DISTRIBUTION_ID_WEB.into(), + "krate".into(), + "/krate*".into(), + None + ), + ( + DISTRIBUTION_ID_WEB.into(), + "krate".into(), + "/crate/krate*".into(), + None + ), + ( + DISTRIBUTION_ID_STATIC.into(), + "krate".into(), + "/rustdoc/krate*".into(), + None + ), + ] + ); - // clear the active invalidations in the CDN to _fake_ them - // being completed on the CDN side. - cdn.clear_active_invalidations(); - - // now handle again - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", - ) - .await?; - handle_queued_invalidation_requests( - &config, - &cdn, - &env.instance_metrics(), - &mut conn, - "distribution_id_static", - ) - .await?; + let counts = + queued_or_active_crate_invalidation_count_by_distribution(&mut conn, config).await?; + assert_eq!(counts.len(), 2); + assert_eq!(*counts.get(DISTRIBUTION_ID_WEB).unwrap(), 2); + assert_eq!(*counts.get(DISTRIBUTION_ID_STATIC).unwrap(), 1); + + // queueing the invalidation doesn't create it in the CDN + assert!(active_invalidations(cdn, DISTRIBUTION_ID_WEB).is_empty()); + assert!(active_invalidations(cdn, DISTRIBUTION_ID_STATIC).is_empty()); + + let cdn = env.cdn(); + let config = env.config(); + + // now handle the queued invalidations + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_STATIC, + ) + .await?; - // which removes them from the queue table - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .is_empty() - ); + // which creates them in the CDN + { + let ir_web = active_invalidations(cdn, DISTRIBUTION_ID_WEB); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_web[0].path_patterns, vec!["/krate*", "/crate/krate*"]); - Ok(()) - }); - } + let ir_static = active_invalidations(cdn, DISTRIBUTION_ID_STATIC); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_static[0].path_patterns, vec!["/rustdoc/krate*"]); + } - #[test] - fn only_add_some_invalidations_when_too_many_are_active() { - crate::test::async_wrapper(|env| async move { - env.override_config(|config| { - config.cloudfront_distribution_id_web = Some("distribution_id_web".into()); - }); - - let cdn = env.cdn().await; - - // create an invalidation with 15 paths, so we're over the limit - let already_running_invalidation = cdn - .create_invalidation( - "distribution_id_web", - &(0..(MAX_CLOUDFRONT_WILDCARD_INVALIDATIONS - 1)) - .map(|_| "/something*") - .collect::>(), - ) - .await?; + // the queued entries got a CDN reference attached + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .iter() + .all(|i| i.cdn_reference.is_some() && i.created_in_cdn.is_some()) + ); - let mut conn = env.async_db().await.async_conn().await; - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .is_empty() - ); + // clear the active invalidations in the CDN to _fake_ them + // being completed on the CDN side. 
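// For orientation, the queued-invalidation lifecycle these tests exercise, as a
// sketch (names as used in this module; the Dummy CDN backend only records
// requests instead of calling CloudFront):
//
//     queue_crate_invalidation(&mut conn, config, "krate")  // queue rows, cdn_reference = None
//     handle_queued_invalidation_requests(..)               // rows get a cdn_reference, request created in CDN
//     cdn.clear_active_invalidations()                      // fake CDN-side completion (dummy backend only)
//     handle_queued_invalidation_requests(..)               // completed rows removed from the queue table
//
// Escalation: entries older than config.cdn_max_queued_age collapse into a single
// "/*" wildcard invalidation, which escalate_to_full_invalidation above forces by
// building its config with cdn_max_queued_age(Duration::from_secs(0)).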
+ cdn.clear_active_invalidations(); + + // now handle again + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; + handle_queued_invalidation_requests( + config, + cdn, + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_STATIC, + ) + .await?; - // insert some completed invalidations into the queue & the CDN, these will be ignored - for i in 0..10 { - insert_running_invalidation( - &mut conn, - "distribution_id_web", - &format!("some_id_{i}"), - ) - .await?; - cdn.insert_completed_invalidation( - "distribution_id_web", - &format!("some_id_{i}"), - &["/*"], - ); - } + // which removes them from the queue table + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .is_empty() + ); - // insert the CDN representation of the already running invalidation - insert_running_invalidation( - &mut conn, - "distribution_id_web", - &already_running_invalidation.invalidation_id, - ) - .await?; + Ok(()) + } - // queue an invalidation - queue_crate_invalidation(&mut conn, &env.config(), "krate").await?; + #[tokio::test(flavor = "multi_thread")] + async fn only_add_some_invalidations_when_too_many_are_active() -> Result<()> { + let env = TestEnvironment::with_config(config_with_cdn().build()?).await?; - // handle the queued invalidations - handle_queued_invalidation_requests( - &env.config(), - &*env.cdn().await, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", + let cdn = env.cdn(); + + // create an invalidation with 15 paths, so we're over the limit + let already_running_invalidation = cdn + .create_invalidation( + DISTRIBUTION_ID_WEB, + &(0..(MAX_CLOUDFRONT_WILDCARD_INVALIDATIONS - 1)) + .map(|_| "/something*") + .collect::>(), ) .await?; - // only one path was added to the CDN - let q = queued_or_active_crate_invalidations(&mut conn).await?; - assert_eq!( - q.iter() - .filter_map(|i| i.cdn_reference.as_ref()) - .filter(|&reference| reference != &already_running_invalidation.invalidation_id) - .count(), - 1 + let mut conn = env.async_db().async_conn().await; + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? 
+ .is_empty() + ); + + // insert some completed invalidations into the queue & the CDN, these will be ignored + for i in 0..10 { + insert_running_invalidation(&mut conn, DISTRIBUTION_ID_WEB, &format!("some_id_{i}")) + .await?; + cdn.insert_completed_invalidation( + DISTRIBUTION_ID_WEB, + &format!("some_id_{i}"), + &["/*"], ); + } - // old invalidation is still active, new one is added - let ir_web = active_invalidations(&cdn, "distribution_id_web"); - assert_eq!(ir_web.len(), 2); - assert_eq!(ir_web[0].path_patterns.len(), 12); - assert_eq!(ir_web[1].path_patterns.len(), 1); + // insert the CDN representation of the already running invalidation + insert_running_invalidation( + &mut conn, + DISTRIBUTION_ID_WEB, + &already_running_invalidation.invalidation_id, + ) + .await?; - Ok(()) - }); - } + // queue an invalidation + queue_crate_invalidation(&mut conn, env.config(), "krate").await?; - #[test] - fn dont_create_invalidations_when_too_many_are_active() { - crate::test::async_wrapper(|env| async move { - env.override_config(|config| { - config.cloudfront_distribution_id_web = Some("distribution_id_web".into()); - }); + // handle the queued invalidations + handle_queued_invalidation_requests( + env.config(), + env.cdn(), + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; - let cdn = env.cdn().await; + // only one path was added to the CDN + let q = queued_or_active_crate_invalidations(&mut conn).await?; + assert_eq!( + q.iter() + .filter_map(|i| i.cdn_reference.as_ref()) + .filter(|&reference| reference != &already_running_invalidation.invalidation_id) + .count(), + 1 + ); - // create an invalidation with 15 paths, so we're over the limit - let already_running_invalidation = cdn - .create_invalidation( - "distribution_id_web", - &(0..15).map(|_| "/something*").collect::>(), - ) - .await?; + // old invalidation is still active, new one is added + let ir_web = active_invalidations(cdn, DISTRIBUTION_ID_WEB); + assert_eq!(ir_web.len(), 2); + assert_eq!(ir_web[0].path_patterns.len(), 12); + assert_eq!(ir_web[1].path_patterns.len(), 1); - let mut conn = env.async_db().await.async_conn().await; - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .is_empty() - ); - insert_running_invalidation( - &mut conn, - "distribution_id_web", - &already_running_invalidation.invalidation_id, - ) - .await?; + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn dont_create_invalidations_when_too_many_are_active() -> Result<()> { + let env = TestEnvironment::with_config(config_with_cdn().build()?).await?; - // queue an invalidation - queue_crate_invalidation(&mut conn, &env.config(), "krate").await?; + let cdn = env.cdn(); - // handle the queued invalidations - handle_queued_invalidation_requests( - &env.config(), - &*env.cdn().await, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", + // create an invalidation with 15 paths, so we're over the limit + let already_running_invalidation = cdn + .create_invalidation( + DISTRIBUTION_ID_WEB, + &(0..15).map(|_| "/something*").collect::>(), ) .await?; - // nothing was added to the CDN - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .iter() - .filter(|i| !matches!( + let mut conn = env.async_db().async_conn().await; + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? 
+ .is_empty() + ); + insert_running_invalidation( + &mut conn, + DISTRIBUTION_ID_WEB, + &already_running_invalidation.invalidation_id, + ) + .await?; + + // queue an invalidation + queue_crate_invalidation(&mut conn, env.config(), "krate").await?; + + // handle the queued invalidations + handle_queued_invalidation_requests( + env.config(), + env.cdn(), + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; + + // nothing was added to the CDN + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .iter() + .filter(|i| i.cdn_distribution_id == DISTRIBUTION_ID_WEB + && !matches!( &i.cdn_reference, Some(val) if val == &already_running_invalidation.invalidation_id )) - .all(|i| i.cdn_reference.is_none()) - ); + .all(|i| i.cdn_reference.is_none()) + ); - // old invalidations are still active - let ir_web = active_invalidations(&cdn, "distribution_id_web"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_web[0].path_patterns.len(), 15); - - // clear the active invalidations in the CDN to _fake_ them - // being completed on the CDN side. - cdn.clear_active_invalidations(); - - // now handle again - handle_queued_invalidation_requests( - &env.config(), - &*env.cdn().await, - &env.instance_metrics(), - &mut conn, - "distribution_id_web", - ) - .await?; + // old invalidations are still active + let ir_web = active_invalidations(cdn, DISTRIBUTION_ID_WEB); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_web[0].path_patterns.len(), 15); + + // clear the active invalidations in the CDN to _fake_ them + // being completed on the CDN side. + cdn.clear_active_invalidations(); + + // now handle again + handle_queued_invalidation_requests( + env.config(), + env.cdn(), + env.instance_metrics(), + &mut conn, + DISTRIBUTION_ID_WEB, + ) + .await?; - // which adds the CDN reference - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? - .iter() - .all(|i| i.cdn_reference.is_some()) - ); + // which adds the CDN reference + assert!( + queued_or_active_crate_invalidations(&mut conn) + .await? + .iter() + .filter(|i| i.cdn_distribution_id == DISTRIBUTION_ID_WEB) + .all(|i| i.cdn_reference.is_some()) + ); - // and creates them in the CDN too - let ir_web = active_invalidations(&cdn, "distribution_id_web"); - assert_eq!(ir_web.len(), 1); - assert_eq!(ir_web[0].path_patterns, vec!["/krate*", "/crate/krate*"]); + // and creates them in the CDN too + let ir_web = active_invalidations(cdn, DISTRIBUTION_ID_WEB); + assert_eq!(ir_web.len(), 1); + assert_eq!(ir_web[0].path_patterns, vec!["/krate*", "/crate/krate*"]); - Ok(()) - }); + Ok(()) } - #[test] - fn dont_create_invalidations_without_paths() { - crate::test::async_wrapper(|env| async move { - env.override_config(|config| { - config.cloudfront_distribution_id_web = Some("distribution_id_web".into()); - }); - - let cdn = env.cdn().await; - - let mut conn = env.async_db().await.async_conn().await; - // no invalidation is queued - assert!( - queued_or_active_crate_invalidations(&mut conn) - .await? 
-                .is_empty()
-            );
+    #[tokio::test(flavor = "multi_thread")]
+    async fn dont_create_invalidations_without_paths() -> Result<()> {
+        let env = TestEnvironment::with_config(config_with_cdn().build()?).await?;
-            // run the handler
-            handle_queued_invalidation_requests(
-                &env.config(),
-                &*env.cdn().await,
-                &env.instance_metrics(),
-                &mut conn,
-                "distribution_id_web",
-            )
-            .await?;
+        let cdn = env.cdn();
-            // no invalidation was created
-            assert!(active_invalidations(&cdn, "distribution_id_web").is_empty());
+        let mut conn = env.async_db().async_conn().await;
+        // no invalidation is queued
+        assert!(
+            queued_or_active_crate_invalidations(&mut conn)
+                .await?
+                .is_empty()
+        );
-            Ok(())
-        });
+        // run the handler
+        handle_queued_invalidation_requests(
+            env.config(),
+            env.cdn(),
+            env.instance_metrics(),
+            &mut conn,
+            DISTRIBUTION_ID_WEB,
+        )
+        .await?;
+
+        // no invalidation was created
+        assert!(active_invalidations(cdn, DISTRIBUTION_ID_WEB).is_empty());
+
+        Ok(())
     }
 
     async fn get_mock_config(http_client: StaticReplayClient) -> aws_sdk_cloudfront::Config {
@@ -1218,7 +1199,7 @@ mod tests {
         Config::new(&cfg)
     }
 
-    #[tokio::test]
+    #[tokio::test(flavor = "multi_thread")]
     async fn invalidate_path() {
         let conn = StaticReplayClient::new(vec![ReplayEvent::new(
             http02::Request::builder()
@@ -1268,7 +1249,7 @@ mod tests {
         conn.assert_requests_match(&[]);
     }
 
-    #[tokio::test]
+    #[tokio::test(flavor = "multi_thread")]
     async fn get_invalidation_info_doesnt_exist() {
         let conn = StaticReplayClient::new(vec![ReplayEvent::new(
             http02::Request::builder()
@@ -1297,7 +1278,7 @@ mod tests {
         );
     }
 
-    #[tokio::test]
+    #[tokio::test(flavor = "multi_thread")]
     async fn get_invalidation_info_completed() {
         let conn = StaticReplayClient::new(vec![ReplayEvent::new(
             http02::Request::builder()
diff --git a/src/config.rs b/src/config.rs
index 923823d52..b99eb77db 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -4,7 +4,8 @@ use std::{env::VarError, error::Error, path::PathBuf, str::FromStr, time::Durati
 use tracing::trace;
 use url::Url;
 
-#[derive(Debug)]
+#[derive(Debug, derive_builder::Builder)]
+#[builder(pattern = "owned")]
 pub struct Config {
     pub prefix: PathBuf,
     pub registry_index_path: PathBuf,
@@ -29,11 +30,17 @@ pub struct Config {
     pub(crate) s3_bucket: String,
     pub(crate) s3_region: String,
     pub(crate) s3_endpoint: Option<String>,
+
+    // DO NOT CONFIGURE THIS THROUGH AN ENVIRONMENT VARIABLE!
+    // Accidentally turning this on outside of the test suite might cause data loss in the
+    // production environment.
     #[cfg(test)]
+    #[builder(default)]
     pub(crate) s3_bucket_is_temporary: bool,
 
     // CloudFront domain which we can access
     // public S3 files through
+    #[cfg_attr(test, builder(setter(into)))]
     pub(crate) s3_static_root_path: String,
 
     // Github authentication
@@ -126,7 +133,7 @@ pub struct Config {
 }
 
 impl Config {
-    pub fn from_env() -> Result<Self> {
+    pub fn from_env() -> Result<ConfigBuilder> {
         let old_vars = [
             ("CRATESFYI_PREFIX", "DOCSRS_PREFIX"),
             ("CRATESFYI_DATABASE_URL", "DOCSRS_DATABASE_URL"),
@@ -149,105 +156,82 @@ impl Config {
         let prefix: PathBuf = require_env("DOCSRS_PREFIX")?;
         let temp_dir = prefix.join("tmp");
 
-        Ok(Self {
-            build_attempts: env("DOCSRS_BUILD_ATTEMPTS", 5)?,
-            delay_between_build_attempts: Duration::from_secs(env::<u64>(
+        Ok(ConfigBuilder::default()
+            .build_attempts(env("DOCSRS_BUILD_ATTEMPTS", 5u16)?)
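// Sketch of the call shape this refactor enables: from_env() now returns the
// derive_builder-generated ConfigBuilder rather than a finished Config, and with
// #[builder(pattern = "owned")] each setter consumes and returns the builder.
// (The max_pool_size override below is a hypothetical example.)
//
//     let config: Config = Config::from_env()?   // ConfigBuilder seeded from the environment
//         .max_pool_size(10u32)                  // optional override before finishing
//         .build()?;                             // ConfigBuilderError if a required field is unset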
+            .delay_between_build_attempts(Duration::from_secs(env::<u64>(
                 "DOCSRS_DELAY_BETWEEN_BUILD_ATTEMPTS",
                 60,
-            )?),
-            delay_between_registry_fetches: Duration::from_secs(env::<u64>(
+            )?))
+            .delay_between_registry_fetches(Duration::from_secs(env::<u64>(
                 "DOCSRS_DELAY_BETWEEN_REGISTRY_FETCHES",
                 60,
-            )?),
-
-            crates_io_api_call_retries: env("DOCSRS_CRATESIO_API_CALL_RETRIES", 3)?,
-
-            registry_index_path: env("REGISTRY_INDEX_PATH", prefix.join("crates.io-index"))?,
-            registry_url: maybe_env("REGISTRY_URL")?,
-            registry_api_host: env(
+            )?))
+            .crates_io_api_call_retries(env("DOCSRS_CRATESIO_API_CALL_RETRIES", 3u32)?)
+            .registry_index_path(env("REGISTRY_INDEX_PATH", prefix.join("crates.io-index"))?)
+            .registry_url(maybe_env("REGISTRY_URL")?)
+            .registry_api_host(env(
                 "DOCSRS_REGISTRY_API_HOST",
                 "https://crates.io".parse().unwrap(),
-            )?,
-            prefix: prefix.clone(),
-
-            database_url: require_env("DOCSRS_DATABASE_URL")?,
-            max_pool_size: env("DOCSRS_MAX_POOL_SIZE", 90)?,
-            min_pool_idle: env("DOCSRS_MIN_POOL_IDLE", 10)?,
-
-            storage_backend: env("DOCSRS_STORAGE_BACKEND", StorageKind::Database)?,
-
-            aws_sdk_max_retries: env("DOCSRS_AWS_SDK_MAX_RETRIES", 6)?,
-
-            s3_bucket: env("DOCSRS_S3_BUCKET", "rust-docs-rs".to_string())?,
-            s3_region: env("S3_REGION", "us-west-1".to_string())?,
-            s3_endpoint: maybe_env("S3_ENDPOINT")?,
-            // DO NOT CONFIGURE THIS THROUGH AN ENVIRONMENT VARIABLE!
-            // Accidentally turning this on outside of the test suite might cause data loss in the
-            // production environment.
-            #[cfg(test)]
-            s3_bucket_is_temporary: false,
-
-            s3_static_root_path: env(
+            )?)
+            .prefix(prefix.clone())
+            .database_url(require_env("DOCSRS_DATABASE_URL")?)
+            .max_pool_size(env("DOCSRS_MAX_POOL_SIZE", 90u32)?)
+            .min_pool_idle(env("DOCSRS_MIN_POOL_IDLE", 10u32)?)
+            .storage_backend(env("DOCSRS_STORAGE_BACKEND", StorageKind::Database)?)
+            .aws_sdk_max_retries(env("DOCSRS_AWS_SDK_MAX_RETRIES", 6u32)?)
+            .s3_bucket(env("DOCSRS_S3_BUCKET", "rust-docs-rs".to_string())?)
+            .s3_region(env("S3_REGION", "us-west-1".to_string())?)
+            .s3_endpoint(maybe_env("S3_ENDPOINT")?)
+            .s3_static_root_path(env(
                 "DOCSRS_S3_STATIC_ROOT_PATH",
                 "https://static.docs.rs".to_string(),
-            )?,
-
-            github_accesstoken: maybe_env("DOCSRS_GITHUB_ACCESSTOKEN")?,
-            github_updater_min_rate_limit: env("DOCSRS_GITHUB_UPDATER_MIN_RATE_LIMIT", 2500)?,
-
-            gitlab_accesstoken: maybe_env("DOCSRS_GITLAB_ACCESSTOKEN")?,
-
-            cratesio_token: maybe_env("DOCSRS_CRATESIO_TOKEN")?,
-
-            max_file_size: env("DOCSRS_MAX_FILE_SIZE", 50 * 1024 * 1024)?,
-            max_file_size_html: env("DOCSRS_MAX_FILE_SIZE_HTML", 50 * 1024 * 1024)?,
+            )?)
+            .github_accesstoken(maybe_env("DOCSRS_GITHUB_ACCESSTOKEN")?)
+            .github_updater_min_rate_limit(env("DOCSRS_GITHUB_UPDATER_MIN_RATE_LIMIT", 2500u32)?)
+            .gitlab_accesstoken(maybe_env("DOCSRS_GITLAB_ACCESSTOKEN")?)
+            .cratesio_token(maybe_env("DOCSRS_CRATESIO_TOKEN")?)
+            .max_file_size(env("DOCSRS_MAX_FILE_SIZE", 50 * 1024 * 1024)?)
+            .max_file_size_html(env("DOCSRS_MAX_FILE_SIZE_HTML", 50 * 1024 * 1024)?)
             // LOL HTML only uses as much memory as the size of the start tag!
             // https://github.com/rust-lang/docs.rs/pull/930#issuecomment-667729380
-            max_parse_memory: env("DOCSRS_MAX_PARSE_MEMORY", 5 * 1024 * 1024)?,
-            registry_gc_interval: env("DOCSRS_REGISTRY_GC_INTERVAL", 60 * 60)?,
-            render_threads: env("DOCSRS_RENDER_THREADS", num_cpus::get())?,
-            request_timeout: maybe_env::<u64>("DOCSRS_REQUEST_TIMEOUT")?.map(Duration::from_secs),
-            report_request_timeouts: env("DOCSRS_REPORT_REQUEST_TIMEOUTS", false)?,
-
-            random_crate_search_view_size: env("DOCSRS_RANDOM_CRATE_SEARCH_VIEW_SIZE", 500)?,
-
-            csp_report_only: env("DOCSRS_CSP_REPORT_ONLY", false)?,
-
-            cache_control_stale_while_revalidate: maybe_env(
+            .max_parse_memory(env("DOCSRS_MAX_PARSE_MEMORY", 5 * 1024 * 1024)?)
+            .registry_gc_interval(env("DOCSRS_REGISTRY_GC_INTERVAL", 60 * 60)?)
+            .render_threads(env("DOCSRS_RENDER_THREADS", num_cpus::get())?)
+            .request_timeout(maybe_env::<u64>("DOCSRS_REQUEST_TIMEOUT")?.map(Duration::from_secs))
+            .report_request_timeouts(env("DOCSRS_REPORT_REQUEST_TIMEOUTS", false)?)
+            .random_crate_search_view_size(env("DOCSRS_RANDOM_CRATE_SEARCH_VIEW_SIZE", 500)?)
+            .csp_report_only(env("DOCSRS_CSP_REPORT_ONLY", false)?)
+            .cache_control_stale_while_revalidate(maybe_env(
                 "CACHE_CONTROL_STALE_WHILE_REVALIDATE",
-            )?,
-
-            cache_invalidatable_responses: env("DOCSRS_CACHE_INVALIDATEABLE_RESPONSES", true)?,
-
-            cdn_backend: env("DOCSRS_CDN_BACKEND", CdnKind::Dummy)?,
-            cdn_max_queued_age: Duration::from_secs(env("DOCSRS_CDN_MAX_QUEUED_AGE", 3600)?),
-
-            cloudfront_distribution_id_web: maybe_env("CLOUDFRONT_DISTRIBUTION_ID_WEB")?,
-            cloudfront_distribution_id_static: maybe_env("CLOUDFRONT_DISTRIBUTION_ID_STATIC")?,
-
-            local_archive_cache_path: env(
+            )?)
+            .cache_invalidatable_responses(env("DOCSRS_CACHE_INVALIDATEABLE_RESPONSES", true)?)
+            .cdn_backend(env("DOCSRS_CDN_BACKEND", CdnKind::Dummy)?)
+            .cdn_max_queued_age(Duration::from_secs(env("DOCSRS_CDN_MAX_QUEUED_AGE", 3600)?))
+            .cloudfront_distribution_id_web(maybe_env("CLOUDFRONT_DISTRIBUTION_ID_WEB")?)
+            .cloudfront_distribution_id_static(maybe_env("CLOUDFRONT_DISTRIBUTION_ID_STATIC")?)
+            .local_archive_cache_path(env(
                 "DOCSRS_ARCHIVE_INDEX_CACHE_PATH",
                 prefix.join("archive_cache"),
-            )?,
-
-            compiler_metrics_collection_path: maybe_env("DOCSRS_COMPILER_METRICS_PATH")?,
-
-            temp_dir,
-
-            rustwide_workspace: env("DOCSRS_RUSTWIDE_WORKSPACE", PathBuf::from(".workspace"))?,
-            inside_docker: env("DOCSRS_DOCKER", false)?,
-            docker_image: maybe_env("DOCSRS_LOCAL_DOCKER_IMAGE")?
-                .or(maybe_env("DOCSRS_DOCKER_IMAGE")?),
-            build_cpu_limit: maybe_env("DOCSRS_BUILD_CPU_LIMIT")?,
-            build_default_memory_limit: maybe_env("DOCSRS_BUILD_DEFAULT_MEMORY_LIMIT")?,
-            include_default_targets: env("DOCSRS_INCLUDE_DEFAULT_TARGETS", true)?,
-            disable_memory_limit: env("DOCSRS_DISABLE_MEMORY_LIMIT", false)?,
-            build_workspace_reinitialization_interval: Duration::from_secs(env(
+            )?)
+            .compiler_metrics_collection_path(maybe_env("DOCSRS_COMPILER_METRICS_PATH")?)
+            .temp_dir(temp_dir)
+            .rustwide_workspace(env(
+                "DOCSRS_RUSTWIDE_WORKSPACE",
+                PathBuf::from(".workspace"),
+            )?)
+            .inside_docker(env("DOCSRS_DOCKER", false)?)
+            .docker_image(
+                maybe_env("DOCSRS_LOCAL_DOCKER_IMAGE")?.or(maybe_env("DOCSRS_DOCKER_IMAGE")?),
+            )
+            .build_cpu_limit(maybe_env("DOCSRS_BUILD_CPU_LIMIT")?)
+            .build_default_memory_limit(maybe_env("DOCSRS_BUILD_DEFAULT_MEMORY_LIMIT")?)
+            .include_default_targets(env("DOCSRS_INCLUDE_DEFAULT_TARGETS", true)?)
+            .disable_memory_limit(env("DOCSRS_DISABLE_MEMORY_LIMIT", false)?)
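// Assumed semantics of the three helpers used throughout this chain, inferred
// from their call sites here (illustrative shapes, not the exact signatures):
//
//     require_env::<T>("VAR")?   -> T          // error when VAR is unset
//     env::<T>("VAR", default)?  -> T          // parse VAR, falling back to `default`
//     maybe_env::<T>("VAR")?     -> Option<T>  // None when VAR is unset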
+            .build_workspace_reinitialization_interval(Duration::from_secs(env(
                 "DOCSRS_BUILD_WORKSPACE_REINITIALIZATION_INTERVAL",
                 86400,
-            )?),
-            max_queued_rebuilds: maybe_env("DOCSRS_MAX_QUEUED_REBUILDS")?,
-        })
+            )?))
+            .max_queued_rebuilds(maybe_env("DOCSRS_MAX_QUEUED_REBUILDS")?))
     }
 }
diff --git a/src/context.rs b/src/context.rs
index 9629790ed..008d1a3b7 100644
--- a/src/context.rs
+++ b/src/context.rs
@@ -1,27 +1,103 @@
 use crate::cdn::CdnBackend;
 use crate::db::Pool;
-use crate::error::Result;
 use crate::repositories::RepositoryStatsUpdater;
 use crate::{
     AsyncBuildQueue, AsyncStorage, BuildQueue, Config, Index, InstanceMetrics, RegistryApi,
     ServiceMetrics, Storage,
 };
-use std::{future::Future, sync::Arc};
-use tokio::runtime::Runtime;
-
-pub trait Context {
-    fn config(&self) -> Result<Arc<Config>>;
-    fn async_build_queue(&self) -> impl Future<Output = Result<Arc<AsyncBuildQueue>>> + Send;
-    fn build_queue(&self) -> Result<Arc<BuildQueue>>;
-    fn storage(&self) -> Result<Arc<Storage>>;
-    fn async_storage(&self) -> impl Future<Output = Result<Arc<AsyncStorage>>> + Send;
-    fn cdn(&self) -> impl Future<Output = Result<Arc<CdnBackend>>> + Send;
-    fn pool(&self) -> Result<Pool>;
-    fn async_pool(&self) -> impl Future<Output = Result<Pool>> + Send;
-    fn service_metrics(&self) -> Result<Arc<ServiceMetrics>>;
-    fn instance_metrics(&self) -> Result<Arc<InstanceMetrics>>;
-    fn index(&self) -> Result<Arc<Index>>;
-    fn registry_api(&self) -> Result<Arc<RegistryApi>>;
-    fn repository_stats_updater(&self) -> Result<Arc<RepositoryStatsUpdater>>;
-    fn runtime(&self) -> Result<Arc<Runtime>>;
+use anyhow::Result;
+use std::sync::Arc;
+use tokio::runtime;
+
+pub struct Context {
+    pub config: Arc<Config>,
+    pub async_build_queue: Arc<AsyncBuildQueue>,
+    pub build_queue: Arc<BuildQueue>,
+    pub storage: Arc<Storage>,
+    pub async_storage: Arc<AsyncStorage>,
+    pub cdn: Arc<CdnBackend>,
+    pub pool: Pool,
+    pub service_metrics: Arc<ServiceMetrics>,
+    pub instance_metrics: Arc<InstanceMetrics>,
+    pub index: Arc<Index>,
+    pub registry_api: Arc<RegistryApi>,
+    pub repository_stats_updater: Arc<RepositoryStatsUpdater>,
+    pub runtime: runtime::Handle,
+}
+
+impl Context {
+    /// Create a new context environment from the given configuration.
+    #[cfg(not(test))]
+    pub async fn from_config(config: Config) -> Result<Self> {
+        let instance_metrics = Arc::new(InstanceMetrics::new()?);
+        let pool = Pool::new(&config, instance_metrics.clone()).await?;
+        Self::from_config_with_metrics_and_pool(config, instance_metrics, pool).await
+    }
+
+    /// Create a new context environment from the given configuration, for running tests.
+    #[cfg(test)]
+    pub async fn from_config(
+        config: Config,
+        instance_metrics: Arc<InstanceMetrics>,
+        pool: Pool,
+    ) -> Result<Self> {
+        Self::from_config_with_metrics_and_pool(config, instance_metrics, pool).await
+    }
+
+    /// private function for context environment generation, allows passing in a
+    /// preconfigured instance metrics & pool from the database.
+    /// Mostly so we can support test environments with their db
+    async fn from_config_with_metrics_and_pool(
+        config: Config,
+        instance_metrics: Arc<InstanceMetrics>,
+        pool: Pool,
+    ) -> Result<Self> {
+        let config = Arc::new(config);
+
+        let async_storage = Arc::new(
+            AsyncStorage::new(pool.clone(), instance_metrics.clone(), config.clone()).await?,
+        );
+
+        let async_build_queue = Arc::new(AsyncBuildQueue::new(
+            pool.clone(),
+            instance_metrics.clone(),
+            config.clone(),
+            async_storage.clone(),
+        ));
+
+        let cdn = Arc::new(CdnBackend::new(&config).await);
+
+        let index = Arc::new({
+            let path = config.registry_index_path.clone();
+            if let Some(registry_url) = config.registry_url.clone() {
+                Index::from_url(path, registry_url)
+            } else {
+                Index::new(path)
+            }?
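// Note for the code just below: from_config_with_metrics_and_pool() captures
// runtime::Handle::current(), so Context::from_config() must itself run inside a
// tokio runtime (e.g. a #[tokio::test(flavor = "multi_thread")] body); the sync
// BuildQueue/Storage wrappers then block_on() that same handle.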
+ }); + + let runtime = runtime::Handle::current(); + // sync wrappers around build-queue & storage async resources + let build_queue = Arc::new(BuildQueue::new(runtime.clone(), async_build_queue.clone())); + let storage = Arc::new(Storage::new(async_storage.clone(), runtime.clone())); + + Ok(Self { + async_build_queue, + build_queue, + storage, + async_storage, + cdn, + pool: pool.clone(), + service_metrics: Arc::new(ServiceMetrics::new()?), + instance_metrics, + index, + registry_api: Arc::new(RegistryApi::new( + config.registry_api_host.clone(), + config.crates_io_api_call_retries, + )?), + repository_stats_updater: Arc::new(RepositoryStatsUpdater::new(&config, pool)), + runtime, + config, + }) + } } diff --git a/src/db/add_package.rs b/src/db/add_package.rs index dcff5b1e3..8b5c0b4c7 100644 --- a/src/db/add_package.rs +++ b/src/db/add_package.rs @@ -655,7 +655,7 @@ mod test { #[test] fn test_set_build_to_error() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let release_id = initialize_release(&mut conn, crate_id, "0.1.0").await?; let build_id = initialize_build(&mut conn, release_id).await?; @@ -689,7 +689,7 @@ mod test { #[test] fn test_finish_build_success_valid_rustc_date() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let release_id = initialize_release(&mut conn, crate_id, "0.1.0").await?; let build_id = initialize_build(&mut conn, release_id).await?; @@ -738,7 +738,7 @@ mod test { #[test] fn test_finish_build_success_invalid_rustc_date() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let release_id = initialize_release(&mut conn, crate_id, "0.1.0").await?; let build_id = initialize_build(&mut conn, release_id).await?; @@ -783,7 +783,7 @@ mod test { #[test] fn test_finish_build_error() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let release_id = initialize_release(&mut conn, crate_id, "0.1.0").await?; let build_id = initialize_build(&mut conn, release_id).await?; @@ -826,7 +826,7 @@ mod test { #[test] fn new_keywords() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let release_id = env .fake_release() @@ -913,7 +913,7 @@ mod test { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let kw_r = sqlx::query!( r#"SELECT kw.name as "name!", @@ -957,7 +957,7 @@ mod test { #[test] fn new_owner_long_avatar() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let owner1 = CrateOwner { @@ -997,7 +997,7 @@ mod test { #[test] fn new_owners() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; 
let owner1 = CrateOwner { @@ -1037,7 +1037,7 @@ mod test { #[test] fn update_owner_details() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; // set initial owner details @@ -1086,7 +1086,7 @@ mod test { #[test] fn add_new_owners_and_delete_old() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; // set initial owner details @@ -1203,7 +1203,7 @@ mod test { #[test] fn test_initialize_crate() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let name = "krate"; let crate_id = initialize_crate(&mut conn, name).await?; @@ -1227,7 +1227,7 @@ mod test { #[test] fn test_initialize_release() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let name = "krate"; let version = "0.1.0"; let crate_id = initialize_crate(&mut conn, name).await?; @@ -1254,7 +1254,7 @@ mod test { #[test] fn test_initialize_build() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let name = "krate"; let version = "0.1.0"; let crate_id = initialize_crate(&mut conn, name).await?; @@ -1281,7 +1281,7 @@ mod test { #[test] fn test_long_crate_name() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let name: String = "krate".repeat(100); let crate_id = initialize_crate(&mut conn, &name).await?; @@ -1299,7 +1299,7 @@ mod test { #[test] fn test_long_release_version() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let crate_id = initialize_crate(&mut conn, "krate").await?; let version: String = "version".repeat(100); diff --git a/src/db/blacklist.rs b/src/db/blacklist.rs index f4e104521..c4200c893 100644 --- a/src/db/blacklist.rs +++ b/src/db/blacklist.rs @@ -71,7 +71,7 @@ mod tests { #[test] fn test_list_blacklist() { crate::test::async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; // crates are added out of order to verify sorting add_crate(&mut conn, "crate A").await?; @@ -86,7 +86,7 @@ mod tests { #[test] fn test_add_to_and_remove_from_blacklist() { crate::test::async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; assert!(!is_blacklisted(&mut conn, "crate foo").await?); add_crate(&mut conn, "crate foo").await?; @@ -100,7 +100,7 @@ mod tests { #[test] fn test_add_twice_to_blacklist() { crate::test::async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; add_crate(&mut conn, "crate foo").await?; assert!(add_crate(&mut conn, "crate foo").await.is_err()); @@ -113,7 +113,7 @@ mod tests { #[test] fn test_remove_non_existing_crate() { crate::test::async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; 
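// Accessor change applied mechanically throughout these tests: env.async_db(),
// env.config(), env.cdn() and env.async_storage() no longer return futures or
// Arcs to await and deref (the old `&*env.async_storage().await` shape); the
// TestEnvironment builds its Context up front, so plain references come back.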
assert!(remove_crate(&mut conn, "crate foo").await.is_err()); diff --git a/src/db/delete.rs b/src/db/delete.rs index 6a7135823..11ba12019 100644 --- a/src/db/delete.rs +++ b/src/db/delete.rs @@ -250,7 +250,7 @@ mod tests { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; assert!(get_id(&mut conn, "some-package").await.is_ok()); Ok(()) @@ -261,7 +261,7 @@ mod tests { #[test_case(false)] fn test_delete_crate(archive_storage: bool) { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; // Create fake packages in the database let pkg1_v1_id = env @@ -300,7 +300,6 @@ mod tests { ] { assert!( env.async_storage() - .await .rustdoc_file_exists( pkg, version, @@ -312,13 +311,7 @@ mod tests { ); } - delete_crate( - &mut conn, - &*env.async_storage().await, - &env.config(), - "package-1", - ) - .await?; + delete_crate(&mut conn, env.async_storage(), env.config(), "package-1").await?; assert!(!crate_exists(&mut conn, "package-1").await?); assert!(crate_exists(&mut conn, "package-2").await?); @@ -329,7 +322,6 @@ mod tests { // files for package 2 still exists assert!( env.async_storage() - .await .rustdoc_file_exists( "package-2", "1.0.0", @@ -344,20 +336,17 @@ mod tests { if archive_storage { assert!( !env.async_storage() - .await .exists(&rustdoc_archive_path("package-1", "1.0.0")) .await? ); assert!( !env.async_storage() - .await .exists(&rustdoc_archive_path("package-1", "2.0.0")) .await? ); } else { assert!( !env.async_storage() - .await .rustdoc_file_exists( "package-1", "1.0.0", @@ -369,7 +358,6 @@ mod tests { ); assert!( !env.async_storage() - .await .rustdoc_file_exists( "package-1", "2.0.0", @@ -418,7 +406,7 @@ mod tests { .await } - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let v1 = env .fake_release() .await @@ -435,11 +423,10 @@ mod tests { assert!(release_exists(&mut conn, v1).await?); assert!( env.async_storage() - .await .rustdoc_file_exists("a", "1.0.0", None, "a/index.html", archive_storage) .await? ); - assert!(json_exists(&*env.async_storage().await, "1.0.0").await?); + assert!(json_exists(env.async_storage(), "1.0.0").await?); let crate_id = sqlx::query_scalar!( r#"SELECT crate_id as "crate_id: CrateId" FROM releases WHERE id = $1"#, v1.0 @@ -467,34 +454,26 @@ mod tests { assert!(release_exists(&mut conn, v2).await?); assert!( env.async_storage() - .await .rustdoc_file_exists("a", "2.0.0", None, "a/index.html", archive_storage) .await? ); - assert!(json_exists(&*env.async_storage().await, "2.0.0").await?); + assert!(json_exists(env.async_storage(), "2.0.0").await?); assert_eq!( owners(&mut conn, crate_id).await?, vec!["Peter Rabbit".to_string()] ); - delete_version( - &mut conn, - &*env.async_storage().await, - &env.config(), - "a", - "1.0.0", - ) - .await?; + delete_version(&mut conn, env.async_storage(), env.config(), "a", "1.0.0").await?; assert!(!release_exists(&mut conn, v1).await?); if archive_storage { // for archive storage the archive and index files // need to be cleaned up. 
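// Assumed archive-mode storage layout being checked below (helper names are the
// ones used in this file; the exact key format comes from rustdoc_archive_path()):
//
//     rustdoc/{name}/{version}.zip                                    // remote rustdoc archive
//     rustdoc/{name}/{version}.zip.index                              // remote archive index
//     {local_archive_cache_path}/rustdoc/{name}/{version}.zip.index   // locally cached index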
let rustdoc_archive = rustdoc_archive_path("a", "1.0.0"); - assert!(!env.async_storage().await.exists(&rustdoc_archive).await?); + assert!(!env.async_storage().exists(&rustdoc_archive).await?); // local and remote index are gone too let archive_index = format!("{rustdoc_archive}.index"); - assert!(!env.async_storage().await.exists(&archive_index).await?); + assert!(!env.async_storage().exists(&archive_index).await?); assert!( !env.config() .local_archive_cache_path @@ -504,21 +483,19 @@ mod tests { } else { assert!( !env.async_storage() - .await .rustdoc_file_exists("a", "1.0.0", None, "a/index.html", archive_storage) .await? ); } - assert!(!json_exists(&*env.async_storage().await, "1.0.0").await?); + assert!(!json_exists(env.async_storage(), "1.0.0").await?); assert!(release_exists(&mut conn, v2).await?); assert!( env.async_storage() - .await .rustdoc_file_exists("a", "2.0.0", None, "a/index.html", archive_storage) .await? ); - assert!(json_exists(&*env.async_storage().await, "2.0.0").await?); + assert!(json_exists(env.async_storage(), "2.0.0").await?); assert_eq!( owners(&mut conn, crate_id).await?, vec!["Peter Rabbit".to_string()] @@ -536,21 +513,14 @@ mod tests { #[test] fn test_delete_incomplete_version() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; let (release_id, _) = fake_release_that_failed_before_build(&mut conn, "a", "1.0.0", "some-error") .await?; - delete_version( - &mut conn, - &*env.async_storage().await, - &env.config(), - "a", - "1.0.0", - ) - .await?; + delete_version(&mut conn, env.async_storage(), env.config(), "a", "1.0.0").await?; assert!(!release_exists(&mut conn, release_id).await?); @@ -561,14 +531,14 @@ mod tests { #[test] fn test_delete_incomplete_crate() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; let (release_id, _) = fake_release_that_failed_before_build(&mut conn, "a", "1.0.0", "some-error") .await?; - delete_crate(&mut conn, &*env.async_storage().await, &env.config(), "a").await?; + delete_crate(&mut conn, env.async_storage(), env.config(), "a").await?; assert!(!crate_exists(&mut conn, "a").await?); assert!(!release_exists(&mut conn, release_id).await?); diff --git a/src/db/overrides.rs b/src/db/overrides.rs index a11e0f3e8..4329c1c0e 100644 --- a/src/db/overrides.rs +++ b/src/db/overrides.rs @@ -91,7 +91,7 @@ mod test { #[test] fn retrieve_overrides() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; let krate = "hexponent"; diff --git a/src/db/pool.rs b/src/db/pool.rs index 96ecf8b71..d6da89dc9 100644 --- a/src/db/pool.rs +++ b/src/db/pool.rs @@ -7,7 +7,7 @@ use std::{ sync::Arc, time::Duration, }; -use tokio::runtime::Runtime; +use tokio::runtime; use tracing::debug; const DEFAULT_SCHEMA: &str = "public"; @@ -15,36 +15,30 @@ const DEFAULT_SCHEMA: &str = "public"; #[derive(Debug, Clone)] pub struct Pool { async_pool: sqlx::PgPool, - runtime: Arc, + runtime: runtime::Handle, metrics: Arc, max_size: u32, } impl Pool { - pub fn new( - config: &Config, - runtime: Arc, - metrics: Arc, - ) -> Result { + pub async fn new(config: &Config, metrics: Arc) -> Result { debug!( "creating database pool (if this hangs, consider running `docker-compose up -d db s3`)" ); - Self::new_inner(config, runtime, metrics, DEFAULT_SCHEMA) + Self::new_inner(config, metrics, DEFAULT_SCHEMA).await } #[cfg(test)] - pub(crate) fn 
new_with_schema( + pub(crate) async fn new_with_schema( config: &Config, - runtime: Arc, metrics: Arc, schema: &str, ) -> Result { - Self::new_inner(config, runtime, metrics, schema) + Self::new_inner(config, metrics, schema).await } - fn new_inner( + async fn new_inner( config: &Config, - runtime: Arc, metrics: Arc, schema: &str, ) -> Result { @@ -52,7 +46,6 @@ impl Pool { let max_lifetime = Duration::from_secs(30 * 60); let idle_timeout = Duration::from_secs(10 * 60); - let _guard = runtime.enter(); let async_pool = PgPoolOptions::new() .max_connections(config.max_pool_size) .min_connections(config.min_pool_idle) @@ -85,7 +78,7 @@ impl Pool { Ok(Pool { async_pool, metrics, - runtime, + runtime: runtime::Handle::current(), max_size: config.max_pool_size, }) } @@ -175,7 +168,7 @@ where #[derive(Debug)] pub struct AsyncPoolClient { inner: Option>, - runtime: Arc, + runtime: runtime::Handle, } impl Deref for AsyncPoolClient { diff --git a/src/docbuilder/limits.rs b/src/docbuilder/limits.rs index 815169272..f1e21a27c 100644 --- a/src/docbuilder/limits.rs +++ b/src/docbuilder/limits.rs @@ -76,14 +76,14 @@ mod test { #[test] fn retrieve_limits() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; - let defaults = Limits::new(&env.config()); + let defaults = Limits::new(env.config()); let krate = "hexponent"; // limits work if no crate has limits set - let hexponent = Limits::for_crate(&env.config(), &mut conn, krate).await?; + let hexponent = Limits::for_crate(env.config(), &mut conn, krate).await?; assert_eq!(hexponent, defaults); Overrides::save( @@ -96,7 +96,7 @@ mod test { ) .await?; // limits work if crate has limits set - let hexponent = Limits::for_crate(&env.config(), &mut conn, krate).await?; + let hexponent = Limits::for_crate(env.config(), &mut conn, krate).await?; assert_eq!( hexponent, Limits { @@ -125,7 +125,7 @@ mod test { .await?; assert_eq!( limits, - Limits::for_crate(&env.config(), &mut conn, krate).await? + Limits::for_crate(env.config(), &mut conn, krate).await? 
); Ok(()) }) @@ -134,7 +134,7 @@ mod test { #[test] fn targets_default_to_one_with_timeout() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; let krate = "hexponent"; Overrides::save( @@ -146,37 +146,38 @@ mod test { }, ) .await?; - let limits = Limits::for_crate(&env.config(), &mut conn, krate).await?; + let limits = Limits::for_crate(env.config(), &mut conn, krate).await?; assert_eq!(limits.targets, 1); Ok(()) }) } - #[test] - fn config_default_memory_limit() { - async_wrapper(|env| async move { - env.override_config(|config| { - config.build_default_memory_limit = Some(6 * GB); - }); + #[tokio::test(flavor = "multi_thread")] + async fn config_default_memory_limit() -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .build_default_memory_limit(Some(6 * GB)) + .build()?, + ) + .await?; - let db = env.async_db().await; - let mut conn = db.async_conn().await; + let db = env.async_db(); + let mut conn = db.async_conn().await; - let limits = Limits::for_crate(&env.config(), &mut conn, "krate").await?; - assert_eq!(limits.memory, 6 * GB); + let limits = Limits::for_crate(env.config(), &mut conn, "krate").await?; + assert_eq!(limits.memory, 6 * GB); - Ok(()) - }) + Ok(()) } #[test] fn overrides_dont_lower_memory_limit() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; - let defaults = Limits::new(&env.config()); + let defaults = Limits::new(env.config()); Overrides::save( &mut conn, @@ -188,7 +189,7 @@ mod test { ) .await?; - let limits = Limits::for_crate(&env.config(), &mut conn, "krate").await?; + let limits = Limits::for_crate(env.config(), &mut conn, "krate").await?; assert_eq!(limits, defaults); Ok(()) diff --git a/src/docbuilder/rustwide_builder.rs b/src/docbuilder/rustwide_builder.rs index 6cfc45a8a..2aeda4c2a 100644 --- a/src/docbuilder/rustwide_builder.rs +++ b/src/docbuilder/rustwide_builder.rs @@ -37,7 +37,7 @@ use std::io::BufReader; use std::path::Path; use std::sync::Arc; use std::time::Instant; -use tokio::runtime::Runtime; +use tokio::runtime; use tracing::{debug, error, info, info_span, instrument, warn}; const USER_AGENT: &str = "docs.rs builder (https://github.com/rust-lang/docs.rs)"; @@ -83,12 +83,10 @@ async fn get_configured_toolchain(conn: &mut sqlx::PgConnection) -> Result(context: &C) -> Result { - let config = context.config()?; - - let mut builder = WorkspaceBuilder::new(&config.rustwide_workspace, USER_AGENT) - .running_inside_docker(config.inside_docker); - if let Some(custom_image) = &config.docker_image { +fn build_workspace(context: &Context) -> Result { + let mut builder = WorkspaceBuilder::new(&context.config.rustwide_workspace, USER_AGENT) + .running_inside_docker(context.config.inside_docker); + if let Some(custom_image) = &context.config.docker_image { let image = match SandboxImage::local(custom_image) { Ok(i) => i, Err(CommandError::SandboxImageMissing(_)) => SandboxImage::remote(custom_image)?, @@ -115,7 +113,7 @@ pub enum PackageKind<'a> { pub struct RustwideBuilder { workspace: Workspace, toolchain: Toolchain, - runtime: Arc, + runtime: runtime::Handle, config: Arc, db: Pool, storage: Arc, @@ -127,35 +125,29 @@ pub struct RustwideBuilder { } impl RustwideBuilder { - pub fn init(context: &C) -> Result { - let config = context.config()?; - let pool = context.pool()?; - let runtime = context.runtime()?; - let toolchain = runtime.block_on(async { - 
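// RustwideBuilder stays synchronous (rustwide's API is blocking), so instead of
// the old Context-trait getters it now clones what it needs out of the concrete
// Context struct and keeps a runtime::Handle, block_on()-ing the few async steps
// it has, such as loading the configured toolchain through the async pool.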
let mut conn = pool.get_async().await?; + pub fn init(context: &Context) -> Result { + let toolchain = context.runtime.block_on(async { + let mut conn = context.pool.get_async().await?; get_configured_toolchain(&mut conn).await })?; Ok(RustwideBuilder { workspace: build_workspace(context)?, toolchain, - config, - db: pool, - runtime: runtime.clone(), - storage: context.storage()?, - async_storage: runtime.block_on(context.async_storage())?, - metrics: context.instance_metrics()?, - registry_api: context.registry_api()?, - repository_stats_updater: context.repository_stats_updater()?, + config: context.config.clone(), + db: context.pool.clone(), + runtime: context.runtime.clone(), + storage: context.storage.clone(), + async_storage: context.async_storage.clone(), + metrics: context.instance_metrics.clone(), + registry_api: context.registry_api.clone(), + repository_stats_updater: context.repository_stats_updater.clone(), workspace_initialize_time: Instant::now(), }) } - pub fn reinitialize_workspace_if_interval_passed( - &mut self, - context: &C, - ) -> Result<()> { - let interval = context.config()?.build_workspace_reinitialization_interval; + pub fn reinitialize_workspace_if_interval_passed(&mut self, context: &Context) -> Result<()> { + let interval = context.config.build_workspace_reinitialization_interval; if self.workspace_initialize_time.elapsed() >= interval { info!("start reinitialize workspace again"); self.workspace = build_workspace(context)?; @@ -1293,7 +1285,7 @@ mod tests { use crate::db::types::Feature; use crate::registry_api::ReleaseData; use crate::storage::{CompressionAlgorithm, compression}; - use crate::test::{AxumRouterTestExt, TestEnvironment, wrapper}; + use crate::test::{AxumRouterTestExt, TestEnvironment}; use pretty_assertions::assert_eq; use std::{io, iter}; use test_case::test_case; @@ -1304,7 +1296,7 @@ mod tests { version: &str, ) -> Result>, sqlx::Error> { env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; sqlx::query_scalar!( r#"SELECT releases.features "features?: Vec" @@ -1351,32 +1343,33 @@ mod tests { #[test] #[ignore] - fn test_build_crate() { - wrapper(|env| { - let crate_ = DUMMY_CRATE_NAME; - let crate_path = crate_.replace('-', "_"); - let version = DUMMY_CRATE_VERSION; - let default_target = "x86_64-unknown-linux-gnu"; - - let storage = env.storage(); - let old_rustdoc_file = format!("rustdoc/{crate_}/{version}/some_doc_file"); - let old_source_file = format!("sources/{crate_}/{version}/some_source_file"); - storage.store_one(&old_rustdoc_file, Vec::new())?; - storage.store_one(&old_source_file, Vec::new())?; - - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? 
- .successful - ); + fn test_build_crate() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let crate_ = DUMMY_CRATE_NAME; + let crate_path = crate_.replace('-', "_"); + let version = DUMMY_CRATE_VERSION; + let default_target = "x86_64-unknown-linux-gnu"; + + let storage = env.storage(); + let old_rustdoc_file = format!("rustdoc/{crate_}/{version}/some_doc_file"); + let old_source_file = format!("sources/{crate_}/{version}/some_source_file"); + storage.store_one(&old_rustdoc_file, Vec::new())?; + storage.store_one(&old_source_file, Vec::new())?; + + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); - // check release record in the db (default and other targets) - let row = env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; - sqlx::query!( - r#"SELECT + // check release record in the db (default and other targets) + let row = env.runtime().block_on(async { + let mut conn = env.async_db().async_conn().await; + sqlx::query!( + r#"SELECT r.rustdoc_status, r.default_target, r.doc_targets, @@ -1396,214 +1389,209 @@ mod tests { WHERE c.name = $1 AND r.version = $2"#, - crate_, - version, - ) - .fetch_one(&mut *conn) - .await - })?; + crate_, + version, + ) + .fetch_one(&mut *conn) + .await + })?; - assert_eq!(row.rustdoc_status, Some(true)); - assert_eq!(row.default_target, Some(default_target.into())); - assert!(row.total_items.is_some()); - assert!(row.archive_storage); - assert!(!row.docsrs_version.unwrap().is_empty()); - assert!(!row.rustc_version.unwrap().is_empty()); - assert_eq!(row.build_status.unwrap(), "success"); - assert!(row.source_size > 0); - assert!(row.documentation_size.unwrap() > 0); - - let mut targets: Vec = row - .doc_targets - .unwrap() - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap().to_owned()) - .collect(); - targets.sort(); + assert_eq!(row.rustdoc_status, Some(true)); + assert_eq!(row.default_target, Some(default_target.into())); + assert!(row.total_items.is_some()); + assert!(row.archive_storage); + assert!(!row.docsrs_version.unwrap().is_empty()); + assert!(!row.rustc_version.unwrap().is_empty()); + assert_eq!(row.build_status.unwrap(), "success"); + assert!(row.source_size > 0); + assert!(row.documentation_size.unwrap() > 0); + + let mut targets: Vec = row + .doc_targets + .unwrap() + .as_array() + .unwrap() + .iter() + .map(|v| v.as_str().unwrap().to_owned()) + .collect(); + targets.sort(); + + let runtime = env.runtime(); + let web = runtime.block_on(env.web_app()); + + // old rustdoc & source files are gone + assert!(!storage.exists(&old_rustdoc_file)?); + assert!(!storage.exists(&old_source_file)?); + + // doc archive exists + let doc_archive = rustdoc_archive_path(crate_, version); + assert!(storage.exists(&doc_archive)?, "{}", doc_archive); + + // source archive exists + let source_archive = source_archive_path(crate_, version); + assert!(storage.exists(&source_archive)?, "{}", source_archive); + + // default target was built and is accessible + assert!(storage.exists_in_archive( + &doc_archive, + None, + &format!("{crate_path}/index.html"), + )?); + runtime.block_on(web.assert_success(&format!("/{crate_}/{version}/{crate_path}/")))?; + + // source is also packaged + assert!(storage.exists_in_archive(&source_archive, None, "src/lib.rs",)?); + runtime.block_on( + web.assert_success(&format!("/crate/{crate_}/{version}/source/src/lib.rs")), 
+ )?; + assert!(!storage.exists_in_archive( + &doc_archive, + None, + &format!("{default_target}/{crate_path}/index.html"), + )?); + + let default_target_url = + format!("/{crate_}/{version}/{default_target}/{crate_path}/index.html"); + runtime.block_on(web.assert_redirect( + &default_target_url, + &format!("/{crate_}/{version}/{crate_path}/index.html"), + ))?; + + // Non-dist toolchains only have a single target, and of course + // if include_default_targets is false we won't have this full list + // of targets. + if builder.toolchain.as_dist().is_some() && env.config().include_default_targets { + assert_eq!( + targets, + vec![ + "aarch64-apple-darwin", + "aarch64-unknown-linux-gnu", + "i686-pc-windows-msvc", + "x86_64-pc-windows-msvc", + "x86_64-unknown-linux-gnu", + ] + ); - let runtime = env.runtime(); - let web = runtime.block_on(env.web_app()); + // other targets too + for target in DEFAULT_TARGETS { + for alg in RUSTDOC_JSON_COMPRESSION_ALGORITHMS { + // check if rustdoc json files exist for all targets + let path = rustdoc_json_path( + crate_, + version, + target, + RustdocJsonFormatVersion::Latest, + Some(*alg), + ); + assert!(storage.exists(&path)?); + assert!(storage.get_public_access(&path)?); + + let ext = compression::file_extension_for(*alg); + + let json_prefix = format!("rustdoc-json/{crate_}/{version}/{target}/"); + let mut json_files: Vec<_> = storage + .list_prefix(&json_prefix) + .filter_map(|res| res.ok()) + .map(|f| f.strip_prefix(&json_prefix).unwrap().to_owned()) + .collect(); + json_files.retain(|f| f.ends_with(&format!(".json.{ext}"))); + json_files.sort(); + dbg!(&json_files); + assert!(json_files[0].starts_with(&format!("empty-library_1.0.0_{target}_"))); + + assert!(json_files[0].ends_with(&format!(".json.{ext}"))); + assert_eq!( + json_files[1], + format!("empty-library_1.0.0_{target}_latest.json.{ext}") + ); + } - // old rustdoc & source files are gone - assert!(!storage.exists(&old_rustdoc_file)?); - assert!(!storage.exists(&old_source_file)?); + if target == &default_target { + continue; + } + let target_docs_present = storage.exists_in_archive( + &doc_archive, + None, + &format!("{target}/{crate_path}/index.html"), + )?; - // doc archive exists - let doc_archive = rustdoc_archive_path(crate_, version); - assert!(storage.exists(&doc_archive)?, "{}", doc_archive); + let target_url = format!("/{crate_}/{version}/{target}/{crate_path}/index.html"); - // source archive exists - let source_archive = source_archive_path(crate_, version); - assert!(storage.exists(&source_archive)?, "{}", source_archive); + assert!(target_docs_present); + runtime.block_on(web.assert_success(&target_url))?; - // default target was built and is accessible - assert!(storage.exists_in_archive( - &doc_archive, - None, - &format!("{crate_path}/index.html"), - )?); - runtime.block_on(web.assert_success(&format!("/{crate_}/{version}/{crate_path}/")))?; - - // source is also packaged - assert!(storage.exists_in_archive(&source_archive, None, "src/lib.rs",)?); - runtime.block_on( - web.assert_success(&format!("/crate/{crate_}/{version}/source/src/lib.rs")), - )?; - assert!(!storage.exists_in_archive( - &doc_archive, - None, - &format!("{default_target}/{crate_path}/index.html"), - )?); - - let default_target_url = - format!("/{crate_}/{version}/{default_target}/{crate_path}/index.html"); - runtime.block_on(web.assert_redirect( - &default_target_url, - &format!("/{crate_}/{version}/{crate_path}/index.html"), - ))?; - - // Non-dist toolchains only have a single target, and of course - // if 
include_default_targets is false we won't have this full list - // of targets. - if builder.toolchain.as_dist().is_some() && env.config().include_default_targets { - assert_eq!( - targets, - vec![ - "aarch64-apple-darwin", - "aarch64-unknown-linux-gnu", - "i686-pc-windows-msvc", - "x86_64-pc-windows-msvc", - "x86_64-unknown-linux-gnu", - ] + assert!( + storage + .exists(&format!("build-logs/{}/{target}.txt", row.build_id)) + .unwrap() ); - - // other targets too - for target in DEFAULT_TARGETS { - for alg in RUSTDOC_JSON_COMPRESSION_ALGORITHMS { - // check if rustdoc json files exist for all targets - let path = rustdoc_json_path( - crate_, - version, - target, - RustdocJsonFormatVersion::Latest, - Some(*alg), - ); - assert!(storage.exists(&path)?); - assert!(storage.get_public_access(&path)?); - - let ext = compression::file_extension_for(*alg); - - let json_prefix = format!("rustdoc-json/{crate_}/{version}/{target}/"); - let mut json_files: Vec<_> = storage - .list_prefix(&json_prefix) - .filter_map(|res| res.ok()) - .map(|f| f.strip_prefix(&json_prefix).unwrap().to_owned()) - .collect(); - json_files.retain(|f| f.ends_with(&format!(".json.{ext}"))); - json_files.sort(); - dbg!(&json_files); - assert!( - json_files[0].starts_with(&format!("empty-library_1.0.0_{target}_")) - ); - - assert!(json_files[0].ends_with(&format!(".json.{ext}"))); - assert_eq!( - json_files[1], - format!("empty-library_1.0.0_{target}_latest.json.{ext}") - ); - } - - if target == &default_target { - continue; - } - let target_docs_present = storage.exists_in_archive( - &doc_archive, - None, - &format!("{target}/{crate_path}/index.html"), - )?; - - let target_url = - format!("/{crate_}/{version}/{target}/{crate_path}/index.html"); - - assert!(target_docs_present); - runtime.block_on(web.assert_success(&target_url))?; - - assert!( - storage - .exists(&format!("build-logs/{}/{target}.txt", row.build_id)) - .unwrap() - ); - } } - - Ok(()) - }) + } + Ok(()) } #[test] #[ignore] - fn test_collect_metrics() { - wrapper(|env| { - let metrics_dir = tempfile::tempdir()?.keep(); - - env.override_config(|cfg| { - cfg.compiler_metrics_collection_path = Some(metrics_dir.clone()); - cfg.include_default_targets = false; - }); + fn test_collect_metrics() -> Result<()> { + let metrics_dir = tempfile::tempdir().unwrap().keep(); + let env = TestEnvironment::with_config_and_runtime( + TestEnvironment::base_config() + .compiler_metrics_collection_path(Some(metrics_dir.clone())) + .include_default_targets(false) + .build()?, + )?; - let crate_ = DUMMY_CRATE_NAME; - let version = DUMMY_CRATE_VERSION; + let crate_ = DUMMY_CRATE_NAME; + let version = DUMMY_CRATE_VERSION; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, true)? - .successful - ); + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, true)? + .successful + ); - let metric_files: Vec<_> = fs::read_dir(&metrics_dir)? - .filter_map(|di| di.ok()) - .map(|di| di.path()) - .collect(); + let metric_files: Vec<_> = fs::read_dir(&metrics_dir)? 
+ .filter_map(|di| di.ok()) + .map(|di| di.path()) + .collect(); - assert_eq!(metric_files.len(), 1); + assert_eq!(metric_files.len(), 1); - let _: serde_json::Value = serde_json::from_slice(&fs::read(&metric_files[0])?)?; + let _: serde_json::Value = serde_json::from_slice(&fs::read(&metric_files[0])?)?; - Ok(()) - }) + Ok(()) } #[test] #[ignore] - fn test_build_binary_crate() { - wrapper(|env| { - // some binary crate - let crate_ = "heater"; - let version = "0.2.3"; - - let storage = env.storage(); - let old_rustdoc_file = format!("rustdoc/{crate_}/{version}/some_doc_file"); - let old_source_file = format!("sources/{crate_}/{version}/some_source_file"); - storage.store_one(&old_rustdoc_file, Vec::new())?; - storage.store_one(&old_source_file, Vec::new())?; - - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - !builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_build_binary_crate() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + // some binary crate + let crate_ = "heater"; + let version = "0.2.3"; + + let storage = env.storage(); + let old_rustdoc_file = format!("rustdoc/{crate_}/{version}/some_doc_file"); + let old_source_file = format!("sources/{crate_}/{version}/some_source_file"); + storage.store_one(&old_rustdoc_file, Vec::new())?; + storage.store_one(&old_source_file, Vec::new())?; + + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + !builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); - // check release record in the db (default and other targets) - let row = env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; - sqlx::query!( - "SELECT + // check release record in the db (default and other targets) + let row = env.runtime().block_on(async { + let mut conn = env.async_db().async_conn().await; + sqlx::query!( + "SELECT r.rustdoc_status, r.is_library FROM @@ -1613,318 +1601,319 @@ mod tests { WHERE c.name = $1 AND r.version = $2", - crate_, - version - ) - .fetch_one(&mut *conn) - .await - })?; + crate_, + version + ) + .fetch_one(&mut *conn) + .await + })?; - assert_eq!(row.rustdoc_status, Some(false)); - assert_eq!(row.is_library, Some(false)); + assert_eq!(row.rustdoc_status, Some(false)); + assert_eq!(row.is_library, Some(false)); - // doc archive exists - let doc_archive = rustdoc_archive_path(crate_, version); - assert!(!storage.exists(&doc_archive)?); + // doc archive exists + let doc_archive = rustdoc_archive_path(crate_, version); + assert!(!storage.exists(&doc_archive)?); - // source archive exists - let source_archive = source_archive_path(crate_, version); - assert!(storage.exists(&source_archive)?); + // source archive exists + let source_archive = source_archive_path(crate_, version); + assert!(storage.exists(&source_archive)?); - // old rustdoc & source files still exist - assert!(storage.exists(&old_rustdoc_file)?); - assert!(storage.exists(&old_source_file)?); + // old rustdoc & source files still exist + assert!(storage.exists(&old_rustdoc_file)?); + assert!(storage.exists(&old_source_file)?); - Ok(()) - }) + Ok(()) } #[test] #[ignore] - fn test_failed_build_with_existing_successful_release() { - wrapper(|env| { - // rand 0.8.5 fails to build with recent nightly versions - // https://github.com/rust-lang/docs.rs/issues/26750 - let crate_ = "rand"; - let version = "0.8.5"; - - // create a successful release & 
build in the database - let release_id = env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; - let crate_id = initialize_crate(&mut conn, crate_).await?; - let release_id = initialize_release(&mut conn, crate_id, version).await?; - let build_id = initialize_build(&mut conn, release_id).await?; - finish_build( - &mut conn, - build_id, - "some-version", - "other-version", - BuildStatus::Success, - None, - None, - ) - .await?; - finish_release( - &mut conn, - crate_id, - release_id, - &MetadataPackage::default(), - Path::new("/unknown/"), - "x86_64-unknown-linux-gnu", - serde_json::Value::Array(vec![]), - vec![ - "i686-pc-windows-msvc".into(), - "aarch64-unknown-linux-gnu".into(), - "aarch64-apple-darwin".into(), - "x86_64-pc-windows-msvc".into(), - "x86_64-unknown-linux-gnu".into(), - ], - &ReleaseData::default(), - true, - false, - iter::once(CompressionAlgorithm::Bzip2), - None, - true, - 42, - ) - .await?; + fn test_failed_build_with_existing_successful_release() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + // rand 0.8.5 fails to build with recent nightly versions + // https://github.com/rust-lang/docs.rs/issues/26750 + let crate_ = "rand"; + let version = "0.8.5"; + + // create a successful release & build in the database + let release_id = env.runtime().block_on(async { + let mut conn = env.async_db().async_conn().await; + let crate_id = initialize_crate(&mut conn, crate_).await?; + let release_id = initialize_release(&mut conn, crate_id, version).await?; + let build_id = initialize_build(&mut conn, release_id).await?; + finish_build( + &mut conn, + build_id, + "some-version", + "other-version", + BuildStatus::Success, + None, + None, + ) + .await?; + finish_release( + &mut conn, + crate_id, + release_id, + &MetadataPackage::default(), + Path::new("/unknown/"), + "x86_64-unknown-linux-gnu", + serde_json::Value::Array(vec![]), + vec![ + "i686-pc-windows-msvc".into(), + "aarch64-unknown-linux-gnu".into(), + "aarch64-apple-darwin".into(), + "x86_64-pc-windows-msvc".into(), + "x86_64-unknown-linux-gnu".into(), + ], + &ReleaseData::default(), + true, + false, + iter::once(CompressionAlgorithm::Bzip2), + None, + true, + 42, + ) + .await?; - Ok::<_, anyhow::Error>(release_id) - })?; + Ok::<_, anyhow::Error>(release_id) + })?; - fn check_rustdoc_status(env: &TestEnvironment, rid: ReleaseId) -> Result<()> { - assert_eq!( - env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; - sqlx::query_scalar!( - "SELECT rustdoc_status FROM releases WHERE id = $1", - rid.0 - ) + fn check_rustdoc_status(env: &TestEnvironment, rid: ReleaseId) -> Result<()> { + assert_eq!( + env.runtime().block_on(async { + let mut conn = env.async_db().async_conn().await; + sqlx::query_scalar!("SELECT rustdoc_status FROM releases WHERE id = $1", rid.0) .fetch_one(&mut *conn) .await - })?, - Some(true) - ); - Ok(()) - } + })?, + Some(true) + ); + Ok(()) + } - check_rustdoc_status(env, release_id)?; + check_rustdoc_status(&env, release_id)?; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - // not successful build - !builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + // not successful build + !builder + .build_package(crate_, version, PackageKind::CratesIo, false)? 
+ .successful + ); - check_rustdoc_status(env, release_id)?; - Ok(()) - }); + check_rustdoc_status(&env, release_id)?; + Ok(()) } #[test_case("scsys-macros", "0.2.6")] #[test_case("scsys-derive", "0.2.6")] #[test_case("thiserror-impl", "1.0.26")] #[ignore] - fn test_proc_macro(crate_: &str, version: &str) { - wrapper(|env| { - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_proc_macro(crate_: &str, version: &str) -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); - let storage = env.storage(); + let storage = env.storage(); - // doc archive exists - let doc_archive = rustdoc_archive_path(crate_, version); - assert!(storage.exists(&doc_archive)?); + // doc archive exists + let doc_archive = rustdoc_archive_path(crate_, version); + assert!(storage.exists(&doc_archive)?); - // source archive exists - let source_archive = source_archive_path(crate_, version); - assert!(storage.exists(&source_archive)?); + // source archive exists + let source_archive = source_archive_path(crate_, version); + assert!(storage.exists(&source_archive)?); - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_cross_compile_non_host_default() { - wrapper(|env| { - let crate_ = "windows-win"; - let version = "2.4.1"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - if builder.toolchain.as_ci().is_some() { - return Ok(()); - } - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_cross_compile_non_host_default() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let crate_ = "windows-win"; + let version = "2.4.1"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + if builder.toolchain.as_ci().is_some() { + return Ok(()); + } + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? 
+ .successful + ); - let storage = env.storage(); + let storage = env.storage(); - // doc archive exists - let doc_archive = rustdoc_archive_path(crate_, version); - assert!(storage.exists(&doc_archive)?, "{}", doc_archive); + // doc archive exists + let doc_archive = rustdoc_archive_path(crate_, version); + assert!(storage.exists(&doc_archive)?, "{}", doc_archive); - // source archive exists - let source_archive = source_archive_path(crate_, version); - assert!(storage.exists(&source_archive)?, "{}", source_archive); + // source archive exists + let source_archive = source_archive_path(crate_, version); + assert!(storage.exists(&source_archive)?, "{}", source_archive); - let target = "x86_64-unknown-linux-gnu"; - let crate_path = crate_.replace('-', "_"); - let target_docs_present = storage.exists_in_archive( - &doc_archive, - None, - &format!("{target}/{crate_path}/index.html"), - )?; - assert!(target_docs_present); + let target = "x86_64-unknown-linux-gnu"; + let crate_path = crate_.replace('-', "_"); + let target_docs_present = storage.exists_in_archive( + &doc_archive, + None, + &format!("{target}/{crate_path}/index.html"), + )?; + assert!(target_docs_present); - env.runtime().block_on(async { - let web = env.web_app().await; - let target_url = format!("/{crate_}/{version}/{target}/{crate_path}/index.html"); + env.runtime().block_on(async { + let web = env.web_app().await; + let target_url = format!("/{crate_}/{version}/{target}/{crate_path}/index.html"); - web.assert_success(&target_url).await - })?; + web.assert_success(&target_url).await + })?; - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_locked_fails_unlocked_needs_new_deps() { - wrapper(|env| { - env.override_config(|cfg| cfg.include_default_targets = false); - - // if the corrected dependency of the crate was already downloaded we need to remove it - remove_cache_files(env, "rand_core", "0.5.1")?; - - // Specific setup required: - // * crate has a binary so that it is published with a lockfile - // * crate has a library so that it is documented by docs.rs - // * crate has an optional dependency - // * metadata enables the optional dependency for docs.rs - // * `cargo doc` fails with the version of the dependency in the lockfile - // * there is a newer version of the dependency available that correctly builds - let crate_ = "docs_rs_test_incorrect_lockfile"; - let version = "0.1.2"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? 
- .successful - ); + fn test_locked_fails_unlocked_needs_new_deps() -> Result<()> { + let env = TestEnvironment::with_config_and_runtime( + TestEnvironment::base_config() + .include_default_targets(false) + .build()?, + )?; - Ok(()) - }); + // if the corrected dependency of the crate was already downloaded we need to remove it + remove_cache_files(&env, "rand_core", "0.5.1")?; + + // Specific setup required: + // * crate has a binary so that it is published with a lockfile + // * crate has a library so that it is documented by docs.rs + // * crate has an optional dependency + // * metadata enables the optional dependency for docs.rs + // * `cargo doc` fails with the version of the dependency in the lockfile + // * there is a newer version of the dependency available that correctly builds + let crate_ = "docs_rs_test_incorrect_lockfile"; + let version = "0.1.2"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); + + Ok(()) } #[test] #[ignore] - fn test_locked_fails_unlocked_needs_new_unknown_deps() { - wrapper(|env| { - env.override_config(|cfg| cfg.include_default_targets = false); - - // if the corrected dependency of the crate was already downloaded we need to remove it - remove_cache_files(env, "value-bag-sval2", "1.4.1")?; - - // Similar to above, this crate fails to build with the published - // lockfile, but generating a new working lockfile requires - // introducing a completely new dependency (not just version) which - // would not have had its details pulled down from the sparse-index. - let crate_ = "docs_rs_test_incorrect_lockfile"; - let version = "0.2.0"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_locked_fails_unlocked_needs_new_unknown_deps() -> Result<()> { + let env = TestEnvironment::with_config_and_runtime( + TestEnvironment::base_config() + .include_default_targets(false) + .build()?, + )?; - Ok(()) - }); + // if the corrected dependency of the crate was already downloaded we need to remove it + remove_cache_files(&env, "value-bag-sval2", "1.4.1")?; + + // Similar to above, this crate fails to build with the published + // lockfile, but generating a new working lockfile requires + // introducing a completely new dependency (not just version) which + // would not have had its details pulled down from the sparse-index. + let crate_ = "docs_rs_test_incorrect_lockfile"; + let version = "0.2.0"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); + + Ok(()) } #[test] #[ignore] - fn test_rustflags_are_passed_to_build_script() { - wrapper(|env| { - let crate_ = "proc-macro2"; - let version = "1.0.95"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? 
- .successful - ); - Ok(()) - }); + fn test_rustflags_are_passed_to_build_script() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let crate_ = "proc-macro2"; + let version = "1.0.95"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); + Ok(()) } #[test] #[ignore] - fn test_sources_are_added_even_for_build_failures_before_build() { - wrapper(|env| { - // https://github.com/rust-lang/docs.rs/issues/2523 - // package with invalid cargo metadata. - // Will succeed in the crate fetch step, so sources are - // added. Will fail when we try to build. - let crate_ = "simconnect-sys"; - let version = "0.23.1"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - - // `Result` is `Ok`, but the build-result is `false` - assert!( - !builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_sources_are_added_even_for_build_failures_before_build() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + // https://github.com/rust-lang/docs.rs/issues/2523 + // package with invalid cargo metadata. + // Will succeed in the crate fetch step, so sources are + // added. Will fail when we try to build. + let crate_ = "simconnect-sys"; + let version = "0.23.1"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + + // `Result` is `Ok`, but the build-result is `false` + assert!( + !builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); - // source archive exists - let source_archive = source_archive_path(crate_, version); - assert!( - env.storage().exists(&source_archive)?, - "archive doesnt exist: {source_archive}" - ); + // source archive exists + let source_archive = source_archive_path(crate_, version); + assert!( + env.storage().exists(&source_archive)?, + "archive doesnt exist: {source_archive}" + ); - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_build_failures_before_build() { - wrapper(|env| { - // https://github.com/rust-lang/docs.rs/issues/2491 - // package without Cargo.toml, so fails directly in the fetch stage. - let crate_ = "emheap"; - let version = "0.1.0"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; + fn test_build_failures_before_build() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; - // `Result` is `Ok`, but the build-result is `false` - let summary = builder.build_package(crate_, version, PackageKind::CratesIo, false)?; + // https://github.com/rust-lang/docs.rs/issues/2491 + // package without Cargo.toml, so fails directly in the fetch stage. 
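+        // (even though the build never starts, the failed attempt still ends up
+        // as a build row in the database; the assertions below check that row)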
+ let crate_ = "emheap"; + let version = "0.1.0"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; - assert!(!summary.successful); - assert!(summary.should_reattempt); + // `Result` is `Ok`, but the build-result is `false` + let summary = builder.build_package(crate_, version, PackageKind::CratesIo, false)?; - let row = env.runtime().block_on(async { - let mut conn = env.async_db().await.async_conn().await; - sqlx::query!( - r#"SELECT + assert!(!summary.successful); + assert!(summary.should_reattempt); + + let row = env.runtime().block_on(async { + let mut conn = env.async_db().async_conn().await; + sqlx::query!( + r#"SELECT rustc_version, docsrs_version, build_status as "build_status: BuildStatus", @@ -1934,160 +1923,161 @@ mod tests { INNER JOIN releases as r on c.id = r.crate_id INNER JOIN builds as b on b.rid = r.id WHERE c.name = $1 and r.version = $2"#, - crate_, - version, - ) - .fetch_one(&mut *conn) - .await - })?; + crate_, + version, + ) + .fetch_one(&mut *conn) + .await + })?; - assert!(row.rustc_version.is_none()); - assert!(row.docsrs_version.is_none()); - assert_eq!(row.build_status, BuildStatus::Failure); - assert!(row.errors.unwrap().contains("missing Cargo.toml")); + assert!(row.rustc_version.is_none()); + assert!(row.docsrs_version.is_none()); + assert_eq!(row.build_status, BuildStatus::Failure); + assert!(row.errors.unwrap().contains("missing Cargo.toml")); - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_implicit_features_for_optional_dependencies() { - wrapper(|env| { - let crate_ = "serde"; - let version = "1.0.152"; - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_package(crate_, version, PackageKind::CratesIo, false)? - .successful - ); + fn test_implicit_features_for_optional_dependencies() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let crate_ = "serde"; + let version = "1.0.152"; + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_package(crate_, version, PackageKind::CratesIo, false)? + .successful + ); - assert!( - get_features(env, crate_, version)? - .unwrap() - .iter() - .any(|f| f.name == "serde_derive") - ); + assert!( + get_features(&env, crate_, version)? + .unwrap() + .iter() + .any(|f| f.name == "serde_derive") + ); - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_no_implicit_features_for_optional_dependencies_with_dep_syntax() { - wrapper(|env| { - let mut builder = RustwideBuilder::init(env).unwrap(); - builder.update_toolchain()?; - assert!( - builder - .build_local_package(Path::new("tests/crates/optional-dep"))? - .successful - ); + fn test_no_implicit_features_for_optional_dependencies_with_dep_syntax() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let mut builder = RustwideBuilder::init(&env.context).unwrap(); + builder.update_toolchain()?; + assert!( + builder + .build_local_package(Path::new("tests/crates/optional-dep"))? + .successful + ); - assert_eq!( - get_features(env, "optional-dep", "0.0.1")? - .unwrap() - .iter() - .map(|f| f.name.to_owned()) - .sorted() - .collect_vec(), - // "regex" feature is not in the list, - // because we don't have implicit features for optional dependencies - // with `dep` syntax any more. - vec!["alloc", "default", "optional_regex", "std"] - ); + assert_eq!( + get_features(&env, "optional-dep", "0.0.1")? 
+ .unwrap() + .iter() + .map(|f| f.name.to_owned()) + .sorted() + .collect_vec(), + // "regex" feature is not in the list, + // because we don't have implicit features for optional dependencies + // with `dep` syntax any more. + vec!["alloc", "default", "optional_regex", "std"] + ); - Ok(()) - }); + Ok(()) } #[test] #[ignore] - fn test_build_std() { - wrapper(|env| { - let mut builder = RustwideBuilder::init(env)?; - builder.update_toolchain()?; - assert!( - builder - .build_local_package(Path::new("tests/crates/build-std"))? - .successful - ); - Ok(()) - }) + fn test_build_std() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let mut builder = RustwideBuilder::init(&env.context)?; + builder.update_toolchain()?; + assert!( + builder + .build_local_package(Path::new("tests/crates/build-std"))? + .successful + ); + Ok(()) } #[test] #[ignore] - fn test_workspace_reinitialize_at_once() { - wrapper(|env| { - let mut builder = RustwideBuilder::init(env)?; - builder.update_toolchain()?; - builder.reinitialize_workspace_if_interval_passed(env)?; - assert!( - builder - .build_local_package(Path::new("tests/crates/build-std"))? - .successful - ); - Ok(()) - }) + fn test_workspace_reinitialize_at_once() -> Result<()> { + let env = TestEnvironment::new_with_runtime()?; + + let mut builder = RustwideBuilder::init(&env.context)?; + builder.update_toolchain()?; + builder.reinitialize_workspace_if_interval_passed(&env.context)?; + assert!( + builder + .build_local_package(Path::new("tests/crates/build-std"))? + .successful + ); + Ok(()) } #[test] #[ignore] - fn test_workspace_reinitialize_after_interval() { + fn test_workspace_reinitialize_after_interval() -> Result<()> { + let env = TestEnvironment::with_config_and_runtime( + TestEnvironment::base_config() + .build_workspace_reinitialization_interval(Duration::from_secs(1)) + .build()?, + )?; + use std::thread::sleep; use std::time::Duration; - wrapper(|env: &TestEnvironment| { - env.override_config(|cfg: &mut Config| { - cfg.build_workspace_reinitialization_interval = Duration::from_secs(1) - }); - let mut builder = RustwideBuilder::init(env)?; - builder.update_toolchain()?; - assert!( - builder - .build_local_package(Path::new("tests/crates/build-std"))? - .successful - ); - sleep(Duration::from_secs(1)); - builder.reinitialize_workspace_if_interval_passed(env)?; - assert!( - builder - .build_local_package(Path::new("tests/crates/build-std"))? - .successful - ); - Ok(()) - }) + + let mut builder = RustwideBuilder::init(&env.context)?; + builder.update_toolchain()?; + assert!( + builder + .build_local_package(Path::new("tests/crates/build-std"))? + .successful + ); + sleep(Duration::from_secs(1)); + builder.reinitialize_workspace_if_interval_passed(&env.context)?; + assert!( + builder + .build_local_package(Path::new("tests/crates/build-std"))? + .successful + ); + Ok(()) } #[test] #[ignore] - fn test_new_builder_detects_existing_rustc() { - wrapper(|env: &TestEnvironment| { - let mut builder = RustwideBuilder::init(env)?; - builder.update_toolchain()?; - let old_version = builder.rustc_version()?; - drop(builder); - - // new builder should detect the existing rustc version from the previous builder - // (simulating running `update-toolchain` and `build crate` in separate invocations) - let mut builder = RustwideBuilder::init(env)?; - assert!( - builder - .build_package( - DUMMY_CRATE_NAME, - DUMMY_CRATE_VERSION, - PackageKind::CratesIo, - false - )? 
-                    .successful
-            );
-            assert_eq!(old_version, builder.rustc_version()?);
+    fn test_new_builder_detects_existing_rustc() -> Result<()> {
+        let env = TestEnvironment::new_with_runtime()?;
+
+        let mut builder = RustwideBuilder::init(&env.context)?;
+        builder.update_toolchain()?;
+        let old_version = builder.rustc_version()?;
+        drop(builder);
+
+        // new builder should detect the existing rustc version from the previous builder
+        // (simulating running `update-toolchain` and `build crate` in separate invocations)
+        let mut builder = RustwideBuilder::init(&env.context)?;
+        assert!(
+            builder
+                .build_package(
+                    DUMMY_CRATE_NAME,
+                    DUMMY_CRATE_VERSION,
+                    PackageKind::CratesIo,
+                    false
+                )?
+                .successful
+        );
+        assert_eq!(old_version, builder.rustc_version()?);
 
-            Ok(())
-        })
+        Ok(())
     }
 
     #[test]
diff --git a/src/repositories/github.rs b/src/repositories/github.rs
index 912551547..986ac2c84 100644
--- a/src/repositories/github.rs
+++ b/src/repositories/github.rs
@@ -269,6 +269,17 @@ mod tests {
     use super::{Config, GitHub};
     use crate::repositories::RateLimitReached;
     use crate::repositories::updater::{RepositoryForge, repository_name};
+    use crate::test::TestEnvironment;
+    use anyhow::Result;
+
+    const TEST_TOKEN: &str = "qsjdnfqdq";
+
+    fn github_config() -> anyhow::Result<Config> {
+        TestEnvironment::base_config()
+            .github_accesstoken(Some(TEST_TOKEN.to_owned()))
+            .build()
+            .map_err(Into::into)
+    }
 
     async fn mock_server_and_github(config: &Config) -> (mockito::ServerGuard, GitHub) {
         let server = mockito::Server::new_async().await;
@@ -279,109 +290,97 @@ mod tests {
         (server, updater)
     }
 
-    #[test]
-    fn test_rate_limit_fail() {
-        crate::test::async_wrapper(|env| async move {
-            let mut config = env.base_config();
-            config.github_accesstoken = Some("qsjdnfqdq".to_owned());
-            let (mut server, updater) = mock_server_and_github(&config).await;
-
-            let _m1 = server
-                .mock("POST", "/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(
-                    r#"{"errors":[{"type":"RATE_LIMITED","message":"API rate limit exceeded"}]}"#,
-                )
-                .create();
-
-            match updater.fetch_repositories(&[String::new()]).await {
-                Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
-                x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
-            }
-            Ok(())
-        });
+    #[tokio::test]
+    async fn test_rate_limit_fail() -> Result<()> {
+        let config = github_config()?;
+        let (mut server, updater) = mock_server_and_github(&config).await;
+
+        let _m1 = server
+            .mock("POST", "/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(
+                r#"{"errors":[{"type":"RATE_LIMITED","message":"API rate limit exceeded"}]}"#,
+            )
+            .create();
+
+        match updater.fetch_repositories(&[String::new()]).await {
+            Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
+            x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
+        }
+        Ok(())
    }
 
-    #[test]
-    fn test_rate_limit_manual() {
-        crate::test::async_wrapper(|env| async move {
-            let mut config = env.base_config();
-            config.github_accesstoken = Some("qsjdnfqdq".to_owned());
-            let (mut server, updater) = mock_server_and_github(&config).await;
-
-            let _m1 = server
-                .mock("POST", "/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(r#"{"data": {"nodes": [], "rateLimit": {"remaining": 0}}}"#)
-                .create();
-
-            match updater.fetch_repositories(&[String::new()]).await {
-                Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
-                x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
-            }
-            Ok(())
-        });
+    #[tokio::test]
+    async fn test_rate_limit_manual() -> Result<()> {
+        let config = github_config()?;
+        let (mut server, updater) = mock_server_and_github(&config).await;
+
+        let _m1 = server
+            .mock("POST", "/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(r#"{"data": {"nodes": [], "rateLimit": {"remaining": 0}}}"#)
+            .create();
+
+        match updater.fetch_repositories(&[String::new()]).await {
+            Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
+            x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
+        }
+        Ok(())
     }
 
-    #[test]
-    fn not_found() {
-        crate::test::async_wrapper(|env| async move {
-            let mut config = env.base_config();
-            config.github_accesstoken = Some("qsjdnfqdq".to_owned());
-            let (mut server, updater) = mock_server_and_github(&config).await;
-
-            let _m1 = server
-                .mock("POST", "/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(
-                    r#"{"data": {"nodes": [], "rateLimit": {"remaining": 100000}}, "errors":
+    #[tokio::test]
+    async fn not_found() -> Result<()> {
+        let config = github_config()?;
+        let (mut server, updater) = mock_server_and_github(&config).await;
+
+        let _m1 = server
+            .mock("POST", "/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(
+                r#"{"data": {"nodes": [], "rateLimit": {"remaining": 100000}}, "errors":
                 [{"type": "NOT_FOUND", "path": ["nodes", 0], "message": "none"}]}"#,
-                )
-                .create();
+            )
+            .create();
 
-            match updater.fetch_repositories(&[String::new()]).await {
-                Ok(res) => {
-                    assert_eq!(res.missing, vec![String::new()]);
-                    assert_eq!(res.present.len(), 0);
-                }
-                x => panic!("Failed: {x:?}"),
+        match updater.fetch_repositories(&[String::new()]).await {
+            Ok(res) => {
+                assert_eq!(res.missing, vec![String::new()]);
+                assert_eq!(res.present.len(), 0);
             }
-            Ok(())
-        });
+            x => panic!("Failed: {x:?}"),
+        }
+        Ok(())
     }
 
-    #[test]
-    fn get_repository_info() {
-        crate::test::async_wrapper(|env| async move {
-            let mut config = env.base_config();
-            config.github_accesstoken = Some("qsjdnfqdq".to_owned());
-            let (mut server, updater) = mock_server_and_github(&config).await;
-
-            let _m1 = server
-                .mock("POST", "/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(
-                    r#"{"data": {"repository": {"id": "hello", "nameWithOwner": "foo/bar",
+    #[tokio::test]
+    async fn get_repository_info() -> Result<()> {
+        let config = github_config()?;
+        let (mut server, updater) = mock_server_and_github(&config).await;
+
+        let _m1 = server
+            .mock("POST", "/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(
+                r#"{"data": {"repository": {"id": "hello", "nameWithOwner": "foo/bar",
                 "description": "this is", "stargazerCount": 10, "forkCount": 11,
                 "issues": {"totalCount": 12}}}}"#,
-                )
-                .create();
-
-            let repo = updater
-                .fetch_repository(
-                    &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
-                )
-                .await
-                .expect("fetch_repository failed")
-                .unwrap();
-
-            assert_eq!(repo.id, "hello");
-            assert_eq!(repo.name_with_owner, "foo/bar");
-            assert_eq!(repo.description, Some("this is".to_owned()));
-            assert_eq!(repo.stars, 10);
-            assert_eq!(repo.forks, 11);
-            assert_eq!(repo.issues, 12);
-            Ok(())
-        });
+            )
+            .create();
+
+        let repo = updater
+            .fetch_repository(
+                &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
+            )
+            .await
+            .expect("fetch_repository failed")
+            .unwrap();
+
+        assert_eq!(repo.id, "hello");
+        assert_eq!(repo.name_with_owner, "foo/bar");
+        assert_eq!(repo.description, Some("this is".to_owned()));
+        assert_eq!(repo.stars, 10);
+        assert_eq!(repo.forks, 11);
+        assert_eq!(repo.issues, 12);
+        Ok(())
     }
 }
diff --git a/src/repositories/gitlab.rs b/src/repositories/gitlab.rs
index bfdf82914..c1fb70b95 100644
--- a/src/repositories/gitlab.rs
+++ b/src/repositories/gitlab.rs
@@ -267,6 +267,7 @@ mod tests {
     use super::GitLab;
     use crate::repositories::RateLimitReached;
     use crate::repositories::updater::{RepositoryForge, repository_name};
+    use anyhow::Result;
 
     async fn mock_server_and_gitlab() -> (mockito::ServerGuard, GitLab) {
         let server = mockito::Server::new_async().await;
@@ -280,87 +281,81 @@ mod tests {
         (server, updater)
     }
 
-    #[test]
-    fn test_rate_limit() {
-        crate::test::async_wrapper(|_env| async move {
-            let (mut server, updater) = mock_server_and_gitlab().await;
-
-            let _m1 = server
-                .mock("POST", "/api/graphql")
-                .with_header("content-type", "application/json")
-                .with_header("RateLimit-Remaining", "0")
-                .with_body("{}")
-                .create();
-
-            match updater
-                .fetch_repository(
-                    &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
-                )
-                .await
-            {
-                Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
-                x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
-            }
-            match updater.fetch_repositories(&[String::new()]).await {
-                Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
-                x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
-            }
-            Ok(())
-        })
+    #[tokio::test]
+    async fn test_rate_limit() -> Result<()> {
+        let (mut server, updater) = mock_server_and_gitlab().await;
+
+        let _m1 = server
+            .mock("POST", "/api/graphql")
+            .with_header("content-type", "application/json")
+            .with_header("RateLimit-Remaining", "0")
+            .with_body("{}")
+            .create();
+
+        match updater
+            .fetch_repository(
+                &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
+            )
+            .await
+        {
+            Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
+            x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
+        }
+        match updater.fetch_repositories(&[String::new()]).await {
+            Err(e) if e.downcast_ref::<RateLimitReached>().is_some() => {}
+            x => panic!("Expected Err(RateLimitReached), found: {x:?}"),
+        }
+        Ok(())
     }
 
-    #[test]
-    fn not_found() {
-        crate::test::async_wrapper(|_env| async move {
-            let (mut server, updater) = mock_server_and_gitlab().await;
-
-            let _m1 = server
-                .mock("POST", "/api/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(r#"{"data": {"projects": {"nodes": []}}}"#)
-                .create();
-
-            match updater.fetch_repositories(&[String::new()]).await {
-                Ok(res) => {
-                    assert_eq!(res.missing, vec![String::new()]);
-                    assert_eq!(res.present.len(), 0);
-                }
-                x => panic!("Failed: {x:?}"),
+    #[tokio::test]
+    async fn not_found() -> Result<()> {
+        let (mut server, updater) = mock_server_and_gitlab().await;
+
+        let _m1 = server
+            .mock("POST", "/api/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(r#"{"data": {"projects": {"nodes": []}}}"#)
+            .create();
+
+        match updater.fetch_repositories(&[String::new()]).await {
+            Ok(res) => {
+                assert_eq!(res.missing, vec![String::new()]);
+                assert_eq!(res.present.len(), 0);
             }
-            Ok(())
-        })
+            x => panic!("Failed: {x:?}"),
+        }
+        Ok(())
     }
 
-    #[test]
-    fn get_repository_info() {
-        crate::test::async_wrapper(|_env| async move {
-            let (mut server, updater) = mock_server_and_gitlab().await;
+    #[tokio::test]
+    async fn get_repository_info() -> Result<()> {
+        let (mut server, updater) = mock_server_and_gitlab().await;
 
-            let _m1 = server
-                .mock("POST", "/api/graphql")
-                .with_header("content-type", "application/json")
-                .with_body(
-                    r#"{"data": {"project": {"id": "hello", "fullPath": "foo/bar",
+        let _m1 = server
+            .mock("POST", "/api/graphql")
+            .with_header("content-type", "application/json")
+            .with_body(
+                r#"{"data": {"project": {"id": "hello", "fullPath": "foo/bar",
                 "description": "this is", "starCount": 10, "forksCount": 11,
                 "openIssuesCount": 12}}}"#,
-                )
-                .create();
-
-            let repo = updater
-                .fetch_repository(
-                    &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
-                )
-                .await
-                .expect("fetch_repository failed")
-                .unwrap();
-
-            assert_eq!(repo.id, "hello");
-            assert_eq!(repo.name_with_owner, "foo/bar");
-            assert_eq!(repo.description, Some("this is".to_owned()));
-            assert_eq!(repo.stars, 10);
-            assert_eq!(repo.forks, 11);
-            assert_eq!(repo.issues, 12);
-            Ok(())
-        })
+            )
+            .create();
+
+        let repo = updater
+            .fetch_repository(
+                &repository_name("https://gitlab.com/foo/bar").expect("repository_name failed"),
+            )
+            .await
+            .expect("fetch_repository failed")
+            .unwrap();
+
+        assert_eq!(repo.id, "hello");
+        assert_eq!(repo.name_with_owner, "foo/bar");
+        assert_eq!(repo.description, Some("this is".to_owned()));
+        assert_eq!(repo.stars, 10);
+        assert_eq!(repo.forks, 11);
+        assert_eq!(repo.issues, 12);
+        Ok(())
    }
 }
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index 1b4dcdd92..6925dfc58 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -35,7 +35,7 @@ use std::{
 use std::{iter, str::FromStr};
 use tokio::{
     io::{AsyncRead, AsyncWriteExt},
-    runtime::Runtime,
+    runtime,
 };
 use tracing::{error, info_span, instrument, trace};
 use walkdir::WalkDir;
@@ -150,10 +150,10 @@ pub fn get_file_list<P: AsRef<Path>>(path: P) -> Box<dyn Iterator<Item = Result<PathBuf>>>
         config: Arc<Config>,
     ) -> Result<Self> {
         Ok(Self {
-            config: config.clone(),
             backend: match config.storage_backend {
                 StorageKind::Database => {
                     StorageBackend::Database(DatabaseBackend::new(pool, metrics))
@@ -196,6 +195,7 @@ impl AsyncStorage {
                     StorageBackend::S3(Box::new(S3Backend::new(metrics, &config).await?))
                 }
             },
+            config,
         })
     }
 
@@ -756,12 +756,12 @@ impl std::fmt::Debug for AsyncStorage {
 /// Sync wrapper around `AsyncStorage` for parts of the codebase that are not async.
 pub struct Storage {
     inner: Arc<AsyncStorage>,
-    runtime: Arc<Runtime>,
+    runtime: runtime::Handle,
 }
 
 #[allow(dead_code)]
 impl Storage {
-    pub fn new(inner: Arc<AsyncStorage>, runtime: Arc<Runtime>) -> Self {
+    pub fn new(inner: Arc<AsyncStorage>, runtime: runtime::Handle) -> Self {
         Self { inner, runtime }
     }
 
@@ -948,8 +948,8 @@ impl Storage {
     // we leak the web server, and Drop isn't executed in that case (since the leaked web server
     // still holds a reference to the storage).
     #[cfg(test)]
-    pub(crate) fn cleanup_after_test(&self) -> Result<()> {
-        self.runtime.block_on(self.inner.cleanup_after_test())
+    pub(crate) async fn cleanup_after_test(&self) -> Result<()> {
+        self.inner.cleanup_after_test().await
     }
 }
 
@@ -1531,14 +1531,14 @@ mod backend_tests {
             $(
                 mod $backend {
                     use crate::test::TestEnvironment;
-                    use crate::storage::{Storage, StorageKind};
-                    use std::sync::Arc;
-
-                    fn get_storage(env: &TestEnvironment) -> Arc<Storage> {
-                        env.override_config(|config| {
-                            config.storage_backend = $config;
-                        });
-                        env.storage()
+                    use crate::storage::{ StorageKind};
+
+                    fn get_env() -> anyhow::Result<TestEnvironment> {
+                        crate::test::TestEnvironment::with_config_and_runtime(
+                            TestEnvironment::base_config()
+                                .storage_backend($config)
+                                .build()?
+                        )
                     }
 
                     backend_tests!(@tests $tests);
 
@@ -1549,20 +1549,18 @@ mod backend_tests {
         (@tests { $($test:ident,)* }) => {
             $(
                 #[test]
-                fn $test() {
-                    crate::test::wrapper(|env| {
-                        super::$test(&*get_storage(env))
-                    });
+                fn $test() -> anyhow::Result<()> {
+                    let env = get_env()?;
+                    super::$test(&*env.storage())
                 }
             )*
         };
         (@tests_with_metrics { $($test:ident,)* }) => {
             $(
                 #[test]
-                fn $test() {
-                    crate::test::wrapper(|env| {
-                        super::$test(&*get_storage(env), &*env.instance_metrics())
-                    });
+                fn $test() -> anyhow::Result<()> {
+                    let env = get_env()?;
+                    super::$test(&*env.storage(), &*env.instance_metrics())
                 }
             )*
         };
diff --git a/src/test/mod.rs b/src/test/mod.rs
index e137943d5..7069c9eff 100644
--- a/src/test/mod.rs
+++ b/src/test/mod.rs
@@ -2,84 +2,36 @@ mod fakes;
 pub(crate) use self::fakes::{FakeBuild, fake_release_that_failed_before_build};
 
 use crate::cdn::CdnBackend;
+use crate::config::ConfigBuilder;
 use crate::db::{self, AsyncPoolClient, Pool};
 use crate::error::Result;
-use crate::repositories::RepositoryStatsUpdater;
 use crate::storage::{AsyncStorage, Storage, StorageKind};
 use crate::web::{build_axum_app, cache, page::TemplateData};
-use crate::{
-    AsyncBuildQueue, BuildQueue, Config, Context, Index, InstanceMetrics, RegistryApi,
-    ServiceMetrics,
-};
+use crate::{AsyncBuildQueue, BuildQueue, Config, Context, InstanceMetrics};
 use anyhow::Context as _;
 use axum::body::Bytes;
 use axum::{Router, body::Body, http::Request, response::Response as AxumResponse};
 use fn_error_context::context;
-use futures_util::{FutureExt, stream::TryStreamExt};
+use futures_util::stream::TryStreamExt;
 use http_body_util::BodyExt; // for `collect`
-use once_cell::sync::OnceCell;
 use serde::de::DeserializeOwned;
 use sqlx::Connection as _;
 use std::{fs, future::Future, panic, rc::Rc, str::FromStr, sync::Arc};
-use tokio::runtime::{Builder, Runtime};
+use tokio::{runtime, task::block_in_place};
 use tower::ServiceExt;
 use tracing::error;
 
-#[track_caller]
-pub(crate) fn wrapper(f: impl FnOnce(&TestEnvironment) -> Result<()>) {
-    let env = TestEnvironment::new();
-    // if we didn't catch the panic, the server would hang forever
-    let maybe_panic = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&env)));
-    env.cleanup();
-    let result = match maybe_panic {
-        Ok(r) => r,
-        Err(payload) => panic::resume_unwind(payload),
-    };
-
-    if let Err(err) = result {
-        eprintln!("the test failed: {err}");
-        for cause in err.chain() {
-            eprintln!("  caused by: {cause}");
-        }
-
-        eprintln!("{}", err.backtrace());
-
-        panic!("the test failed");
-    }
-}
-
 pub(crate) fn async_wrapper<F, Fut>(f: F)
 where
     F: FnOnce(Rc<TestEnvironment>) -> Fut,
     Fut: Future<Output = Result<()>>,
 {
-    let env = Rc::new(TestEnvironment::new());
-
-    let fut = f(env.clone());
-
-    let runtime = env.runtime();
+    let env = Rc::new(
+        TestEnvironment::with_config_and_runtime(TestEnvironment::base_config().build().unwrap())
+            .unwrap(),
+    );
 
-    // if we didn't catch the panic, the server would hang forever
-    let maybe_panic = runtime.block_on(panic::AssertUnwindSafe(fut).catch_unwind());
-
-    let env = Rc::into_inner(env).unwrap();
-    env.cleanup();
-
-    let result = match maybe_panic {
-        Ok(r) => r,
-        Err(payload) => panic::resume_unwind(payload),
-    };
-
-    if let Err(err) = result {
-        eprintln!("the test failed: {err}");
-        for cause in err.chain() {
-            eprintln!("  caused by: {cause}");
-        }
-
-        eprintln!("{}", err.backtrace());
-
-        panic!("the test failed");
-    }
+    env.runtime().block_on(f(env.clone())).expect("test failed");
 }
 
 pub(crate) trait AxumResponseTestExt {
@@ -351,19 +303,12 @@ impl AxumRouterTestExt for axum::Router {
 }
 
 pub(crate) struct TestEnvironment {
-    build_queue: OnceCell<Arc<BuildQueue>>,
-    async_build_queue: tokio::sync::OnceCell<Arc<AsyncBuildQueue>>,
-    config: OnceCell<Arc<Config>>,
-    db: tokio::sync::OnceCell<TestDatabase>,
-    storage: OnceCell<Arc<Storage>>,
-    async_storage: tokio::sync::OnceCell<Arc<AsyncStorage>>,
-    cdn: tokio::sync::OnceCell<Arc<CdnBackend>>,
-    index: OnceCell<Arc<Index>>,
-    registry_api: OnceCell<Arc<RegistryApi>>,
-    runtime: OnceCell<Arc<Runtime>>,
-    instance_metrics: OnceCell<Arc<InstanceMetrics>>,
-    service_metrics: OnceCell<Arc<ServiceMetrics>>,
-    repository_stats_updater: OnceCell<Arc<RepositoryStatsUpdater>>,
+    // NOTE: the database has to come before the context,
+    // otherwise it can happen that we can't cleanup the test database
+    // because the tokio runtime from the context is gone.
+    db: TestDatabase,
+    pub context: Context,
+    owned_runtime: Option<Arc<runtime::Runtime>>,
 }
 
 pub(crate) fn init_logger() {
@@ -383,293 +328,129 @@ pub(crate) fn init_logger() {
 }
 
 impl TestEnvironment {
-    fn new() -> Self {
-        init_logger();
-        Self {
-            build_queue: OnceCell::new(),
-            async_build_queue: tokio::sync::OnceCell::new(),
-            config: OnceCell::new(),
-            db: tokio::sync::OnceCell::new(),
-            storage: OnceCell::new(),
-            async_storage: tokio::sync::OnceCell::new(),
-            cdn: tokio::sync::OnceCell::new(),
-            index: OnceCell::new(),
-            registry_api: OnceCell::new(),
-            instance_metrics: OnceCell::new(),
-            service_metrics: OnceCell::new(),
-            runtime: OnceCell::new(),
-            repository_stats_updater: OnceCell::new(),
-        }
+    pub(crate) fn new_with_runtime() -> Result<Self> {
+        Self::with_config_and_runtime(Self::base_config().build()?)
     }
 
-    fn cleanup(self) {
-        if let Some(storage) = self.storage.get() {
-            storage
-                .cleanup_after_test()
-                .expect("failed to cleanup after tests");
-        }
-
-        if let Some(config) = self.config.get()
-            && config.local_archive_cache_path.exists()
-        {
-            fs::remove_dir_all(&config.local_archive_cache_path).unwrap();
-        }
+    pub(crate) async fn new() -> Result<Self> {
+        Self::with_config(Self::base_config().build()?).await
     }
 
-    pub(crate) fn base_config(&self) -> Config {
-        let mut config = Config::from_env().expect("failed to get base config");
-
-        // create index directory
-        fs::create_dir_all(config.registry_index_path.clone()).unwrap();
-
-        // Use less connections for each test compared to production.
-        config.max_pool_size = 8;
-        config.min_pool_idle = 0;
-
-        // Use the database for storage, as it's faster than S3.
-        config.storage_backend = StorageKind::Database;
-
-        // Use a temporary S3 bucket.
-        config.s3_bucket = format!("docsrs-test-bucket-{}", rand::random::<u64>());
-        config.s3_bucket_is_temporary = true;
-
-        config.local_archive_cache_path =
-            std::env::temp_dir().join(format!("docsrs-test-index-{}", rand::random::<u64>()));
-
-        // set stale content serving so Cache::ForeverInCdn and Cache::ForeverInCdnAndStaleInBrowser
-        // are actually different.
-        config.cache_control_stale_while_revalidate = Some(86400);
-
-        config.include_default_targets = true;
-
-        config
+    pub(crate) fn with_config_and_runtime(config: Config) -> Result<Self> {
+        let runtime = Arc::new(
+            runtime::Builder::new_multi_thread()
+                .enable_all()
+                .build()
+                .context("failed to initialize runtime")?,
+        );
+        let mut env = runtime.block_on(Self::with_config(config))?;
+        env.owned_runtime = Some(runtime);
+        Ok(env)
     }
 
-    pub(crate) fn override_config(&self, f: impl FnOnce(&mut Config)) {
-        let mut config = self.base_config();
-        f(&mut config);
+    pub(crate) async fn with_config(config: Config) -> Result<Self> {
+        init_logger();
 
-        if self.config.set(Arc::new(config)).is_err() {
-            panic!("can't call override_config after the configuration is accessed!");
-        }
-    }
+        // create index directory
+        fs::create_dir_all(config.registry_index_path.clone())?;
 
-    pub(crate) async fn async_build_queue(&self) -> Arc<AsyncBuildQueue> {
-        self.async_build_queue
-            .get_or_init(|| async {
-                Arc::new(AsyncBuildQueue::new(
-                    self.async_db().await.pool(),
-                    self.instance_metrics(),
-                    self.config(),
-                    self.async_storage().await,
-                ))
-            })
+        let instance_metrics = Arc::new(InstanceMetrics::new()?);
+        let test_db = TestDatabase::new(&config, instance_metrics.clone())
             .await
-            .clone()
-    }
+            .context("can't initialize test database")?;
 
-    pub(crate) fn build_queue(&self) -> Arc<BuildQueue> {
-        let runtime = self.runtime();
-        self.build_queue
-            .get_or_init(|| {
-                Arc::new(BuildQueue::new(
-                    runtime.clone(),
-                    runtime.block_on(self.async_build_queue()),
-                ))
-            })
-            .clone()
+        Ok(Self {
+            context: Context::from_config(config, instance_metrics, test_db.pool().clone()).await?,
+            db: test_db,
+            owned_runtime: None,
+        })
     }
 
-    pub(crate) async fn cdn(&self) -> Arc<CdnBackend> {
-        self.cdn
-            .get_or_init(|| async { Arc::new(CdnBackend::new(&self.config()).await) })
-            .await
-            .clone()
+    pub(crate) fn base_config() -> ConfigBuilder {
+        Config::from_env()
+            .expect("can't load base config from environment")
+            // Use less connections for each test compared to production.
+            .max_pool_size(8)
+            .min_pool_idle(2)
+            // Use the database for storage, as it's faster than S3.
+            .storage_backend(StorageKind::Database)
+            // Use a temporary S3 bucket.
+            .s3_bucket(format!("docsrs-test-bucket-{}", rand::random::<u64>()))
+            .s3_bucket_is_temporary(true)
+            .local_archive_cache_path(
+                std::env::temp_dir().join(format!("docsrs-test-index-{}", rand::random::<u64>())),
+            )
+            // set stale content serving so Cache::ForeverInCdn and Cache::ForeverInCdnAndStaleInBrowser
+            // are actually different.
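+            // (86400 seconds = 24 hours; the tests mainly need the value to be Some)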
+            .cache_control_stale_while_revalidate(Some(86400))
+            .include_default_targets(true)
     }
 
-    pub(crate) fn config(&self) -> Arc<Config> {
-        self.config
-            .get_or_init(|| Arc::new(self.base_config()))
-            .clone()
+    pub(crate) fn async_build_queue(&self) -> &AsyncBuildQueue {
+        &self.context.async_build_queue
     }
 
-    pub(crate) async fn async_storage(&self) -> Arc<AsyncStorage> {
-        self.async_storage
-            .get_or_init(|| async {
-                let db = self.async_db().await;
-                Arc::new(
-                    AsyncStorage::new(db.pool(), self.instance_metrics(), self.config())
-                        .await
-                        .expect("failed to initialize the async storage"),
-                )
-            })
-            .await
-            .clone()
+    pub(crate) fn build_queue(&self) -> &BuildQueue {
+        &self.context.build_queue
     }
 
-    pub(crate) fn storage(&self) -> Arc<Storage> {
-        let runtime = self.runtime();
-        self.storage
-            .get_or_init(|| {
-                Arc::new(Storage::new(
-                    runtime.block_on(self.async_storage()),
-                    runtime,
-                ))
-            })
-            .clone()
+    pub(crate) fn cdn(&self) -> &CdnBackend {
+        &self.context.cdn
     }
 
-    pub(crate) fn instance_metrics(&self) -> Arc<InstanceMetrics> {
-        self.instance_metrics
-            .get_or_init(|| {
-                Arc::new(InstanceMetrics::new().expect("failed to initialize the instance metrics"))
-            })
-            .clone()
+    pub(crate) fn config(&self) -> &Config {
+        &self.context.config
    }
 
-    pub(crate) fn service_metrics(&self) -> Arc<ServiceMetrics> {
-        self.service_metrics
-            .get_or_init(|| {
-                Arc::new(ServiceMetrics::new().expect("failed to initialize the service metrics"))
-            })
-            .clone()
+    pub(crate) fn async_storage(&self) -> &AsyncStorage {
+        &self.context.async_storage
     }
 
-    pub(crate) fn runtime(&self) -> Arc<Runtime> {
-        self.runtime
-            .get_or_init(|| {
-                Arc::new(
-                    Builder::new_current_thread()
-                        .enable_all()
-                        .build()
-                        .expect("failed to initialize runtime"),
-                )
-            })
-            .clone()
+    pub(crate) fn storage(&self) -> &Storage {
+        &self.context.storage
     }
 
-    pub(crate) fn index(&self) -> Arc<Index> {
-        self.index
-            .get_or_init(|| {
-                Arc::new(
-                    Index::new(self.config().registry_index_path.clone())
-                        .expect("failed to initialize the index"),
-                )
-            })
-            .clone()
+    pub(crate) fn instance_metrics(&self) -> &InstanceMetrics {
+        &self.context.instance_metrics
     }
 
-    pub(crate) fn registry_api(&self) -> Arc<RegistryApi> {
-        self.registry_api
-            .get_or_init(|| {
-                Arc::new(
-                    RegistryApi::new(
-                        self.config().registry_api_host.clone(),
-                        self.config().crates_io_api_call_retries,
-                    )
-                    .expect("failed to initialize the registry api"),
-                )
-            })
-            .clone()
-    }
-
-    pub(crate) fn repository_stats_updater(&self) -> Arc<RepositoryStatsUpdater> {
-        self.repository_stats_updater
-            .get_or_init(|| {
-                Arc::new(RepositoryStatsUpdater::new(
-                    &self.config(),
-                    self.pool().expect("failed to get the pool"),
-                ))
-            })
-            .clone()
+    pub(crate) fn runtime(&self) -> &runtime::Handle {
+        &self.context.runtime
     }
 
-    pub(crate) fn db(&self) -> &TestDatabase {
-        self.runtime().block_on(self.async_db())
-    }
-
-    pub(crate) async fn async_db(&self) -> &TestDatabase {
-        self.db
-            .get_or_init(|| async {
-                let config = self.config();
-                let runtime = self.runtime();
-                let instance_metrics = self.instance_metrics();
-                self.runtime()
-                    .spawn_blocking(move || TestDatabase::new(&config, runtime, instance_metrics))
-                    .await
-                    .unwrap()
-                    .expect("failed to initialize the db")
-            })
-            .await
+    pub(crate) fn async_db(&self) -> &TestDatabase {
+        &self.db
     }
 
     pub(crate) async fn web_app(&self) -> Router {
         let template_data = Arc::new(TemplateData::new(1).unwrap());
-        build_axum_app(self, template_data)
+        build_axum_app(&self.context, template_data)
             .await
             .expect("could not build axum app")
     }
 
     pub(crate) async fn fake_release(&self) -> fakes::FakeRelease<'_> {
-        fakes::FakeRelease::new(self.async_db().await, self.async_storage().await)
-        fakes::FakeRelease::new(self.async_db().await, self.async_storage().await)
+        fakes::FakeRelease::new(self.async_db(), self.context.async_storage.clone())
     }
 }

-impl Context for TestEnvironment {
-    fn config(&self) -> Result<Arc<Config>> {
-        Ok(TestEnvironment::config(self))
-    }
-
-    async fn async_build_queue(&self) -> Result<Arc<AsyncBuildQueue>> {
-        Ok(TestEnvironment::async_build_queue(self).await)
-    }
-
-    fn build_queue(&self) -> Result<Arc<BuildQueue>> {
-        Ok(TestEnvironment::build_queue(self))
-    }
-
-    fn storage(&self) -> Result<Arc<Storage>> {
-        Ok(TestEnvironment::storage(self))
-    }
-
-    async fn async_storage(&self) -> Result<Arc<AsyncStorage>> {
-        Ok(TestEnvironment::async_storage(self).await)
-    }
-
-    async fn cdn(&self) -> Result<Arc<CdnBackend>> {
-        Ok(TestEnvironment::cdn(self).await)
-    }
-
-    async fn async_pool(&self) -> Result<Pool> {
-        Ok(self.async_db().await.pool())
-    }
-
-    fn pool(&self) -> Result<Pool> {
-        Ok(self.db().pool())
-    }
-
-    fn instance_metrics(&self) -> Result<Arc<InstanceMetrics>> {
-        Ok(self.instance_metrics())
-    }
-
-    fn service_metrics(&self) -> Result<Arc<ServiceMetrics>> {
-        Ok(self.service_metrics())
-    }
-
-    fn index(&self) -> Result<Arc<Index>> {
-        Ok(self.index())
-    }
-
-    fn registry_api(&self) -> Result<Arc<RegistryApi>> {
-        Ok(self.registry_api())
-    }
+impl Drop for TestEnvironment {
+    fn drop(&mut self) {
+        let storage = self.context.storage.clone();
+        let runtime = self.runtime();

-    fn repository_stats_updater(&self) -> Result<Arc<RepositoryStatsUpdater>> {
-        Ok(self.repository_stats_updater())
-    }
+        block_in_place(move || {
+            runtime.block_on(async move {
+                storage
+                    .cleanup_after_test()
+                    .await
+                    .expect("failed to cleanup after tests");
+            });
+        });

-    fn runtime(&self) -> Result<Arc<Runtime>> {
-        Ok(self.runtime())
+        if self.context.config.local_archive_cache_path.exists() {
+            fs::remove_dir_all(&self.context.config.local_archive_cache_path).unwrap();
+        }
     }
 }

@@ -677,71 +458,64 @@ impl Context for TestEnvironment {
 pub(crate) struct TestDatabase {
     pool: Pool,
     schema: String,
-    runtime: Arc<Runtime>,
+    runtime: runtime::Handle,
 }

 impl TestDatabase {
-    fn new(config: &Config, runtime: Arc<Runtime>, metrics: Arc<InstanceMetrics>) -> Result<Self> {
+    async fn new(config: &Config, metrics: Arc<InstanceMetrics>) -> Result<Self> {
         // A random schema name is generated and used for the current connection. This allows each
         // test to create a fresh instance of the database to run within.
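         // Concretely, the setup below boils down to running, per test:
         //     CREATE SCHEMA docs_rs_test_schema_<random>;
         //     SET search_path TO docs_rs_test_schema_<random>, public;
         // followed by all migrations, so every test operates on its own
         // fresh copy of the tables.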
let schema = format!("docs_rs_test_schema_{}", rand::random::()); - let pool = Pool::new_with_schema(config, runtime.clone(), metrics, &schema)?; + let pool = Pool::new_with_schema(config, metrics, &schema).await?; - runtime.block_on({ - let schema = schema.clone(); - async move { - let mut conn = sqlx::PgConnection::connect(&config.database_url).await?; - sqlx::query(&format!("CREATE SCHEMA {schema}")) - .execute(&mut conn) - .await - .context("error creating schema")?; - sqlx::query(&format!("SET search_path TO {schema}, public")) - .execute(&mut conn) - .await - .context("error setting search path")?; - db::migrate(&mut conn, None) - .await - .context("error running migrations")?; - - // Move all sequence start positions 10000 apart to avoid overlapping primary keys - let sequence_names: Vec<_> = sqlx::query!( - "SELECT relname - FROM pg_class - INNER JOIN pg_namespace ON - pg_class.relnamespace = pg_namespace.oid - WHERE pg_class.relkind = 'S' - AND pg_namespace.nspname = $1 - ", - schema, - ) - .fetch(&mut conn) - .map_ok(|row| row.relname) - .try_collect() - .await?; - - for (i, sequence) in sequence_names.into_iter().enumerate() { - let offset = (i + 1) * 10000; - sqlx::query(&format!( - r#"ALTER SEQUENCE "{sequence}" RESTART WITH {offset};"# - )) - .execute(&mut conn) - .await?; - } - - Ok::<(), anyhow::Error>(()) - } - })?; + let mut conn = sqlx::PgConnection::connect(&config.database_url).await?; + sqlx::query(&format!("CREATE SCHEMA {schema}")) + .execute(&mut conn) + .await + .context("error creating schema")?; + sqlx::query(&format!("SET search_path TO {schema}, public")) + .execute(&mut conn) + .await + .context("error setting search path")?; + db::migrate(&mut conn, None) + .await + .context("error running migrations")?; + + // Move all sequence start positions 10000 apart to avoid overlapping primary keys + let sequence_names: Vec<_> = sqlx::query!( + "SELECT relname + FROM pg_class + INNER JOIN pg_namespace ON + pg_class.relnamespace = pg_namespace.oid + WHERE pg_class.relkind = 'S' + AND pg_namespace.nspname = $1 + ", + schema, + ) + .fetch(&mut conn) + .map_ok(|row| row.relname) + .try_collect() + .await?; + + for (i, sequence) in sequence_names.into_iter().enumerate() { + let offset = (i + 1) * 10000; + sqlx::query(&format!( + r#"ALTER SEQUENCE "{sequence}" RESTART WITH {offset};"# + )) + .execute(&mut conn) + .await?; + } Ok(TestDatabase { pool, schema, - runtime, + runtime: runtime::Handle::current(), }) } - pub(crate) fn pool(&self) -> Pool { - self.pool.clone() + pub(crate) fn pool(&self) -> &Pool { + &self.pool } pub(crate) async fn async_conn(&self) -> AsyncPoolClient { @@ -754,18 +528,31 @@ impl TestDatabase { impl Drop for TestDatabase { fn drop(&mut self) { - self.runtime.block_on(async { - let mut conn = self.async_conn().await; - let migration_result = db::migrate(&mut conn, Some(0)).await; + let pool = self.pool.clone(); + let schema = self.schema.clone(); + let runtime = self.runtime.clone(); - if let Err(e) = sqlx::query(format!("DROP SCHEMA {} CASCADE;", self.schema).as_str()) - .execute(&mut *conn) - .await - { - error!("failed to drop test schema {}: {}", self.schema, e); - } + block_in_place(move || { + runtime.block_on(async move { + let Ok(mut conn) = pool.get_async().await else { + error!("error in drop impl"); + return; + }; + + let migration_result = db::migrate(&mut conn, Some(0)).await; - migration_result.expect("downgrading database works"); + if let Err(e) = sqlx::query(format!("DROP SCHEMA {} CASCADE;", schema).as_str()) + .execute(&mut 
*conn) + .await + { + error!("failed to drop test schema {}: {}", schema, e); + return; + } + + if let Err(err) = migration_result { + error!(?err, "error reverting migrations"); + } + }) }); } } diff --git a/src/utils/consistency/db.rs b/src/utils/consistency/db.rs index 673561a08..42be527a8 100644 --- a/src/utils/consistency/db.rs +++ b/src/utils/consistency/db.rs @@ -68,7 +68,6 @@ mod tests { fn test_load() { async_wrapper(|env| async move { env.async_build_queue() - .await .add_crate("queued", "0.0.1", 0, None) .await?; env.fake_release() @@ -85,8 +84,8 @@ mod tests { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; - let result = load(&mut conn, &env.config()).await?; + let mut conn = env.async_db().async_conn().await; + let result = load(&mut conn, env.config()).await?; assert_eq!( result, diff --git a/src/utils/consistency/mod.rs b/src/utils/consistency/mod.rs index 8043c0975..acb90588d 100644 --- a/src/utils/consistency/mod.rs +++ b/src/utils/consistency/mod.rs @@ -24,18 +24,16 @@ const BUILD_PRIORITY: i32 = 15; /// /// Even when activities fail, the command can just be re-run. While the diff calculation will /// be repeated, we won't re-execute fixing activities. -pub async fn run_check(ctx: &C, dry_run: bool) -> Result<()> { - let index = ctx.index()?; - +pub async fn run_check(ctx: &Context, dry_run: bool) -> Result<()> { info!("Loading data from database..."); - let mut conn = ctx.async_pool().await?.get_async().await?; - let db_data = db::load(&mut conn, &*ctx.config()?) + let mut conn = ctx.pool.get_async().await?; + let db_data = db::load(&mut conn, &ctx.config) .await .context("Loading crate data from database for consistency check")?; tracing::info!("Loading data from index..."); let index_data = spawn_blocking({ - let index = index.clone(); + let index = ctx.index.clone(); move || index::load(&index) }) .await @@ -80,19 +78,13 @@ struct HandleResult { yanks_corrected: u32, } -async fn handle_diff<'a, I, C>(ctx: &C, iter: I, dry_run: bool) -> Result +async fn handle_diff<'a, I>(ctx: &Context, iter: I, dry_run: bool) -> Result where I: Iterator, - C: Context, { let mut result = HandleResult::default(); - let config = ctx.config()?; - - let storage = ctx.async_storage().await?; - let build_queue = ctx.async_build_queue().await?; - - let mut conn = ctx.async_pool().await?.get_async().await?; + let mut conn = ctx.pool.get_async().await?; for difference in iter { println!("{difference}"); @@ -100,7 +92,8 @@ where match difference { diff::Difference::CrateNotInIndex(name) => { if !dry_run - && let Err(err) = delete::delete_crate(&mut conn, &storage, &config, name).await + && let Err(err) = + delete::delete_crate(&mut conn, &ctx.async_storage, &ctx.config, name).await { warn!("{:?}", err); } @@ -109,7 +102,8 @@ where diff::Difference::CrateNotInDb(name, versions) => { for version in versions { if !dry_run - && let Err(err) = build_queue + && let Err(err) = ctx + .async_build_queue .add_crate(name, version, BUILD_PRIORITY, None) .await { @@ -120,8 +114,14 @@ where } diff::Difference::ReleaseNotInIndex(name, version) => { if !dry_run - && let Err(err) = - delete::delete_version(&mut conn, &storage, &config, name, version).await + && let Err(err) = delete::delete_version( + &mut conn, + &ctx.async_storage, + &ctx.config, + name, + version, + ) + .await { warn!("{:?}", err); } @@ -129,7 +129,8 @@ where } diff::Difference::ReleaseNotInDb(name, version) => { if !dry_run - && let Err(err) = build_queue + && let Err(err) = ctx + .async_build_queue 
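+                        // add_crate() only enqueues the release; the actual
+                        // docs build happens later in the queue-builder
+                        // thread (see src/utils/queue_builder.rs below).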
.add_crate(name, version, BUILD_PRIORITY, None) .await { @@ -138,7 +139,12 @@ where result.builds_queued += 1; } diff::Difference::ReleaseYank(name, version, yanked) => { - if !dry_run && let Err(err) = build_queue.set_yanked(name, version, *yanked).await { + if !dry_run + && let Err(err) = ctx + .async_build_queue + .set_yanked(name, version, *yanked) + .await + { warn!("{:?}", err); } result.yanks_corrected += 1; @@ -157,7 +163,7 @@ mod tests { use sqlx::Row as _; async fn count(env: &TestEnvironment, sql: &str) -> Result { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; Ok(sqlx::query_scalar(sql).fetch_one(&mut *conn).await?) } @@ -165,7 +171,7 @@ mod tests { where O: Send + Unpin + for<'r> sqlx::Decode<'r, sqlx::Postgres> + sqlx::Type, { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; Ok::<_, anyhow::Error>( sqlx::query(sql) .fetch_all(&mut *conn) @@ -190,7 +196,7 @@ mod tests { let diff = [Difference::CrateNotInIndex("krate".into())]; // calling with dry-run leads to no change - handle_diff(&*env, diff.iter(), true).await?; + handle_diff(&env.context, diff.iter(), true).await?; assert_eq!( count(&env, "SELECT count(*) FROM crates WHERE name = 'krate'").await?, @@ -198,7 +204,7 @@ mod tests { ); // without dry-run the crate will be deleted - handle_diff(&*env, diff.iter(), false).await?; + handle_diff(&env.context, diff.iter(), false).await?; assert_eq!( count(&env, "SELECT count(*) FROM crates WHERE name = 'krate'").await?, @@ -232,11 +238,11 @@ mod tests { assert_eq!(count(&env, "SELECT count(*) FROM releases").await?, 2); - handle_diff(&*env, diff.iter(), true).await?; + handle_diff(&env.context, diff.iter(), true).await?; assert_eq!(count(&env, "SELECT count(*) FROM releases").await?, 2); - handle_diff(&*env, diff.iter(), false).await?; + handle_diff(&env.context, diff.iter(), false).await?; assert_eq!( single_row::(&env, "SELECT version FROM releases").await?, @@ -264,14 +270,14 @@ mod tests { false, )]; - handle_diff(&*env, diff.iter(), true).await?; + handle_diff(&env.context, diff.iter(), true).await?; assert_eq!( single_row::(&env, "SELECT yanked FROM releases").await?, vec![true] ); - handle_diff(&*env, diff.iter(), false).await?; + handle_diff(&env.context, diff.iter(), false).await?; assert_eq!( single_row::(&env, "SELECT yanked FROM releases").await?, @@ -287,13 +293,13 @@ mod tests { async_wrapper(|env| async move { let diff = [Difference::ReleaseNotInDb("krate".into(), "0.1.1".into())]; - handle_diff(&*env, diff.iter(), true).await?; + handle_diff(&env.context, diff.iter(), true).await?; - let build_queue = env.async_build_queue().await; + let build_queue = env.async_build_queue(); assert!(build_queue.queued_crates().await?.is_empty()); - handle_diff(&*env, diff.iter(), false).await?; + handle_diff(&env.context, diff.iter(), false).await?; assert_eq!( build_queue @@ -316,13 +322,13 @@ mod tests { vec!["0.1.1".into(), "0.1.2".into()], )]; - handle_diff(&*env, diff.iter(), true).await?; + handle_diff(&env.context, diff.iter(), true).await?; - let build_queue = env.async_build_queue().await; + let build_queue = env.async_build_queue(); assert!(build_queue.queued_crates().await?.is_empty()); - handle_diff(&*env, diff.iter(), false).await?; + handle_diff(&env.context, diff.iter(), false).await?; assert_eq!( build_queue diff --git a/src/utils/daemon.rs b/src/utils/daemon.rs index 26ecb50bc..7699774e6 100644 --- a/src/utils/daemon.rs +++ 
b/src/utils/daemon.rs
@@ -12,15 +12,15 @@ use std::future::Future;
 use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
-use tokio::{runtime::Runtime, task::spawn_blocking, time::Instant};
+use tokio::{runtime, task::spawn_blocking, time::Instant};
 use tracing::{debug, info};

 /// Run the registry watcher
 /// NOTE: this should only be run once, otherwise crates would be added
 /// to the queue multiple times.
 pub async fn watch_registry(
-    build_queue: Arc<AsyncBuildQueue>,
-    config: Arc<Config>,
+    build_queue: &AsyncBuildQueue,
+    config: &Config,
     index: Arc<Index>,
 ) -> Result<(), Error> {
     let mut last_gc = Instant::now();
@@ -52,29 +52,28 @@ pub async fn watch_registry(
     }
 }

-fn start_registry_watcher<C: Context>(context: &C) -> Result<(), Error> {
-    let build_queue = context.runtime()?.block_on(context.async_build_queue())?;
-    let config = context.config()?;
-    let index = context.index()?;
-    let runtime = context.runtime()?;
+fn start_registry_watcher(context: &Context) -> Result<(), Error> {
+    let build_queue = context.async_build_queue.clone();
+    let config = context.config.clone();
+    let index = context.index.clone();

-    runtime.spawn(async {
+    context.runtime.spawn(async move {
         // space this out to prevent it from clashing against the queue-builder thread on launch
         tokio::time::sleep(Duration::from_secs(30)).await;
-        watch_registry(build_queue, config, index).await
+        watch_registry(&build_queue, &config, index).await
     });

     Ok(())
 }

-pub fn start_background_repository_stats_updater<C: Context>(context: &C) -> Result<(), Error> {
+pub fn start_background_repository_stats_updater(context: &Context) -> Result<(), Error> {
     // This call will still skip github repositories updates and continue if no token is provided
     // (gitlab doesn't require to have a token). The only time this can return an error is when
     // creating a pool or if config fails, which shouldn't happen here because this is run right at
     // startup.
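     // `async_cron` (defined at the bottom of this file) spawns a task on the
     // given runtime that re-runs the provided closure on a fixed interval;
     // the closure's `Result<(), Error>` output lets a failed stats update be
     // reported without stopping subsequent runs.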
-    let updater = context.repository_stats_updater()?;
-    let runtime = context.runtime()?;
+    let updater = context.repository_stats_updater.clone();
+    let runtime = context.runtime.clone();
     async_cron(
         &runtime,
         "repository stats updater",
@@ -90,11 +89,11 @@ pub fn start_background_repository_stats_updater<C: Context>(context: &C) -> Res
     Ok(())
 }

-pub fn start_background_queue_rebuild<C: Context>(context: &C) -> Result<(), Error> {
-    let runtime = context.runtime()?;
-    let pool = context.pool()?;
-    let config = context.config()?;
-    let build_queue = runtime.block_on(context.async_build_queue())?;
+pub fn start_background_queue_rebuild(context: &Context) -> Result<(), Error> {
+    let runtime = context.runtime.clone();
+    let pool = context.pool.clone();
+    let config = context.config.clone();
+    let build_queue = context.async_build_queue.clone();

     if config.max_queued_rebuilds.is_none() {
         info!("rebuild config incomplete, skipping rebuild queueing");
@@ -119,12 +118,12 @@ pub fn start_background_queue_rebuild<C: Context>(context: &C) -> Result<(), Err
     Ok(())
 }

-pub fn start_background_cdn_invalidator<C: Context>(context: &C) -> Result<(), Error> {
-    let metrics = context.instance_metrics()?;
-    let config = context.config()?;
-    let pool = context.pool()?;
-    let runtime = context.runtime()?;
-    let cdn = runtime.block_on(context.cdn())?;
+pub fn start_background_cdn_invalidator(context: &Context) -> Result<(), Error> {
+    let metrics = context.instance_metrics.clone();
+    let config = context.config.clone();
+    let pool = context.pool.clone();
+    let runtime = context.runtime.clone();
+    let cdn = context.cdn.clone();

     if config.cloudfront_distribution_id_web.is_none()
         && config.cloudfront_distribution_id_static.is_none()
@@ -178,10 +177,7 @@ pub fn start_background_cdn_invalidator<C: Context>(context: &C) -> Result<(), E
     Ok(())
 }

-pub fn start_daemon<C: Context + Send + Sync + 'static>(
-    context: C,
-    enable_registry_watcher: bool,
-) -> Result<(), Error> {
+pub fn start_daemon(context: Context, enable_registry_watcher: bool) -> Result<(), Error> {
     let context = Arc::new(context);

     // Start the web server before doing anything more expensive
@@ -189,29 +185,27 @@ pub fn start_daemon<C: Context + Send + Sync + 'static>(
     info!("Starting web server");
     let webserver_thread = thread::spawn({
         let context = context.clone();
-        move || start_web_server(None, &*context)
+        move || start_web_server(None, &context)
     });

     if enable_registry_watcher {
         // check new crates every minute
-        start_registry_watcher(&*context)?;
+        start_registry_watcher(&context)?;
     }

     // build new crates every minute
-    let build_queue = context.build_queue()?;
-    let config = context.config()?;
-    let rustwide_builder = RustwideBuilder::init(&*context)?;
+    let rustwide_builder = RustwideBuilder::init(&context)?;
     thread::Builder::new()
         .name("build queue reader".to_string())
         .spawn({
             let context = context.clone();
-            move || queue_builder(&*context, rustwide_builder, build_queue, config).unwrap()
+            move || queue_builder(&context, rustwide_builder).unwrap()
         })
         .unwrap();

-    start_background_repository_stats_updater(&*context)?;
-    start_background_cdn_invalidator(&*context)?;
-    start_background_queue_rebuild(&*context)?;
+    start_background_repository_stats_updater(&context)?;
+    start_background_cdn_invalidator(&context)?;
+    start_background_queue_rebuild(&context)?;

     // NOTE: if an error occurred earlier in `start_daemon`, the server will _not_ be joined -
     // instead it will get killed when the process exits.
@@ -220,8 +214,12 @@ pub fn start_daemon<C: Context + Send + Sync + 'static>(
         .map_err(|err| anyhow!("web server panicked: {:?}", err))?
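     // Joining the web-server thread means `start_daemon` only returns (or
     // errors) once the web server exits; the builder thread and background
     // tasks above are detached and die with the process.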
 }

-pub(crate) fn async_cron<F, Fut>(runtime: &Runtime, name: &'static str, interval: Duration, exec: F)
-where
+pub(crate) fn async_cron<F, Fut>(
+    runtime: &runtime::Handle,
+    name: &'static str,
+    interval: Duration,
+    exec: F,
+) where
     Fut: Future<Output = Result<(), Error>> + Send,
     F: Fn() -> Fut + Send + 'static,
 {
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
index 836fad2d3..bbde45481 100644
--- a/src/utils/mod.rs
+++ b/src/utils/mod.rs
@@ -191,7 +191,7 @@ mod tests {
     #[test]
     fn test_get_config_empty() {
         async_wrapper(|env| async move {
-            let mut conn = env.async_db().await.async_conn().await;
+            let mut conn = env.async_db().async_conn().await;
             sqlx::query!("DELETE FROM config")
                 .execute(&mut *conn)
                 .await?;
@@ -208,7 +208,7 @@ mod tests {
     #[test]
     fn test_set_and_get_config_() {
         async_wrapper(|env| async move {
-            let mut conn = env.async_db().await.async_conn().await;
+            let mut conn = env.async_db().async_conn().await;
             sqlx::query!("DELETE FROM config")
                 .execute(&mut *conn)
                 .await?;
diff --git a/src/utils/queue.rs b/src/utils/queue.rs
index fee83b2a7..7df91672a 100644
--- a/src/utils/queue.rs
+++ b/src/utils/queue.rs
@@ -80,7 +80,7 @@ mod tests {
     #[test]
     fn set_priority() {
         async_wrapper(|env| async move {
-            let db = env.async_db().await;
+            let db = env.async_db();
             let mut conn = db.async_conn().await;
             set_crate_priority(&mut conn, "docsrs-%", -100).await?;
@@ -121,7 +121,7 @@ mod tests {
     #[test]
     fn remove_priority() {
         async_wrapper(|env| async move {
-            let db = env.async_db().await;
+            let db = env.async_db();
             let mut conn = db.async_conn().await;
             set_crate_priority(&mut conn, "docsrs-%", -100).await?;
@@ -143,7 +143,7 @@ mod tests {
     #[test]
     fn get_priority() {
         async_wrapper(|env| async move {
-            let db = env.async_db().await;
+            let db = env.async_db();
             let mut conn = db.async_conn().await;
             set_crate_priority(&mut conn, "docsrs-%", -100).await?;
@@ -170,7 +170,7 @@ mod tests {
     #[test]
     fn get_default_priority() {
         async_wrapper(|env| async move {
-            let db = env.async_db().await;
+            let db = env.async_db();
             let mut conn = db.async_conn().await;
             assert_eq!(
diff --git a/src/utils/queue_builder.rs b/src/utils/queue_builder.rs
index 3430f62d4..8a1d90502 100644
--- a/src/utils/queue_builder.rs
+++ b/src/utils/queue_builder.rs
@@ -1,26 +1,23 @@
 use crate::Context;
-use crate::{BuildQueue, Config, docbuilder::RustwideBuilder, utils::report_error};
+use crate::{docbuilder::RustwideBuilder, utils::report_error};
 use anyhow::{Context as _, Error};
 use std::panic::{AssertUnwindSafe, catch_unwind};
-use std::sync::Arc;
 use std::time::Duration;
 use std::{fs, io, path::Path, thread};
 use tracing::{debug, error, warn};

-pub fn queue_builder<C: Context>(
-    context: &C,
-    mut builder: RustwideBuilder,
-    build_queue: Arc<BuildQueue>,
-    config: Arc<Config>,
-) -> Result<(), Error> {
+pub fn queue_builder(context: &Context, mut builder: RustwideBuilder) -> Result<(), Error> {
     loop {
-        if let Err(e) = remove_tempdirs(&config.temp_dir) {
+        let temp_dir = &context.config.temp_dir;
+        if let Err(e) = remove_tempdirs(temp_dir) {
             report_error(&anyhow::anyhow!(e).context(format!(
                 "failed to clean temporary directory {:?}",
-                &config.temp_dir
+                temp_dir
             )));
         }

+        let build_queue = &context.build_queue;
+
         // check lock file
         match build_queue.is_locked().context("could not get queue lock") {
             Ok(true) => {
diff --git a/src/web/build_details.rs b/src/web/build_details.rs
index 25aadd3c5..35c787156 100644
--- a/src/web/build_details.rs
+++ b/src/web/build_details.rs
@@ -183,7 +183,7 @@ mod tests {
     #[test]
     fn test_partial_build_result() {
         async_wrapper(|env| async move {
-            let mut conn =
env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let (_, build_id) = fake_release_that_failed_before_build( &mut conn, "foo", @@ -214,7 +214,7 @@ mod tests { #[test] fn test_partial_build_result_plus_default_target_from_previous_build() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let (release_id, build_id) = fake_release_that_failed_before_build( &mut conn, "foo", diff --git a/src/web/builds.rs b/src/web/builds.rs index a8c1261dd..0980d332a 100644 --- a/src/web/builds.rs +++ b/src/web/builds.rs @@ -205,11 +205,12 @@ mod tests { use crate::{ db::Overrides, test::{ - AxumResponseTestExt, AxumRouterTestExt, FakeBuild, async_wrapper, + AxumResponseTestExt, AxumRouterTestExt, FakeBuild, TestEnvironment, async_wrapper, fake_release_that_failed_before_build, }, web::cache::CachePolicy, }; + use anyhow::Result; use axum::{body::Body, http::Request}; use kuchikiki::traits::TendrilSink; use reqwest::StatusCode; @@ -218,7 +219,7 @@ mod tests { #[test] fn build_list_empty_build() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; fake_release_that_failed_before_build(&mut conn, "foo", "0.1.0", "some errors").await?; let response = env @@ -227,7 +228,7 @@ mod tests { .get("/crate/foo/0.1.0/builds") .await? .error_for_status()?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); let page = kuchikiki::parse_html().one(response.text().await?); let rows: Vec<_> = page @@ -271,7 +272,7 @@ mod tests { .await?; let response = env.web_app().await.get("/crate/foo/0.1.0/builds").await?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); let page = kuchikiki::parse_html().one(response.text().await?); let rows: Vec<_> = page @@ -291,154 +292,161 @@ mod tests { }); } - #[test] - fn build_trigger_rebuild_missing_config() { - async_wrapper(|env| async move { - env.override_config(|config| config.cratesio_token = None); - env.fake_release() + #[tokio::test(flavor = "multi_thread")] + async fn build_trigger_rebuild_missing_config() -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .cratesio_token(None) + .build()?, + ) + .await?; + + env.fake_release() + .await + .name("foo") + .version("0.1.0") + .create() + .await?; + + { + let response = env + .web_app() .await - .name("foo") - .version("0.1.0") - .create() + .get("/crate/regex/1.3.1/rebuild") .await?; + // Needs POST + assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED); + } - { - let response = env - .web_app() - .await - .get("/crate/regex/1.3.1/rebuild") - .await?; - // Needs POST - assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED); - } - - { - let response = env - .web_app() - .await - .post("/crate/regex/1.3.1/rebuild") - .await?; - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let json: serde_json::Value = response.json().await?; - assert_eq!( - json, - serde_json::json!({ - "title": "Unauthorized", - "message": "Endpoint is not configured" - }) - ); - } + { + let response = env + .web_app() + .await + .post("/crate/regex/1.3.1/rebuild") + .await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + let json: serde_json::Value = response.json().await?; + 
assert_eq!( + json, + serde_json::json!({ + "title": "Unauthorized", + "message": "Endpoint is not configured" + }) + ); + } - Ok(()) - }) + Ok(()) } - #[test] - fn build_trigger_rebuild_with_config() { - async_wrapper(|env| async move { - let correct_token = "foo137"; - env.override_config(|config| config.cratesio_token = Some(correct_token.into())); + #[tokio::test(flavor = "multi_thread")] + async fn build_trigger_rebuild_with_config() -> Result<()> { + let correct_token = "foo137"; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .cratesio_token(Some(correct_token.into())) + .build()?, + ) + .await?; - env.fake_release() + env.fake_release() + .await + .name("foo") + .version("0.1.0") + .create() + .await?; + + { + let response = env + .web_app() .await - .name("foo") - .version("0.1.0") - .create() + .post("/crate/regex/1.3.1/rebuild") + .await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + let json: serde_json::Value = response.json().await?; + assert_eq!( + json, + serde_json::json!({ + "title": "Unauthorized", + "message": "Missing authentication token" + }) + ); + } + + { + let app = env.web_app().await; + let response = app + .oneshot( + Request::builder() + .uri("/crate/regex/1.3.1/rebuild") + .method("POST") + .header("Authorization", "Bearer someinvalidtoken") + .body(Body::empty()) + .unwrap(), + ) + .await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + let json: serde_json::Value = response.json().await?; + assert_eq!( + json, + serde_json::json!({ + "title": "Unauthorized", + "message": "The token used for authentication is not valid" + }) + ); + } + + let build_queue = env.async_build_queue(); + + assert_eq!(build_queue.pending_count().await?, 0); + assert!(!build_queue.has_build_queued("foo", "0.1.0").await?); + + { + let app = env.web_app().await; + let response = app + .oneshot( + Request::builder() + .uri("/crate/foo/0.1.0/rebuild") + .method("POST") + .header("Authorization", &format!("Bearer {correct_token}")) + .body(Body::empty()) + .unwrap(), + ) .await?; + assert_eq!(response.status(), StatusCode::CREATED); + let json: serde_json::Value = response.json().await?; + assert_eq!(json, serde_json::json!({})); + } + + assert_eq!(build_queue.pending_count().await?, 1); + assert!(build_queue.has_build_queued("foo", "0.1.0").await?); + + { + let app = env.web_app().await; + let response = app + .oneshot( + Request::builder() + .uri("/crate/foo/0.1.0/rebuild") + .method("POST") + .header("Authorization", &format!("Bearer {correct_token}")) + .body(Body::empty()) + .unwrap(), + ) + .await?; + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let json: serde_json::Value = response.json().await?; + assert_eq!( + json, + serde_json::json!({ + "title": "Bad request", + "message": "crate foo 0.1.0 already queued for rebuild" + }) + ); + } - { - let response = env - .web_app() - .await - .post("/crate/regex/1.3.1/rebuild") - .await?; - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let json: serde_json::Value = response.json().await?; - assert_eq!( - json, - serde_json::json!({ - "title": "Unauthorized", - "message": "Missing authentication token" - }) - ); - } - - { - let app = env.web_app().await; - let response = app - .oneshot( - Request::builder() - .uri("/crate/regex/1.3.1/rebuild") - .method("POST") - .header("Authorization", "Bearer someinvalidtoken") - .body(Body::empty()) - .unwrap(), - ) - .await?; - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let json: serde_json::Value = 
response.json().await?; - assert_eq!( - json, - serde_json::json!({ - "title": "Unauthorized", - "message": "The token used for authentication is not valid" - }) - ); - } - - let build_queue = env.async_build_queue().await; - - assert_eq!(build_queue.pending_count().await?, 0); - assert!(!build_queue.has_build_queued("foo", "0.1.0").await?); - - { - let app = env.web_app().await; - let response = app - .oneshot( - Request::builder() - .uri("/crate/foo/0.1.0/rebuild") - .method("POST") - .header("Authorization", &format!("Bearer {correct_token}")) - .body(Body::empty()) - .unwrap(), - ) - .await?; - assert_eq!(response.status(), StatusCode::CREATED); - let json: serde_json::Value = response.json().await?; - assert_eq!(json, serde_json::json!({})); - } - - assert_eq!(build_queue.pending_count().await?, 1); - assert!(build_queue.has_build_queued("foo", "0.1.0").await?); - - { - let app = env.web_app().await; - let response = app - .oneshot( - Request::builder() - .uri("/crate/foo/0.1.0/rebuild") - .method("POST") - .header("Authorization", &format!("Bearer {correct_token}")) - .body(Body::empty()) - .unwrap(), - ) - .await?; - assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let json: serde_json::Value = response.json().await?; - assert_eq!( - json, - serde_json::json!({ - "title": "Bad request", - "message": "crate foo 0.1.0 already queued for rebuild" - }) - ); - } - - assert_eq!(build_queue.pending_count().await?, 1); - assert!(build_queue.has_build_queued("foo", "0.1.0").await?); + assert_eq!(build_queue.pending_count().await?, 1); + assert!(build_queue.has_build_queued("foo", "0.1.0").await?); - Ok(()) - }); + Ok(()) } #[test] @@ -454,7 +462,7 @@ mod tests { let response = env.web_app().await.get("/crate/foo/0.1.0/builds").await?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); let page = kuchikiki::parse_html().one(response.text().await?); let rows: Vec<_> = page @@ -488,7 +496,7 @@ mod tests { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let limits = Overrides { memory: Some(6 * 1024 * 1024 * 1024), targets: Some(1), diff --git a/src/web/cache.rs b/src/web/cache.rs index 1dbdad773..3602e0adc 100644 --- a/src/web/cache.rs +++ b/src/web/cache.rs @@ -111,7 +111,8 @@ pub(crate) async fn cache_middleware(req: AxumHttpRequest, next: Next) -> AxumRe #[cfg(test)] mod tests { use super::*; - use crate::test::wrapper; + use crate::test::TestEnvironment; + use anyhow::Result; use test_case::test_case; #[test_case(CachePolicy::NoCaching, Some("max-age=0"))] @@ -125,76 +126,72 @@ mod tests { CachePolicy::ForeverInCdnAndStaleInBrowser, Some("stale-while-revalidate=86400") )] - fn render(cache: CachePolicy, expected: Option<&str>) { - wrapper(|env| { - assert_eq!( - cache.render(&env.config()), - expected.map(|s| HeaderValue::from_str(s).unwrap()) - ); - Ok(()) - }); + fn render(cache: CachePolicy, expected: Option<&str>) -> Result<()> { + let config = TestEnvironment::base_config().build()?; + assert_eq!( + cache.render(&config), + expected.map(|s| HeaderValue::from_str(s).unwrap()) + ); + Ok(()) } #[test] - fn render_stale_without_config() { - wrapper(|env| { - env.override_config(|config| config.cache_control_stale_while_revalidate = None); - - assert!( - CachePolicy::ForeverInCdnAndStaleInBrowser - .render(&env.config()) - .is_none() - ); - Ok(()) - }); + fn render_stale_without_config() -> Result<()> { + let 
config = TestEnvironment::base_config() + .cache_control_stale_while_revalidate(None) + .build()?; + + assert!( + CachePolicy::ForeverInCdnAndStaleInBrowser + .render(&config) + .is_none() + ); + + Ok(()) } #[test] - fn render_stale_with_config() { - wrapper(|env| { - env.override_config(|config| { - config.cache_control_stale_while_revalidate = Some(666); - }); - - assert_eq!( - CachePolicy::ForeverInCdnAndStaleInBrowser - .render(&env.config()) - .unwrap(), - "stale-while-revalidate=666" - ); - Ok(()) - }); + fn render_stale_with_config() -> Result<()> { + let config = TestEnvironment::base_config() + .cache_control_stale_while_revalidate(Some(666)) + .build()?; + + assert_eq!( + CachePolicy::ForeverInCdnAndStaleInBrowser + .render(&config) + .unwrap(), + "stale-while-revalidate=666" + ); + + Ok(()) } #[test] - fn render_forever_in_cdn_disabled() { - wrapper(|env| { - env.override_config(|config| { - config.cache_invalidatable_responses = false; - }); - - assert_eq!( - CachePolicy::ForeverInCdn.render(&env.config()).unwrap(), - "max-age=0" - ); - Ok(()) - }); + fn render_forever_in_cdn_disabled() -> Result<()> { + let config = TestEnvironment::base_config() + .cache_invalidatable_responses(false) + .build()?; + + assert_eq!( + CachePolicy::ForeverInCdn.render(&config).unwrap(), + "max-age=0" + ); + + Ok(()) } #[test] - fn render_forever_in_cdn_or_stale_disabled() { - wrapper(|env| { - env.override_config(|config| { - config.cache_invalidatable_responses = false; - }); - - assert_eq!( - CachePolicy::ForeverInCdnAndStaleInBrowser - .render(&env.config()) - .unwrap(), - "max-age=0" - ); - Ok(()) - }); + fn render_forever_in_cdn_or_stale_disabled() -> Result<()> { + let config = TestEnvironment::base_config() + .cache_invalidatable_responses(false) + .build()?; + + assert_eq!( + CachePolicy::ForeverInCdnAndStaleInBrowser + .render(&config) + .unwrap(), + "max-age=0" + ); + Ok(()) } } diff --git a/src/web/crate_details.rs b/src/web/crate_details.rs index 85a5b8706..d6e5692b9 100644 --- a/src/web/crate_details.rs +++ b/src/web/crate_details.rs @@ -889,7 +889,7 @@ mod tests { #[test] fn test_crate_details_documentation_url_is_none_when_url_is_docs_rs() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; env.fake_release() @@ -932,7 +932,7 @@ mod tests { #[test] fn test_last_successful_build_when_last_releases_failed_or_yanked() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -981,7 +981,7 @@ mod tests { #[test] fn test_last_successful_build_when_all_releases_failed_or_yanked() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1015,7 +1015,7 @@ mod tests { #[test] fn test_last_successful_build_with_intermittent_releases_failed_or_yanked() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1055,7 +1055,7 @@ mod tests { #[test] fn test_releases_should_be_sorted() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); // Add new releases of 'foo' out-of-order since CrateDetails should sort them descending env.fake_release() @@ -1224,8 +1224,7 @@ mod tests { .await?; let response = env.web_app().await.get("/crate/foo/0.0.1").await?; - response - .assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, &env.config()); + 
response.assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, env.config()); assert!( response @@ -1241,7 +1240,7 @@ mod tests { #[test] fn test_latest_version() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1278,7 +1277,7 @@ mod tests { #[test] fn test_latest_version_ignores_prerelease() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1315,7 +1314,7 @@ mod tests { #[test] fn test_latest_version_ignores_yanked() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1353,7 +1352,7 @@ mod tests { #[test] fn test_latest_version_only_yanked() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1393,7 +1392,7 @@ mod tests { #[test] fn test_latest_version_in_progress() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1500,7 +1499,7 @@ mod tests { #[test] fn test_updating_owners() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -1786,7 +1785,7 @@ mod tests { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; sqlx::query!("UPDATE releases SET features = NULL WHERE id = $1", id.0) .execute(&mut *conn) .await?; @@ -1807,7 +1806,7 @@ mod tests { #[test] fn test_minimal_failed_release_doesnt_error_features() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; fake_release_that_failed_before_build(&mut conn, "foo", "0.1.0", "some errors").await?; let text_content = env @@ -1831,7 +1830,7 @@ mod tests { #[test] fn test_minimal_failed_release_doesnt_error() { async_wrapper(|env| async move { - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; fake_release_that_failed_before_build(&mut conn, "foo", "0.1.0", "some errors").await?; let text_content = env @@ -1914,7 +1913,7 @@ mod tests { .to_string(); let response = env.web_app().await.get(&platform_menu_url).await.unwrap(); assert!(response.status().is_success()); - response.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + response.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); let list2 = check_links( response.text().await.unwrap(), true, @@ -2094,7 +2093,7 @@ mod tests { let resp = web.get("/crate/dummy/latest").await?; assert!(resp.status().is_success()); - resp.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); let body = resp.text().await?; assert!(body.contains(" Result<()> { + let env = TestEnvironment::new().await?; - env.fake_release().await.create().await?; + let now = Utc::now(); - let mut file = File::from_path( - &*env.async_storage().await, - "rustdoc/fake-package/1.0.0/fake-package/index.html", - &env.config(), - ) - .await - .unwrap(); - file.0.date_updated = now; - - let resp = file.into_response(); - assert!(resp.headers().get(CACHE_CONTROL).is_none()); - let cache = resp - .extensions() - .get::() - .expect("missing cache response extension"); - assert!(matches!(cache, CachePolicy::ForeverInCdnAndBrowser)); - assert_eq!( - 
resp.headers().get(LAST_MODIFIED).unwrap(), - &now.format("%a, %d %b %Y %T UTC").to_string(), - ); - - Ok(()) - }); + env.fake_release().await.create().await?; + + let mut file = File::from_path( + env.async_storage(), + "rustdoc/fake-package/1.0.0/fake-package/index.html", + env.config(), + ) + .await?; + + file.0.date_updated = now; + + let resp = file.into_response(); + assert!(resp.headers().get(CACHE_CONTROL).is_none()); + let cache = resp + .extensions() + .get::() + .expect("missing cache response extension"); + assert!(matches!(cache, CachePolicy::ForeverInCdnAndBrowser)); + assert_eq!( + resp.headers().get(LAST_MODIFIED).unwrap(), + &now.format("%a, %d %b %Y %T UTC").to_string(), + ); + + Ok(()) } - #[test] - fn test_max_size() { + #[tokio::test(flavor = "multi_thread")] + async fn test_max_size() -> Result<()> { const MAX_SIZE: usize = 1024; const MAX_HTML_SIZE: usize = 128; - async_wrapper(|env| async move { - env.override_config(|config| { - config.max_file_size = MAX_SIZE; - config.max_file_size_html = MAX_HTML_SIZE; - }); + let env = Rc::new( + TestEnvironment::with_config( + TestEnvironment::base_config() + .max_file_size(MAX_SIZE) + .max_file_size_html(MAX_HTML_SIZE) + .build()?, + ) + .await?, + ); - env.fake_release() + env.fake_release() + .await + .name("dummy") + .version("0.1.0") + .rustdoc_file_with("small.html", &[b'A'; MAX_HTML_SIZE / 2] as &[u8]) + .rustdoc_file_with("exact.html", &[b'A'; MAX_HTML_SIZE] as &[u8]) + .rustdoc_file_with("big.html", &[b'A'; MAX_HTML_SIZE * 2] as &[u8]) + .rustdoc_file_with("small.js", &[b'A'; MAX_SIZE / 2] as &[u8]) + .rustdoc_file_with("exact.js", &[b'A'; MAX_SIZE] as &[u8]) + .rustdoc_file_with("big.js", &[b'A'; MAX_SIZE * 2] as &[u8]) + .create() + .await?; + + let file = |path| { + let env = env.clone(); + async move { + File::from_path( + env.async_storage(), + &format!("rustdoc/dummy/0.1.0/{path}"), + env.config(), + ) .await - .name("dummy") - .version("0.1.0") - .rustdoc_file_with("small.html", &[b'A'; MAX_HTML_SIZE / 2] as &[u8]) - .rustdoc_file_with("exact.html", &[b'A'; MAX_HTML_SIZE] as &[u8]) - .rustdoc_file_with("big.html", &[b'A'; MAX_HTML_SIZE * 2] as &[u8]) - .rustdoc_file_with("small.js", &[b'A'; MAX_SIZE / 2] as &[u8]) - .rustdoc_file_with("exact.js", &[b'A'; MAX_SIZE] as &[u8]) - .rustdoc_file_with("big.js", &[b'A'; MAX_SIZE * 2] as &[u8]) - .create() - .await?; - - let file = |path| { - let env = env.clone(); - async move { - File::from_path( - &*env.async_storage().await, - &format!("rustdoc/dummy/0.1.0/{path}"), - &env.config(), - ) - .await - } - }; - let assert_len = |len, path| async move { - assert_eq!(len, file(path).await.unwrap().0.content.len()); - }; - let assert_too_big = |path| async move { - file(path) - .await - .unwrap_err() - .downcast_ref::() - .and_then(|io| io.get_ref()) - .and_then(|err| err.downcast_ref::()) - .is_some() - }; - - assert_len(MAX_HTML_SIZE / 2, "small.html").await; - assert_len(MAX_HTML_SIZE, "exact.html").await; - assert_len(MAX_SIZE / 2, "small.js").await; - assert_len(MAX_SIZE, "exact.js").await; - - assert_too_big("big.html").await; - assert_too_big("big.js").await; - - Ok(()) - }) + } + }; + let assert_len = |len, path| async move { + assert_eq!(len, file(path).await.unwrap().0.content.len()); + }; + let assert_too_big = |path| async move { + file(path) + .await + .unwrap_err() + .downcast_ref::() + .and_then(|io| io.get_ref()) + .and_then(|err| err.downcast_ref::()) + .is_some() + }; + + assert_len(MAX_HTML_SIZE / 2, "small.html").await; + assert_len(MAX_HTML_SIZE, 
"exact.html").await; + assert_len(MAX_SIZE / 2, "small.js").await; + assert_len(MAX_SIZE, "exact.js").await; + + assert_too_big("big.html").await; + assert_too_big("big.js").await; + + Ok(()) } } diff --git a/src/web/metrics.rs b/src/web/metrics.rs index 33585adf4..7b7ec5cd8 100644 --- a/src/web/metrics.rs +++ b/src/web/metrics.rs @@ -110,7 +110,6 @@ pub(crate) async fn request_recorder( #[cfg(test)] mod tests { - use crate::Context; use crate::test::{AxumResponseTestExt, AxumRouterTestExt, async_wrapper}; use std::collections::HashMap; @@ -187,7 +186,7 @@ mod tests { } // this shows what the routes were *actually* recorded as, making it easier to update ROUTES if the name changes. - let metrics_serialized = metrics.gather(&env.async_pool().await?)?; + let metrics_serialized = metrics.gather(&env.context.pool)?; let all_routes_visited = metrics_serialized .iter() .find(|x| x.name() == "docsrs_routes_visited") diff --git a/src/web/mod.rs b/src/web/mod.rs index 86089b694..615cd32d8 100644 --- a/src/web/mod.rs +++ b/src/web/mod.rs @@ -400,17 +400,13 @@ async fn set_sentry_transaction_name_from_axum_route( next.run(request).await } -async fn apply_middleware( +async fn apply_middleware( router: AxumRouter, - context: &C, + context: &Context, template_data: Option>, ) -> Result { - let config = context.config()?; let has_templates = template_data.is_some(); - let async_storage = context.async_storage().await?; - let build_queue = context.async_build_queue().await?; - Ok(router.layer( ServiceBuilder::new() .layer(TraceLayer::new_for_http()) @@ -421,18 +417,21 @@ async fn apply_middleware( )) .layer(CatchPanicLayer::new()) .layer(option_layer( - config + context + .config .report_request_timeouts .then_some(middleware::from_fn(log_timeouts_to_sentry)), )) - .layer(option_layer(config.request_timeout.map(TimeoutLayer::new))) - .layer(Extension(context.async_pool().await?)) - .layer(Extension(build_queue)) - .layer(Extension(context.service_metrics()?)) - .layer(Extension(context.instance_metrics()?)) - .layer(Extension(context.config()?)) - .layer(Extension(context.registry_api()?)) - .layer(Extension(async_storage)) + .layer(option_layer( + context.config.request_timeout.map(TimeoutLayer::new), + )) + .layer(Extension(context.pool.clone())) + .layer(Extension(context.async_build_queue.clone())) + .layer(Extension(context.service_metrics.clone())) + .layer(Extension(context.instance_metrics.clone())) + .layer(Extension(context.config.clone())) + .layer(Extension(context.registry_api.clone())) + .layer(Extension(context.async_storage.clone())) .layer(option_layer(template_data.map(Extension))) .layer(middleware::from_fn(csp::csp_middleware)) .layer(option_layer(has_templates.then_some(middleware::from_fn( @@ -442,20 +441,20 @@ async fn apply_middleware( )) } -pub(crate) async fn build_axum_app( - context: &C, +pub(crate) async fn build_axum_app( + context: &Context, template_data: Arc, ) -> Result { apply_middleware(routes::build_axum_routes(), context, Some(template_data)).await } -pub(crate) async fn build_metrics_axum_app(context: &C) -> Result { +pub(crate) async fn build_metrics_axum_app(context: &Context) -> Result { apply_middleware(routes::build_metric_routes(), context, None).await } -pub fn start_background_metrics_webserver( +pub fn start_background_metrics_webserver( addr: Option, - context: &C, + context: &Context, ) -> Result<(), Error> { let axum_addr: SocketAddr = addr.unwrap_or(DEFAULT_BIND); @@ -465,12 +464,12 @@ pub fn start_background_metrics_webserver( axum_addr.port() ); - let 
runtime = context.runtime()?; - let metrics_axum_app = runtime + let metrics_axum_app = context + .runtime .block_on(build_metrics_axum_app(context))? .into_make_service(); - runtime.spawn(async move { + context.runtime.spawn(async move { match tokio::net::TcpListener::bind(axum_addr) .await .context("error binding socket for metrics web server") @@ -493,8 +492,8 @@ pub fn start_background_metrics_webserver( } #[instrument(skip_all)] -pub fn start_web_server(addr: Option, context: &C) -> Result<(), Error> { - let template_data = Arc::new(TemplateData::new(context.config()?.render_threads)?); +pub fn start_web_server(addr: Option, context: &Context) -> Result<(), Error> { + let template_data = Arc::new(TemplateData::new(context.config.render_threads)?); let axum_addr = addr.unwrap_or(DEFAULT_BIND); @@ -504,13 +503,7 @@ pub fn start_web_server(addr: Option, context: &C) -> Re axum_addr.port() ); - // initialize the storage and the repo-updater in sync context - // so it can stay sync for now and doesn't fail when they would - // be initialized while starting the server below. - context.storage()?; - context.repository_stats_updater()?; - - context.runtime()?.block_on(async { + context.runtime.block_on(async { let app = build_axum_app(context, template_data) .await? .into_make_service(); @@ -1026,7 +1019,7 @@ mod test { // https://github.com/rust-lang/docs.rs/issues/223 fn prereleases_are_not_considered_for_semver() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let version = |v| version(v, db); let release = |v| release(v, &env); @@ -1086,7 +1079,7 @@ mod test { // https://github.com/rust-lang/docs.rs/issues/221 fn yanked_crates_are_not_considered() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let release_id = release("0.3.0", &env).await; @@ -1111,7 +1104,7 @@ mod test { #[test] fn in_progress_releases_are_ignored_when_others_match() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); // normal release release("1.0.0", &env).await; @@ -1141,7 +1134,7 @@ mod test { // https://github.com/rust-lang/docs.rs/issues/1682 fn prereleases_are_considered_when_others_dont_match() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); // normal release release("1.0.0", &env).await; @@ -1166,7 +1159,7 @@ mod test { // vaguely related to https://github.com/rust-lang/docs.rs/issues/395 fn metadata_has_no_effect() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); release("0.1.0+4.1", &env).await; release("0.1.1", &env).await; @@ -1262,7 +1255,7 @@ mod test { fn metadata_from_crate() { async_wrapper(|env| async move { release("0.1.0", &env).await; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; let metadata = MetaData::from_crate( &mut conn, "foo", diff --git a/src/web/releases.rs b/src/web/releases.rs index 9ba6856c5..3ae276466 100644 --- a/src/web/releases.rs +++ b/src/web/releases.rs @@ -814,7 +814,7 @@ mod tests { use crate::db::{finish_build, initialize_build, initialize_crate, initialize_release}; use crate::registry_api::{CrateOwner, OwnerKind}; use crate::test::{ - AxumResponseTestExt, AxumRouterTestExt, FakeBuild, async_wrapper, + AxumResponseTestExt, AxumRouterTestExt, FakeBuild, TestEnvironment, async_wrapper, fake_release_that_failed_before_build, }; use anyhow::Error; @@ -828,7 +828,7 @@ mod tests { #[test] fn 
test_release_list_with_incomplete_release_and_successful_build() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); let mut conn = db.async_conn().await; let crate_id = initialize_crate(&mut conn, "foo").await?; @@ -863,7 +863,7 @@ mod tests { #[test] fn get_releases_by_stars() { async_wrapper(|env| async move { - let db = env.async_db().await; + let db = env.async_db(); env.fake_release() .await @@ -986,7 +986,7 @@ mod tests { // crate in the db breaks this test. // That's why we reset the id-sequence to zero for this test. - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; sqlx::query!(r#"ALTER SEQUENCE crates_id_seq RESTART WITH 1"#) .execute(&mut *conn) .await?; @@ -1055,139 +1055,144 @@ mod tests { }) } - #[test] - fn search_result_can_retrieve_sort_by_from_pagination() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn search_result_can_retrieve_sort_by_from_pagination() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .create() - .await?; - - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "some_random_crate".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - Matcher::UrlEncoded("page".into(), "2".into()), - Matcher::UrlEncoded("sort".into(), "recent-updates".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - ], - "meta": { - "next_page": "?q=some_random_crate&sort=recent-updates&per_page=30&page=2", - "prev_page": "?q=some_random_crate&sort=recent-updates&per_page=30&page=1", - } - }) - .to_string(), - ) - .create_async().await; - - // click the "Next Page" Button, the "Sort by" SelectBox should keep the same option. 
- let next_page_url = format!( - "/releases/search?paginate={}", - b64.encode("?q=some_random_crate&sort=recent-updates&per_page=30&page=2"), - ); - let response = web.get(&next_page_url).await?; - assert!(response.status().is_success()); + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; - let page = kuchikiki::parse_html().one(response.text().await?); - let is_target_option_selected = page - .select("#nav-sort > option") - .expect("missing option") - .any(|el| { - let attributes = el.attributes.borrow(); - attributes.get("selected").is_some() - && attributes.get("value").unwrap() == "recent-updates" - }); - assert!(is_target_option_selected); + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .create() + .await?; - Ok(()) - }) - } + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "some_random_crate".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + Matcher::UrlEncoded("page".into(), "2".into()), + Matcher::UrlEncoded("sort".into(), "recent-updates".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + ], + "meta": { + "next_page": "?q=some_random_crate&sort=recent-updates&per_page=30&page=2", + "prev_page": "?q=some_random_crate&sort=recent-updates&per_page=30&page=1", + } + }) + .to_string(), + ) + .create_async() + .await; + + // click the "Next Page" Button, the "Sort by" SelectBox should keep the same option. + let next_page_url = format!( + "/releases/search?paginate={}", + b64.encode("?q=some_random_crate&sort=recent-updates&per_page=30&page=2"), + ); + let response = web.get(&next_page_url).await?; + assert!(response.status().is_success()); - #[test] - fn search_result_passes_cratesio_pagination_links() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); + let page = kuchikiki::parse_html().one(response.text().await?); + let is_target_option_selected = page + .select("#nav-sort > option") + .expect("missing option") + .any(|el| { + let attributes = el.attributes.borrow(); + attributes.get("selected").is_some() + && attributes.get("value").unwrap() == "recent-updates" }); + assert!(is_target_option_selected); - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .create() - .await?; + Ok(()) + } - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "some_random_crate".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - ], - "meta": { - "next_page": "?some=parameters&that=cratesio&might=return", - "prev_page": "?and=the¶meters=for&the=previouspage", - } - }) - .to_string(), - ) - .create_async() - .await; - - let response = web.get("/releases/search?query=some_random_crate").await?; - assert!(response.status().is_success()); + #[tokio::test(flavor = "multi_thread")] + async fn search_result_passes_cratesio_pagination_links() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let page = 
kuchikiki::parse_html().one(response.text().await?); + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; + + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .create() + .await?; - let other_search_links: Vec<_> = page - .select("a") - .expect("missing link") - .map(|el| { - let attributes = el.attributes.borrow(); - attributes.get("href").unwrap().to_string() + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "some_random_crate".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + ], + "meta": { + "next_page": "?some=parameters&that=cratesio&might=return", + "prev_page": "?and=the¶meters=for&the=previouspage", + } }) - .filter(|url| url.starts_with("/releases/search?")) - .collect(); + .to_string(), + ) + .create_async() + .await; - assert_eq!(other_search_links.len(), 2); - assert_eq!( - other_search_links[0], - format!( - "/releases/search?paginate={}", - b64.encode("?and=the¶meters=for&the=previouspage"), - ) - ); - assert_eq!( - other_search_links[1], - format!( - "/releases/search?paginate={}", - b64.encode("?some=parameters&that=cratesio&might=return") - ) - ); + let response = web.get("/releases/search?query=some_random_crate").await?; + assert!(response.status().is_success()); - Ok(()) - }) + let page = kuchikiki::parse_html().one(response.text().await?); + + let other_search_links: Vec<_> = page + .select("a") + .expect("missing link") + .map(|el| { + let attributes = el.attributes.borrow(); + attributes.get("href").unwrap().to_string() + }) + .filter(|url| url.starts_with("/releases/search?")) + .collect(); + + assert_eq!(other_search_links.len(), 2); + assert_eq!( + other_search_links[0], + format!( + "/releases/search?paginate={}", + b64.encode("?and=the¶meters=for&the=previouspage"), + ) + ); + assert_eq!( + other_search_links[1], + format!( + "/releases/search?paginate={}", + b64.encode("?some=parameters&that=cratesio&might=return") + ) + ); + + Ok(()) } #[test] @@ -1206,294 +1211,307 @@ mod tests { }) } - #[test] - fn crates_io_errors_as_status_code_200() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.crates_io_api_call_retries = 0; - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn crates_io_errors_as_status_code_200() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "doesnt_matter_here".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "errors": [ - { "detail": "error name 1" }, - { "detail": "error name 2" }, - ] - }) - .to_string(), - ) - .create_async() - .await; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .crates_io_api_call_retries(0) + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; + + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + 
Matcher::UrlEncoded("q".into(), "doesnt_matter_here".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "errors": [ + { "detail": "error name 1" }, + { "detail": "error name 2" }, + ] + }) + .to_string(), + ) + .create_async() + .await; - let response = env - .web_app() - .await - .get("/releases/search?query=doesnt_matter_here") - .await?; - assert_eq!(response.status(), 500); + let response = env + .web_app() + .await + .get("/releases/search?query=doesnt_matter_here") + .await?; + assert_eq!(response.status(), 500); - assert!( - response - .text() - .await? - .contains("error name 1\nerror name 2") - ); - Ok(()) - }) + assert!( + response + .text() + .await? + .contains("error name 1\nerror name 2") + ); + Ok(()) } #[test_case(StatusCode::NOT_FOUND)] #[test_case(StatusCode::INTERNAL_SERVER_ERROR)] #[test_case(StatusCode::BAD_GATEWAY)] - fn crates_io_errors_are_correctly_returned_and_we_dont_try_parsing(status: StatusCode) { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.crates_io_api_call_retries = 0; - config.registry_api_host = crates_io.url().parse().unwrap(); - }); - - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "doesnt_matter_here".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(status.as_u16() as usize) - .create_async() - .await; - - let response = env - .web_app() - .await - .get("/releases/search?query=doesnt_matter_here") - .await?; - assert_eq!(response.status(), 500); + #[tokio::test(flavor = "multi_thread")] + async fn crates_io_errors_are_correctly_returned_and_we_dont_try_parsing( + status: StatusCode, + ) -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; + + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .crates_io_api_call_retries(0) + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; + + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "doesnt_matter_here".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(status.as_u16() as usize) + .create_async() + .await; + + let response = env + .web_app() + .await + .get("/releases/search?query=doesnt_matter_here") + .await?; + assert_eq!(response.status(), 500); - assert!(response.text().await?.contains(&format!("{status}"))); - Ok(()) - }) + assert!(response.text().await?.contains(&format!("{status}"))); + Ok(()) } - #[test] - fn search_encoded_pagination_passed_to_cratesio() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn search_encoded_pagination_passed_to_cratesio() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .create() - .await?; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - 
Matcher::UrlEncoded("some".into(), "dummy".into()), - Matcher::UrlEncoded("pagination".into(), "parameters".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - ], - "meta": { - "next_page": null, - "prev_page": null, - } - }) - .to_string(), - ) - .create_async() - .await; - - let links = get_release_links( - &format!( - "/releases/search?paginate={}", - b64.encode("?some=dummy&pagination=parameters") - ), - &web, - ) + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .create() .await?; - assert_eq!(links.len(), 1); - assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/",); - Ok(()) - }) + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("some".into(), "dummy".into()), + Matcher::UrlEncoded("pagination".into(), "parameters".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + ], + "meta": { + "next_page": null, + "prev_page": null, + } + }) + .to_string(), + ) + .create_async() + .await; + + let links = get_release_links( + &format!( + "/releases/search?paginate={}", + b64.encode("?some=dummy&pagination=parameters") + ), + &web, + ) + .await?; + + assert_eq!(links.len(), 1); + assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/",); + Ok(()) } - #[test] - fn search_lucky_with_unknown_crate() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn search_lucky_with_unknown_crate() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .create() - .await?; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "some_random_".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - { "name": "some_other_crate" }, - ], - "meta": { - "next_page": null, - "prev_page": null, - } - }) - .to_string(), - ) - .create_async() - .await; - - // when clicking "I'm feeling lucky" and the query doesn't match any crate, - // just fallback to the normal search results. 
- let links = get_release_links( - "/releases/search?query=some_random_&i-am-feeling-lucky=1", - &web, - ) + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .create() .await?; - assert_eq!(links.len(), 1); - assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/"); - Ok(()) - }) + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "some_random_".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + { "name": "some_other_crate" }, + ], + "meta": { + "next_page": null, + "prev_page": null, + } + }) + .to_string(), + ) + .create_async() + .await; + + // when clicking "I'm feeling lucky" and the query doesn't match any crate, + // just fallback to the normal search results. + let links = get_release_links( + "/releases/search?query=some_random_&i-am-feeling-lucky=1", + &web, + ) + .await?; + + assert_eq!(links.len(), 1); + assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/"); + Ok(()) } - #[test] - fn search() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn search() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .version("2.0.0") - .create() - .await?; - env.fake_release() - .await - .name("some_random_crate") - .version("1.0.0") - .create() - .await?; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; - env.fake_release() - .await - .name("and_another_one") - .version("0.0.1") - .create() - .await?; + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .version("2.0.0") + .create() + .await?; + env.fake_release() + .await + .name("some_random_crate") + .version("1.0.0") + .create() + .await?; - env.fake_release() - .await - .name("yet_another_crate") - .version("0.1.0") - .yanked(true) - .create() - .await?; + env.fake_release() + .await + .name("and_another_one") + .version("0.0.1") + .create() + .await?; - // release with only in-progress build (= in progress release) will not be shown - env.fake_release() - .await - .name("in_progress") - .version("0.1.0") - .builds(vec![ - FakeBuild::default() - .build_status(BuildStatus::InProgress) - .rustc_version("rustc (blabla 2022-01-01)") - .docsrs_version("docs.rs 4.0.0"), - ]) - .create() - .await?; + env.fake_release() + .await + .name("yet_another_crate") + .version("0.1.0") + .yanked(true) + .create() + .await?; - // release that failed in the fetch-step, will miss some details - let mut conn = env.async_db().await.async_conn().await; - fake_release_that_failed_before_build( - &mut conn, - "failed_hard", - "0.1.0", - "some random error", - ) + // release with only in-progress build (= in progress release) will not be shown + env.fake_release() + .await + .name("in_progress") + .version("0.1.0") + .builds(vec![ + FakeBuild::default() + .build_status(BuildStatus::InProgress) + .rustc_version("rustc (blabla 2022-01-01)") + .docsrs_version("docs.rs 4.0.0"), + ]) + .create() .await?; - 
let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "some_random_crate".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - { "name": "some_other_crate" }, - { "name": "and_another_one" }, - { "name": "yet_another_crate" }, - { "name": "in_progress" }, - { "name": "failed_hard" } - ], - "meta": { - "next_page": null, - "prev_page": null, - } - }) - .to_string(), - ) - .create_async() - .await; - - let links = get_release_links("/releases/search?query=some_random_crate", &web).await?; - - // `some_other_crate` won't be shown since we don't have it yet - assert_eq!(links.len(), 4); - // * `max_version` from the crates.io search result will be ignored since we - // might not have it yet, or the doc-build might be in progress. - // * ranking/order from crates.io result is preserved - // * version used is the highest semver following our own "latest version" logic - assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/"); - assert_eq!(links[1], "/and_another_one/latest/and_another_one/"); - assert_eq!(links[2], "/yet_another_crate/0.1.0/yet_another_crate/"); - assert_eq!(links[3], "/crate/failed_hard/0.1.0"); - Ok(()) - }) + // release that failed in the fetch-step, will miss some details + let mut conn = env.async_db().async_conn().await; + fake_release_that_failed_before_build( + &mut conn, + "failed_hard", + "0.1.0", + "some random error", + ) + .await?; + + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "some_random_crate".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + { "name": "some_other_crate" }, + { "name": "and_another_one" }, + { "name": "yet_another_crate" }, + { "name": "in_progress" }, + { "name": "failed_hard" } + ], + "meta": { + "next_page": null, + "prev_page": null, + } + }) + .to_string(), + ) + .create_async() + .await; + + let links = get_release_links("/releases/search?query=some_random_crate", &web).await?; + + // `some_other_crate` won't be shown since we don't have it yet + assert_eq!(links.len(), 4); + // * `max_version` from the crates.io search result will be ignored since we + // might not have it yet, or the doc-build might be in progress. 
+        // * ranking/order from crates.io result is preserved
+        // * version used is the highest semver following our own "latest version" logic
+        assert_eq!(links[0], "/some_random_crate/latest/some_random_crate/");
+        assert_eq!(links[1], "/and_another_one/latest/and_another_one/");
+        assert_eq!(links[2], "/yet_another_crate/0.1.0/yet_another_crate/");
+        assert_eq!(links[3], "/crate/failed_hard/0.1.0");
+        Ok(())
     }

     async fn get_release_links(path: &str, web: &axum::Router) -> Result<Vec<String>, Error> {
@@ -1793,39 +1811,39 @@ mod tests {
         })
     }

-    #[test]
-    fn test_deployment_queue() {
-        async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.cloudfront_distribution_id_web = Some("distribution_id_web".into());
-            });
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_deployment_queue() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .cloudfront_distribution_id_web(Some("distribution_id_web".into()))
+                .build()?,
+        )
+        .await?;

-            let web = env.web_app().await;
+        let web = env.web_app().await;

-            let mut conn = env.async_db().await.async_conn().await;
-            cdn::queue_crate_invalidation(&mut conn, &env.config(), "krate_2").await?;
+        let mut conn = env.async_db().async_conn().await;
+        cdn::queue_crate_invalidation(&mut conn, env.config(), "krate_2").await?;

-            let content =
-                kuchikiki::parse_html().one(web.get("/releases/queue").await?.text().await?);
-            assert!(
-                content
-                    .select(".release > div > strong")
-                    .expect("missing heading")
-                    .any(|el| el.text_contents().contains("active CDN deployments"))
-            );
+        let content = kuchikiki::parse_html().one(web.get("/releases/queue").await?.text().await?);
+        assert!(
+            content
+                .select(".release > div > strong")
+                .expect("missing heading")
+                .any(|el| el.text_contents().contains("active CDN deployments"))
+        );

-            let items = content
-                .select(".queue-list > li")
-                .expect("missing list items")
-                .collect::<Vec<_>>();
+        let items = content
+            .select(".queue-list > li")
+            .expect("missing list items")
+            .collect::<Vec<_>>();

-            assert_eq!(items.len(), 1);
-            let a = items[0].as_node().select_first("a").expect("missing link");
+        assert_eq!(items.len(), 1);
+        let a = items[0].as_node().select_first("a").expect("missing link");

-            assert!(a.text_contents().contains("krate_2"));
+        assert!(a.text_contents().contains("krate_2"));

-            Ok(())
-        });
+        Ok(())
     }

     #[test]
@@ -1849,7 +1867,7 @@ mod tests {
                 .any(|el| el.text_contents().contains("active CDN deployments"))
             );

-            let queue = env.async_build_queue().await;
+            let queue = env.async_build_queue();
             queue.add_crate("foo", "1.0.0", 0, None).await?;
             queue.add_crate("bar", "0.1.0", -10, None).await?;
             queue.add_crate("baz", "0.0.1", 10, None).await?;
@@ -1889,7 +1907,7 @@ mod tests {
             let web = env.web_app().await;

             // we have two queued releases, where the build for one is already in progress
-            let queue = env.async_build_queue().await;
+            let queue = env.async_build_queue();
             queue.add_crate("foo", "1.0.0", 0, None).await?;
             queue.add_crate("bar", "0.1.0", 0, None).await?;
@@ -1964,7 +1982,7 @@ mod tests {
     fn test_releases_rebuild_queue_with_crates() {
         async_wrapper(|env| async move {
             let web = env.web_app().await;
-            let queue = env.async_build_queue().await;
+            let queue = env.async_build_queue();
             queue
                 .add_crate("foo", "1.0.0", REBUILD_PRIORITY, None)
                 .await?;
@@ -2049,7 +2067,7 @@ mod tests {
             seen.insert("".to_owned());

             let resp = web.get("/").await?;
-            resp.assert_cache_control(CachePolicy::ShortInCdnAndBrowser, &env.config());
+
resp.assert_cache_control(CachePolicy::ShortInCdnAndBrowser, env.config()); assert!(resp.status().is_success()); @@ -2139,59 +2157,61 @@ mod tests { }); } - #[test] - fn crates_not_on_docsrs() { - async_wrapper(|env| async move { - let mut crates_io = mockito::Server::new_async().await; - env.override_config(|config| { - config.registry_api_host = crates_io.url().parse().unwrap(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn crates_not_on_docsrs() -> Result<()> { + let mut crates_io = mockito::Server::new_async().await; - let web = env.web_app().await; - env.fake_release() - .await - .name("some_random_crate") - .create() - .await?; + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .registry_api_host(crates_io.url().parse().unwrap()) + .build()?, + ) + .await?; - let _m = crates_io - .mock("GET", "/api/v1/crates") - .match_query(Matcher::AllOf(vec![ - Matcher::UrlEncoded("q".into(), "some_random_crate".into()), - Matcher::UrlEncoded("per_page".into(), "30".into()), - ])) - .with_status(200) - .with_header("content-type", "application/json") - .with_body( - json!({ - "crates": [ - { "name": "some_random_crate" }, - { "name": "some_random_crate2" }, - { "name": "some_random_crate3" }, - ], - "meta": { - "next_page": "null", - "prev_page": "null", - } - }) - .to_string(), - ) - .create_async() - .await; - - let response = web.get("/releases/search?query=some_random_crate").await?; - assert!(response.status().is_success()); + let web = env.web_app().await; + env.fake_release() + .await + .name("some_random_crate") + .create() + .await?; - let page = kuchikiki::parse_html().one(response.text().await?); + let _m = crates_io + .mock("GET", "/api/v1/crates") + .match_query(Matcher::AllOf(vec![ + Matcher::UrlEncoded("q".into(), "some_random_crate".into()), + Matcher::UrlEncoded("per_page".into(), "30".into()), + ])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + json!({ + "crates": [ + { "name": "some_random_crate" }, + { "name": "some_random_crate2" }, + { "name": "some_random_crate3" }, + ], + "meta": { + "next_page": "null", + "prev_page": "null", + } + }) + .to_string(), + ) + .create_async() + .await; - assert_eq!(page.select("div.name.not-available").unwrap().count(), 2); - assert_eq!( - page.select("div.name:not(.not-available)").unwrap().count(), - 1 - ); + let response = web.get("/releases/search?query=some_random_crate").await?; + assert!(response.status().is_success()); - Ok(()) - }) + let page = kuchikiki::parse_html().one(response.text().await?); + + assert_eq!(page.select("div.name.not-available").unwrap().count(), 2); + assert_eq!( + page.select("div.name:not(.not-available)").unwrap().count(), + 1 + ); + + Ok(()) } #[test] diff --git a/src/web/routes.rs b/src/web/routes.rs index 324b26587..8e136a259 100644 --- a/src/web/routes.rs +++ b/src/web/routes.rs @@ -403,14 +403,14 @@ mod tests { "/favicon.ico", "/-/static/favicon.ico", CachePolicy::ForeverInCdnAndBrowser, - &config, + config, ) .await?; web.assert_redirect_cached( "/robots.txt", "/-/static/robots.txt", CachePolicy::ForeverInCdnAndBrowser, - &config, + config, ) .await?; @@ -421,7 +421,7 @@ mod tests { "/opensearch.xml", "/-/static/opensearch.xml", CachePolicy::ForeverInCdnAndBrowser, - &config, + config, ) .await?; @@ -438,7 +438,7 @@ mod tests { .get("/-/rustdoc.static/style.css") .await?; assert_eq!(response.status(), StatusCode::NOT_FOUND); - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + 
response.assert_cache_control(CachePolicy::NoCaching, env.config()); Ok(()) }) } @@ -447,7 +447,7 @@ mod tests { fn serve_rustdoc_content() { async_wrapper(|env| async move { let web = env.web_app().await; - let storage = env.async_storage().await; + let storage = env.async_storage(); storage .store_one("/rustdoc-static/style.css", "content".as_bytes()) .await?; @@ -457,7 +457,7 @@ mod tests { let response = web.get("/-/rustdoc.static/style.css").await?; assert!(response.status().is_success()); - response.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, &env.config()); + response.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, env.config()); assert_eq!(response.text().await?, "content"); assert_eq!( diff --git a/src/web/rustdoc.rs b/src/web/rustdoc.rs index 36affffe7..3b38bfa68 100644 --- a/src/web/rustdoc.rs +++ b/src/web/rustdoc.rs @@ -1102,7 +1102,7 @@ mod test { utils::Dependency, web::{cache::CachePolicy, encode_url_path}, }; - use anyhow::Context; + use anyhow::{Context, Result}; use chrono::{NaiveDate, Utc}; use kuchikiki::traits::TendrilSink; use pretty_assertions::assert_eq; @@ -1168,7 +1168,7 @@ mod test { web.assert_success_cached( "/krate/0.1.0/help.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; Ok(()) @@ -1204,54 +1204,54 @@ mod test { .create() .await?; let web = env.web_app().await; - web.assert_success_cached("/", CachePolicy::ShortInCdnAndBrowser, &env.config()) + web.assert_success_cached("/", CachePolicy::ShortInCdnAndBrowser, env.config()) .await?; web.assert_success_cached( "/crate/buggy/0.1.0", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/directory_1/index.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/directory_2.html/index.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/directory_3/.gitignore", CachePolicy::ForeverInCdnAndBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/settings.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/scrape-examples-help.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/all.html", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_success_cached( "/buggy/0.1.0/directory_4/empty_file_no_ext", CachePolicy::ForeverInCdnAndBrowser, - &env.config(), + env.config(), ) .await?; Ok(()) @@ -1277,14 +1277,14 @@ mod test { web.assert_success_cached( base, CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; web.assert_redirect_cached( "/dummy/0.1.0/x86_64-unknown-linux-gnu/dummy/", base, CachePolicy::ForeverInCdn, - &env.config(), + env.config(), ) .await?; @@ -1352,7 +1352,7 @@ mod test { .get("/dummy/latest/dummy/") .await? 
.error_for_status()?; - resp.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); let body = resp.text().await?; assert!(body.contains(" Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .cache_control_stale_while_revalidate(Some(2592000)) + .build()?, + ) + .await?; - env.fake_release() - .await - .name("dummy") - .version("0.1.0") - .archive_storage(true) - .rustdoc_file("dummy/index.html") - .create() - .await?; + env.fake_release() + .await + .name("dummy") + .version("0.1.0") + .archive_storage(true) + .rustdoc_file("dummy/index.html") + .create() + .await?; - let web = env.web_app().await; + let web = env.web_app().await; - { - let resp = web.get("/dummy/latest/dummy/").await?; - resp.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); - } + { + let resp = web.get("/dummy/latest/dummy/").await?; + resp.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); + } - { - let resp = web.get("/dummy/0.1.0/dummy/").await?; - resp.assert_cache_control( - CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), - ); - } - Ok(()) - }) + { + let resp = web.get("/dummy/0.1.0/dummy/").await?; + resp.assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, env.config()); + } + Ok(()) } #[test_case(true)] @@ -1423,7 +1421,7 @@ mod test { // check it works at all let redirect = - latest_version_redirect("/dummy/0.1.0/dummy/", &web, &env.config()).await?; + latest_version_redirect("/dummy/0.1.0/dummy/", &web, env.config()).await?; assert_eq!( redirect, "/crate/dummy/latest/target-redirect/x86_64-unknown-linux-gnu/dummy/index.html" @@ -1431,13 +1429,13 @@ mod test { // check it keeps the subpage let redirect = - latest_version_redirect("/dummy/0.1.0/dummy/blah/", &web, &env.config()).await?; + latest_version_redirect("/dummy/0.1.0/dummy/blah/", &web, env.config()).await?; assert_eq!( redirect, "/crate/dummy/latest/target-redirect/x86_64-unknown-linux-gnu/dummy/blah/index.html" ); let redirect = - latest_version_redirect("/dummy/0.1.0/dummy/blah/blah.html", &web, &env.config()) + latest_version_redirect("/dummy/0.1.0/dummy/blah/blah.html", &web, env.config()) .await?; assert_eq!( redirect, @@ -1448,7 +1446,7 @@ mod test { let redirect = latest_version_redirect( "/dummy/0.1.0/dummy/struct.will-be-deleted.html", &web, - &env.config(), + env.config(), ) .await?; assert_eq!( @@ -1487,7 +1485,7 @@ mod test { let redirect = latest_version_redirect( "/dummy/0.1.0/x86_64-pc-windows-msvc/dummy/index.html", &web, - &env.config(), + env.config(), ) .await?; assert_eq!( @@ -1498,7 +1496,7 @@ mod test { let redirect = latest_version_redirect( "/dummy/0.1.0/x86_64-pc-windows-msvc/dummy/", &web, - &env.config(), + env.config(), ) .await?; assert_eq!( @@ -1509,7 +1507,7 @@ mod test { let redirect = latest_version_redirect( "/dummy/0.1.0/x86_64-pc-windows-msvc/dummy/struct.Blah.html", &web, - &env.config(), + env.config(), ) .await?; assert_eq!( @@ -1544,7 +1542,7 @@ mod test { let web = env.web_app().await; let redirect = - latest_version_redirect("/dummy/0.1.0/dummy/", &web, &env.config()).await?; + latest_version_redirect("/dummy/0.1.0/dummy/", &web, env.config()).await?; assert_eq!(redirect, "/crate/dummy/latest"); Ok(()) @@ -1583,14 +1581,14 @@ mod test { let web = env.web_app().await; let redirect = - latest_version_redirect("/dummy/0.1.0/dummy/", &web, &env.config()).await?; + latest_version_redirect("/dummy/0.1.0/dummy/", &web, env.config()).await?; assert_eq!( 
redirect, "/crate/dummy/latest/target-redirect/x86_64-unknown-linux-gnu/dummy/index.html" ); let redirect = - latest_version_redirect("/dummy/0.2.1/dummy/", &web, &env.config()).await?; + latest_version_redirect("/dummy/0.2.1/dummy/", &web, env.config()).await?; assert_eq!( redirect, "/crate/dummy/latest/target-redirect/x86_64-unknown-linux-gnu/dummy/index.html" @@ -1660,7 +1658,7 @@ mod test { "/zstd/badge.svg", "https://img.shields.io/docsrs/zstd/latest", CachePolicy::ForeverInCdnAndBrowser, - &env.config(), + env.config(), ) .await?; assert_eq!(response.status(), StatusCode::MOVED_PERMANENTLY); @@ -2399,7 +2397,7 @@ mod test { .create() .await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; // https://stackoverflow.com/questions/18209625/how-do-i-modify-fields-inside-the-new-postgresql-json-datatype sqlx::query!( r#"UPDATE releases SET dependencies = dependencies::jsonb #- '{0,2}' WHERE id = $1"#, id.0 @@ -2608,7 +2606,7 @@ mod test { latest_version_redirect( "/tungstenite/0.10.0/tungstenite/?search=String%20-%3E%20Message", &env.web_app().await, - &env.config() + env.config() ) .await?, "/crate/tungstenite/latest/target-redirect/x86_64-unknown-linux-gnu/tungstenite/index.html?search=String%20-%3E%20Message", @@ -2641,7 +2639,7 @@ mod test { latest_version_redirect( "/pyo3/0.2.7/src/pyo3/objects/exc.rs.html", &web, - &env.config(), + env.config(), ) .await?, target_redirect @@ -2691,7 +2689,7 @@ mod test { .get("/crate/hexponent/0.3.1/menus/releases/x86_64-unknown-linux-gnu/hexponent/index.html") .await?; assert!(releases_response.status().is_success()); - releases_response.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + releases_response.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); assert_eq!( parse_release_links_from_menu(&releases_response.text().await?), vec![ @@ -2707,7 +2705,7 @@ mod test { .get("/crate/hexponent/0.3.1/menus/releases/hexponent/something.html") .await?; assert!(releases_response.status().is_success()); - releases_response.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + releases_response.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); assert_eq!( parse_release_links_from_menu(&releases_response.text().await?), vec![ @@ -3007,7 +3005,7 @@ mod test { let web = env.web_app().await; let response = web.get("/crate/dummy/0.1.0/download").await?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); assert_eq!(response.status(), StatusCode::NOT_FOUND); Ok(()) }); @@ -3027,118 +3025,122 @@ mod test { let web = env.web_app().await; let response = web.get("/crate/dummy/0.1.0/download").await?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); assert_eq!(response.status(), StatusCode::NOT_FOUND); Ok(()) }); } - #[test] - fn download_semver() { - async_wrapper(|env| async move { - env.override_config(|config| { - config.s3_static_root_path = "https://static.docs.rs".into() - }); - env.fake_release() - .await - .name("dummy") - .version("0.1.0") - .archive_storage(true) - .create() - .await?; - - let web = env.web_app().await; + #[tokio::test(flavor = "multi_thread")] + async fn download_semver() -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .s3_static_root_path("https://static.docs.rs") + .build()?, + ) + .await?; - 
web.assert_redirect_cached_unchecked(
-                "/crate/dummy/0.1/download",
-                "https://static.docs.rs/rustdoc/dummy/0.1.0.zip",
-                CachePolicy::ForeverInCdn,
-                &env.config(),
-            )
+        env.fake_release()
+            .await
+            .name("dummy")
+            .version("0.1.0")
+            .archive_storage(true)
+            .create()
             .await?;
-            assert!(
-                env.async_storage()
-                    .await
-                    .get_public_access("rustdoc/dummy/0.1.0.zip")
-                    .await?
-            );
-            Ok(())
-        });
+
+        let web = env.web_app().await;
+
+        web.assert_redirect_cached_unchecked(
+            "/crate/dummy/0.1/download",
+            "https://static.docs.rs/rustdoc/dummy/0.1.0.zip",
+            CachePolicy::ForeverInCdn,
+            env.config(),
+        )
+        .await?;
+        assert!(
+            env.async_storage()
+                .get_public_access("rustdoc/dummy/0.1.0.zip")
+                .await?
+        );
+        Ok(())
     }

-    #[test]
-    fn download_specific_version() {
-        async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.s3_static_root_path = "https://static.docs.rs".into()
-            });
-            env.fake_release()
-                .await
-                .name("dummy")
-                .version("0.1.0")
-                .archive_storage(true)
-                .create()
-                .await?;
+    #[tokio::test(flavor = "multi_thread")]
+    async fn download_specific_version() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .s3_static_root_path("https://static.docs.rs")
+                .build()?,
+        )
+        .await?;

-            let web = env.web_app().await;
-            let storage = env.async_storage().await;
+        env.fake_release()
+            .await
+            .name("dummy")
+            .version("0.1.0")
+            .archive_storage(true)
+            .create()
+            .await?;

-            // disable public access to be sure that the handler will enable it
-            storage
-                .set_public_access("rustdoc/dummy/0.1.0.zip", false)
-                .await?;
+        let web = env.web_app().await;
+        let storage = env.async_storage();

-            web.assert_redirect_cached_unchecked(
-                "/crate/dummy/0.1.0/download",
-                "https://static.docs.rs/rustdoc/dummy/0.1.0.zip",
-                CachePolicy::ForeverInCdn,
-                &env.config(),
-            )
+        // disable public access to be sure that the handler will enable it
+        storage
+            .set_public_access("rustdoc/dummy/0.1.0.zip", false)
             .await?;
-            assert!(storage.get_public_access("rustdoc/dummy/0.1.0.zip").await?);
-            Ok(())
-        });
-    }
+        web.assert_redirect_cached_unchecked(
+            "/crate/dummy/0.1.0/download",
+            "https://static.docs.rs/rustdoc/dummy/0.1.0.zip",
+            CachePolicy::ForeverInCdn,
+            env.config(),
+        )
+        .await?;
+        assert!(storage.get_public_access("rustdoc/dummy/0.1.0.zip").await?);
+        Ok(())
+    }

-    #[test]
-    fn download_latest_version() {
-        async_wrapper(|env| async move {
-            env.override_config(|config| {
-                config.s3_static_root_path = "https://static.docs.rs".into()
-            });
-            env.fake_release()
-                .await
-                .name("dummy")
-                .version("0.1.0")
-                .archive_storage(true)
-                .create()
-                .await?;
+    #[tokio::test(flavor = "multi_thread")]
+    async fn download_latest_version() -> Result<()> {
+        let env = TestEnvironment::with_config(
+            TestEnvironment::base_config()
+                .s3_static_root_path("https://static.docs.rs")
+                .build()?,
+        )
+        .await?;

-            env.fake_release()
-                .await
-                .name("dummy")
-                .version("0.2.0")
-                .archive_storage(true)
-                .create()
-                .await?;
+        env.fake_release()
+            .await
+            .name("dummy")
+            .version("0.1.0")
+            .archive_storage(true)
+            .create()
+            .await?;

-            let web = env.web_app().await;
+        env.fake_release()
+            .await
+            .name("dummy")
+            .version("0.2.0")
+            .archive_storage(true)
+            .create()
             .await?;

-            web.assert_redirect_cached_unchecked(
-                "/crate/dummy/latest/download",
-                "https://static.docs.rs/rustdoc/dummy/0.2.0.zip",
-                CachePolicy::ForeverInCdn,
-                &env.config(),
-            )
-            .await?;
-            assert!(
-                env.async_storage()
-                    .await
-                    .get_public_access("rustdoc/dummy/0.2.0.zip")
-
.await? - ); - Ok(()) - }); + + let web = env.web_app().await; + + web.assert_redirect_cached_unchecked( + "/crate/dummy/latest/download", + "https://static.docs.rs/rustdoc/dummy/0.2.0.zip", + CachePolicy::ForeverInCdn, + env.config(), + ) + .await?; + assert!( + env.async_storage() + .get_public_access("rustdoc/dummy/0.2.0.zip") + .await? + ); + Ok(()) } #[test_case("something.js")] @@ -3176,7 +3178,7 @@ mod test { .create() .await?; - let storage = env.async_storage().await; + let storage = env.async_storage(); storage.store_one("asset.js", *b"content").await?; storage.store_one(path, *b"more_content").await?; @@ -3213,7 +3215,7 @@ mod test { "/clap/2.24.0/i686-pc-windows-gnu/clap/which%20is%20a%20part%20of%20%5B%60Display%60%5D", "/crate/clap/2.24.0/target-redirect/i686-pc-windows-gnu/clap/which%20is%20a%20part%20of%20[%60Display%60]", CachePolicy::ForeverInCdn, - &env.config(), + env.config(), ).await?; Ok(()) @@ -3236,7 +3238,7 @@ mod test { "/clap/latest/clapproc%20macro%20%60Parser%60%20not%20expanded:%20Cannot%20create%20expander%20for", "/clap/latest/clapproc%20macro%20%60Parser%60%20not%20expanded:%20Cannot%20create%20expander%20for/clap/", CachePolicy::ForeverInCdn, - &env.config(), + env.config(), ).await?; Ok(()) @@ -3257,7 +3259,7 @@ mod test { .await?; let web = env.web_app().await; - web.assert_redirect_cached(path, expected, CachePolicy::ForeverInCdn, &env.config()) + web.assert_redirect_cached(path, expected, CachePolicy::ForeverInCdn, env.config()) .await?; Ok(()) @@ -3371,158 +3373,166 @@ mod test { RustdocJsonFormatVersion::Version(42), CompressionAlgorithm::Zstd )] - fn json_download( + #[tokio::test(flavor = "multi_thread")] + async fn json_download( request_path_suffix: &str, redirect_version: &str, redirect_target: &str, redirect_format_version: RustdocJsonFormatVersion, redirect_compression: CompressionAlgorithm, - ) { - async_wrapper(|env| async move { - env.override_config(|config| { - config.s3_static_root_path = "https://static.docs.rs".into(); - }); - env.fake_release() - .await - .name("dummy") - .version("0.1.0") - .archive_storage(true) - .default_target("x86_64-unknown-linux-gnu") - .add_target("i686-pc-windows-msvc") - .create() - .await?; + ) -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .s3_static_root_path("https://static.docs.rs") + .build()?, + ) + .await?; - env.fake_release() - .await - .name("dummy") - .version("0.2.0") - .archive_storage(true) - .default_target("x86_64-unknown-linux-gnu") - .add_target("i686-pc-windows-msvc") - .create() - .await?; + env.fake_release() + .await + .name("dummy") + .version("0.1.0") + .archive_storage(true) + .default_target("x86_64-unknown-linux-gnu") + .add_target("i686-pc-windows-msvc") + .create() + .await?; - let web = env.web_app().await; + env.fake_release() + .await + .name("dummy") + .version("0.2.0") + .archive_storage(true) + .default_target("x86_64-unknown-linux-gnu") + .add_target("i686-pc-windows-msvc") + .create() + .await?; - let compression_ext = file_extension_for(redirect_compression); + let web = env.web_app().await; - web.assert_redirect_cached_unchecked( + let compression_ext = file_extension_for(redirect_compression); + + web.assert_redirect_cached_unchecked( &format!("/crate/dummy/{request_path_suffix}"), &format!("https://static.docs.rs/rustdoc-json/dummy/{redirect_version}/{redirect_target}/\ dummy_{redirect_version}_{redirect_target}_{redirect_format_version}.json.{compression_ext}"), CachePolicy::ForeverInCdn, - &env.config(), + 
env.config(), ) .await?; - Ok(()) - }); + Ok(()) } #[test_case("")] #[test_case(".zst")] - fn test_json_download_fallback_to_old_files_without_compression_extension(ext: &str) { - async_wrapper(|env| async move { - env.override_config(|config| { - config.s3_static_root_path = "https://static.docs.rs".into(); - }); - - const NAME: &str = "dummy"; - const VERSION: &str = "0.1.0"; - const TARGET: &str = "x86_64-unknown-linux-gnu"; - const FORMAT_VERSION: RustdocJsonFormatVersion = RustdocJsonFormatVersion::Latest; - - env.fake_release() - .await - .name(NAME) - .version(VERSION) - .archive_storage(true) - .default_target(TARGET) - .create() - .await?; - - let storage = env.async_storage().await; - - let zstd_blob = storage - .get( - &rustdoc_json_path( - NAME, - VERSION, - TARGET, - FORMAT_VERSION, - Some(CompressionAlgorithm::Zstd), - ), - usize::MAX, - ) - .await?; + #[tokio::test(flavor = "multi_thread")] + async fn test_json_download_fallback_to_old_files_without_compression_extension( + ext: &str, + ) -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .s3_static_root_path("https://static.docs.rs") + .build()?, + ) + .await?; + + const NAME: &str = "dummy"; + const VERSION: &str = "0.1.0"; + const TARGET: &str = "x86_64-unknown-linux-gnu"; + const FORMAT_VERSION: RustdocJsonFormatVersion = RustdocJsonFormatVersion::Latest; + + env.fake_release() + .await + .name(NAME) + .version(VERSION) + .archive_storage(true) + .default_target(TARGET) + .create() + .await?; + + let storage = env.async_storage(); + + let zstd_blob = storage + .get( + &rustdoc_json_path( + NAME, + VERSION, + TARGET, + FORMAT_VERSION, + Some(CompressionAlgorithm::Zstd), + ), + usize::MAX, + ) + .await?; - for compression in RUSTDOC_JSON_COMPRESSION_ALGORITHMS { - let path = - rustdoc_json_path(NAME, VERSION, TARGET, FORMAT_VERSION, Some(*compression)); - storage.delete_prefix(&path).await?; - assert!(!storage.exists(&path).await?); - } - storage - .store_one( - &rustdoc_json_path(NAME, VERSION, TARGET, FORMAT_VERSION, None), - zstd_blob.content, - ) - .await?; + for compression in RUSTDOC_JSON_COMPRESSION_ALGORITHMS { + let path = rustdoc_json_path(NAME, VERSION, TARGET, FORMAT_VERSION, Some(*compression)); + storage.delete_prefix(&path).await?; + assert!(!storage.exists(&path).await?); + } + storage + .store_one( + &rustdoc_json_path(NAME, VERSION, TARGET, FORMAT_VERSION, None), + zstd_blob.content, + ) + .await?; - let web = env.web_app().await; + let web = env.web_app().await; - web.assert_redirect_cached_unchecked( - &format!("/crate/dummy/latest/json{ext}"), - &format!( - "https://static.docs.rs/rustdoc-json/{NAME}/{VERSION}/{TARGET}/\ + web.assert_redirect_cached_unchecked( + &format!("/crate/dummy/latest/json{ext}"), + &format!( + "https://static.docs.rs/rustdoc-json/{NAME}/{VERSION}/{TARGET}/\ {NAME}_{VERSION}_{TARGET}_{FORMAT_VERSION}.json" // without .zstd - ), - CachePolicy::ForeverInCdn, - &env.config(), - ) - .await?; - Ok(()) - }); + ), + CachePolicy::ForeverInCdn, + env.config(), + ) + .await?; + Ok(()) } #[test_case("0.1.0/json"; "rustdoc status false")] #[test_case("0.2.0/unknown-target/json"; "unknown target")] #[test_case("0.2.0/json/99"; "target file doesnt exist")] #[test_case("0.42.0/json"; "unknown version")] - fn json_download_not_found(request_path_suffix: &str) { - async_wrapper(|env| async move { - env.override_config(|config| { - config.s3_static_root_path = "https://static.docs.rs".into(); - }); + #[tokio::test(flavor = "multi_thread")] + async fn 
json_download_not_found(request_path_suffix: &str) -> Result<()> { + let env = TestEnvironment::with_config( + TestEnvironment::base_config() + .s3_static_root_path("https://static.docs.rs") + .build()?, + ) + .await?; - env.fake_release() - .await - .name("dummy") - .version("0.1.0") - .archive_storage(true) - .default_target("x86_64-unknown-linux-gnu") - .add_target("i686-pc-windows-msvc") - .binary(true) // binary => rustdoc_status = false - .create() - .await?; + env.fake_release() + .await + .name("dummy") + .version("0.1.0") + .archive_storage(true) + .default_target("x86_64-unknown-linux-gnu") + .add_target("i686-pc-windows-msvc") + .binary(true) // binary => rustdoc_status = false + .create() + .await?; - env.fake_release() - .await - .name("dummy") - .version("0.2.0") - .archive_storage(true) - .default_target("x86_64-unknown-linux-gnu") - .add_target("i686-pc-windows-msvc") - .create() - .await?; + env.fake_release() + .await + .name("dummy") + .version("0.2.0") + .archive_storage(true) + .default_target("x86_64-unknown-linux-gnu") + .add_target("i686-pc-windows-msvc") + .create() + .await?; - let web = env.web_app().await; + let web = env.web_app().await; - let response = web - .get(&format!("/crate/dummy/{request_path_suffix}")) - .await?; + let response = web + .get(&format!("/crate/dummy/{request_path_suffix}")) + .await?; - assert_eq!(response.status(), StatusCode::NOT_FOUND); - Ok(()) - }); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + Ok(()) } } diff --git a/src/web/source.rs b/src/web/source.rs index 730f43725..58fcc074d 100644 --- a/src/web/source.rs +++ b/src/web/source.rs @@ -350,9 +350,10 @@ pub(crate) async fn source_browser_handler( #[cfg(test)] mod tests { use crate::{ - test::{AxumResponseTestExt, AxumRouterTestExt, async_wrapper}, + test::{AxumResponseTestExt, AxumRouterTestExt, TestEnvironment, async_wrapper}, web::{cache::CachePolicy, encode_url_path}, }; + use anyhow::Result; use kuchikiki::traits::TendrilSink; use reqwest::StatusCode; use test_case::test_case; @@ -417,7 +418,7 @@ mod tests { web.assert_success_cached( "/crate/fake/0.1.0/source/", CachePolicy::ForeverInCdnAndStaleInBrowser, - &env.config(), + env.config(), ) .await?; let response = web.get("/crate/fake/0.1.0/source/some_filename.rs").await?; @@ -426,8 +427,7 @@ mod tests { response.headers().get("link").unwrap(), "; rel=\"canonical\"" ); - response - .assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, &env.config()); + response.assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, env.config()); assert!(response.text().await?.contains("some_random_content")); Ok(()) }); @@ -462,8 +462,7 @@ mod tests { "application/pdf" ); - response - .assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, &env.config()); + response.assert_cache_control(CachePolicy::ForeverInCdnAndStaleInBrowser, env.config()); assert!(response.text().await?.contains("some_random_content")); Ok(()) }); @@ -506,7 +505,7 @@ mod tests { let web = env.web_app().await; web.assert_success(path).await?; - let mut conn = env.async_db().await.async_conn().await; + let mut conn = env.async_db().async_conn().await; sqlx::query!( "UPDATE releases SET files = NULL @@ -539,7 +538,7 @@ mod tests { .await .get("/crate/fake/latest/source/") .await?; - resp.assert_cache_control(CachePolicy::ForeverInCdn, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdn, env.config()); let body = resp.text().await?; assert!(body.contains(" Result<()> { + let env = TestEnvironment::with_config( + 
TestEnvironment::base_config() + .max_file_size(1) + .max_file_size_html(1) + .build()?, + ) + .await?; - let web = env.web_app().await; - let response = web.get("/crate/fake/0.1.0/source/large_file.rs").await?; - assert_eq!(response.status(), StatusCode::OK); - assert!( - response - .text() - .await? - .contains("This file is too large to display") - ); - Ok(()) - }); + env.fake_release() + .await + .name("fake") + .version("0.1.0") + .source_file("large_file.rs", b"some_random_content") + .create() + .await?; + + let web = env.web_app().await; + let response = web.get("/crate/fake/0.1.0/source/large_file.rs").await?; + assert_eq!(response.status(), StatusCode::OK); + assert!( + response + .text() + .await? + .contains("This file is too large to display") + ); + Ok(()) } } diff --git a/src/web/statics.rs b/src/web/statics.rs index 5b02fefac..839c1bf47 100644 --- a/src/web/statics.rs +++ b/src/web/statics.rs @@ -113,7 +113,7 @@ mod tests { let resp = web.get("/-/static/style.css").await?; assert!(resp.status().is_success()); - resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, env.config()); assert_eq!( resp.headers().get("Content-Type"), Some(&"text/css".parse().unwrap()), @@ -132,7 +132,7 @@ mod tests { let resp = web.get("/-/static/vendored.css").await?; assert!(resp.status().is_success()); - resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, env.config()); assert_eq!( resp.headers().get("Content-Type"), Some(&"text/css".parse().unwrap()), @@ -172,7 +172,7 @@ mod tests { let resp = web.get(path).await?; assert!(resp.status().is_success()); - resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, env.config()); assert_eq!( resp.headers().get("Content-Type"), Some(&"text/javascript".parse().unwrap()), @@ -202,7 +202,7 @@ mod tests { let resp = web.get(&url).await?; assert!(resp.status().is_success(), "failed to fetch {url:?}"); - resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, &env.config()); + resp.assert_cache_control(CachePolicy::ForeverInCdnAndBrowser, env.config()); assert_eq!( resp.bytes().await?, fs::read(path).unwrap(), @@ -219,7 +219,7 @@ mod tests { fn static_file_that_doesnt_exist() { async_wrapper(|env| async move { let response = env.web_app().await.get("/-/static/whoop-de-do.png").await?; - response.assert_cache_control(CachePolicy::NoCaching, &env.config()); + response.assert_cache_control(CachePolicy::NoCaching, env.config()); assert_eq!(response.status(), StatusCode::NOT_FOUND); Ok(()) diff --git a/src/web/status.rs b/src/web/status.rs index 563fc1e64..dfc048ea6 100644 --- a/src/web/status.rs +++ b/src/web/status.rs @@ -70,7 +70,7 @@ mod tests { .await .get_and_follow_redirects(&format!("/crate/foo/{version}/status.json")) .await?; - response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, &env.config()); + response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, env.config()); assert_eq!(response.headers()["access-control-allow-origin"], "*"); assert_eq!(response.status(), StatusCode::OK); let value: serde_json::Value = serde_json::from_str(&response.text().await?)?; @@ -101,7 +101,7 @@ mod tests { let redirect = web .assert_redirect("/crate/foo/*/status.json", "/crate/foo/latest/status.json") .await?; - redirect.assert_cache_control(CachePolicy::NoStoreMustRevalidate, 
&env.config()); + redirect.assert_cache_control(CachePolicy::NoStoreMustRevalidate, env.config()); assert_eq!(redirect.headers()["access-control-allow-origin"], "*"); Ok(()) @@ -126,7 +126,7 @@ mod tests { "/crate/foo/0.1.0/status.json", ) .await?; - redirect.assert_cache_control(CachePolicy::NoStoreMustRevalidate, &env.config()); + redirect.assert_cache_control(CachePolicy::NoStoreMustRevalidate, env.config()); assert_eq!(redirect.headers()["access-control-allow-origin"], "*"); Ok(()) @@ -152,7 +152,7 @@ mod tests { .await .get_and_follow_redirects(&format!("/crate/foo/{version}/status.json")) .await?; - response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, &env.config()); + response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, env.config()); assert_eq!(response.headers()["access-control-allow-origin"], "*"); dbg!(&response); assert_eq!(response.status(), StatusCode::OK); @@ -194,7 +194,7 @@ mod tests { .await .get_and_follow_redirects(&format!("/crate/{krate}/{version}/status.json")) .await?; - response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, &env.config()); + response.assert_cache_control(CachePolicy::NoStoreMustRevalidate, env.config()); assert_eq!(response.headers()["access-control-allow-origin"], "*"); assert_eq!(response.status(), StatusCode::NOT_FOUND); Ok(())