4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion mithril-aggregator/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "mithril-aggregator"
version = "0.6.3"
version = "0.6.4"
description = "A Mithril Aggregator server"
authors = { workspace = true }
edition = { workspace = true }
28 changes: 23 additions & 5 deletions mithril-aggregator/src/artifact_builder/cardano_database.rs
@@ -1,4 +1,7 @@
-use std::path::{Path, PathBuf};
+use std::{
+    path::{Path, PathBuf},
+    sync::Arc,
+};

use anyhow::{anyhow, Context};
use async_trait::async_trait;
@@ -12,24 +15,28 @@ use mithril_common::{
    StdResult,
};

-use crate::artifact_builder::ArtifactBuilder;
+use crate::artifact_builder::{AncillaryArtifactBuilder, ArtifactBuilder};

pub struct CardanoDatabaseArtifactBuilder {
-    db_directory: PathBuf, // TODO: temporary, will be accessed through another dependency instead of direct path.
+    db_directory: PathBuf,
    cardano_node_version: Version,
    compression_algorithm: CompressionAlgorithm,
+    #[allow(dead_code)]
+    ancillary_builder: Arc<AncillaryArtifactBuilder>,
}

impl CardanoDatabaseArtifactBuilder {
    pub fn new(
        db_directory: PathBuf,
        cardano_node_version: &Version,
        compression_algorithm: CompressionAlgorithm,
+        ancillary_builder: Arc<AncillaryArtifactBuilder>,
    ) -> Self {
        Self {
            db_directory,
            cardano_node_version: cardano_node_version.clone(),
            compression_algorithm,
+            ancillary_builder,
        }
    }
}
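Since the constructor gains a fourth argument, every call site must now pass an `Arc<AncillaryArtifactBuilder>`. A minimal sketch of a caller under the new signature, mirroring the updated unit test further down; the `semver::Version` import path and the empty uploader list are assumptions:

```rust
use std::{path::PathBuf, sync::Arc};

use semver::Version; // assumed import; the test calls `Version::parse` directly
use mithril_common::entities::CompressionAlgorithm;

// Hypothetical call site: wires an ancillary builder with no uploaders,
// matching this PR's temporary state where locations stay empty.
fn build_cardano_database_artifact_builder(
    db_directory: PathBuf,
) -> CardanoDatabaseArtifactBuilder {
    CardanoDatabaseArtifactBuilder::new(
        db_directory,
        &Version::parse("1.0.0").unwrap(),
        CompressionAlgorithm::Zstandard,
        Arc::new(AncillaryArtifactBuilder::new(vec![])),
    )
}
```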
@@ -55,11 +62,17 @@ impl ArtifactBuilder<CardanoDbBeacon, CardanoDatabaseSnapshot> for CardanoDataba
        })?;
        let total_db_size_uncompressed = compute_uncompressed_database_size(&self.db_directory)?;

+        let locations = ArtifactsLocations {
+            ancillary: vec![],
+            digest: vec![],
+            immutables: vec![],
+        };
+
        let cardano_database = CardanoDatabaseSnapshot::new(
            merkle_root.to_string(),
            beacon,
            total_db_size_uncompressed,
-            ArtifactsLocations::default(), // TODO: temporary default locations, will be injected in next PR.
+            locations,
            self.compression_algorithm,
            &self.cardano_node_version,
        );
@@ -153,6 +166,7 @@ mod tests {
            test_dir,
            &Version::parse("1.0.0").unwrap(),
            CompressionAlgorithm::Zstandard,
+            Arc::new(AncillaryArtifactBuilder::new(vec![])),
        );

        let beacon = fake_data::beacon();
@@ -177,7 +191,11 @@
            "merkleroot".to_string(),
            beacon,
            expected_total_size,
-            ArtifactsLocations::default(),
+            ArtifactsLocations {
+                ancillary: vec![],
+                digest: vec![],
+                immutables: vec![],
+            },
            CompressionAlgorithm::Zstandard,
            &Version::parse("1.0.0").unwrap(),
        );
110 changes: 110 additions & 0 deletions mithril-aggregator/src/artifact_builder/cardano_database_artifacts/ancillary.rs
@@ -0,0 +1,110 @@
#![allow(dead_code)]
use async_trait::async_trait;
use std::{path::Path, sync::Arc};

use mithril_common::{entities::AncillaryLocation, StdResult};

use crate::{FileUploader, LocalUploader};

/// The [AncillaryFileUploader] trait allows identifying uploaders that return locations for ancillary archive files.
#[cfg_attr(test, mockall::automock)]
#[async_trait]
pub trait AncillaryFileUploader: Send + Sync {
    /// Uploads the archive at the given filepath and returns the location of the uploaded file.
    async fn upload(&self, filepath: &Path) -> StdResult<AncillaryLocation>;
}

#[async_trait]
impl AncillaryFileUploader for LocalUploader {
    async fn upload(&self, filepath: &Path) -> StdResult<AncillaryLocation> {
        let uri = FileUploader::upload(self, filepath).await?.into();

        Ok(AncillaryLocation::CloudStorage { uri })
    }
}

/// The [AncillaryArtifactBuilder] creates an ancillary archive from the cardano database directory (including ledger and volatile directories).
/// The archive is uploaded with the provided uploaders.
pub struct AncillaryArtifactBuilder {
    uploaders: Vec<Arc<dyn AncillaryFileUploader>>,
}

impl AncillaryArtifactBuilder {
    pub fn new(uploaders: Vec<Arc<dyn AncillaryFileUploader>>) -> Self {
        Self { uploaders }
    }

    pub async fn upload_archive(&self, db_directory: &Path) -> StdResult<Vec<AncillaryLocation>> {
        let mut locations = Vec::new();
        for uploader in &self.uploaders {
            // TODO: Temporary preparation work, `db_directory` is used as the ancillary archive path for now.
            let location = uploader.upload(db_directory).await?;
            locations.push(location);
        }

        Ok(locations)
    }
}

#[cfg(test)]
mod tests {
    use mockall::predicate::eq;

    use super::*;

    #[tokio::test]
    async fn upload_archive_should_return_empty_locations_with_no_uploader() {
        let builder = AncillaryArtifactBuilder::new(vec![]);

        let locations = builder.upload_archive(Path::new("whatever")).await.unwrap();

        assert!(locations.is_empty());
    }

    #[tokio::test]
    async fn upload_archive_should_return_all_uploaders_returned_locations() {
        let mut first_uploader = MockAncillaryFileUploader::new();
        first_uploader
            .expect_upload()
            .with(eq(Path::new("archive_path")))
            .times(1)
            .return_once(|_| {
                Ok(AncillaryLocation::CloudStorage {
                    uri: "an_uri".to_string(),
                })
            });

        let mut second_uploader = MockAncillaryFileUploader::new();
        second_uploader
            .expect_upload()
            .with(eq(Path::new("archive_path")))
            .times(1)
            .return_once(|_| {
                Ok(AncillaryLocation::CloudStorage {
                    uri: "another_uri".to_string(),
                })
            });

        let uploaders: Vec<Arc<dyn AncillaryFileUploader>> =
            vec![Arc::new(first_uploader), Arc::new(second_uploader)];

        let builder = AncillaryArtifactBuilder::new(uploaders);

        let locations = builder
            .upload_archive(Path::new("archive_path"))
            .await
            .unwrap();

        assert_eq!(
            locations,
            vec![
                AncillaryLocation::CloudStorage {
                    uri: "an_uri".to_string()
                },
                AncillaryLocation::CloudStorage {
                    uri: "another_uri".to_string()
                }
            ]
        );
    }
}
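For a sense of how another uploader could plug into the new trait, here is a hypothetical sketch; `StubUploader` and its URI scheme are illustrative and not part of this PR:

```rust
use std::path::Path;

use async_trait::async_trait;
use mithril_common::{entities::AncillaryLocation, StdResult};

/// Hypothetical uploader, for illustration only; not part of this PR.
struct StubUploader {
    base_uri: String,
}

#[async_trait]
impl AncillaryFileUploader for StubUploader {
    async fn upload(&self, filepath: &Path) -> StdResult<AncillaryLocation> {
        // A real uploader would transfer the archive and return its final
        // URI; here the location is derived from the file path directly.
        let uri = format!("{}/{}", self.base_uri, filepath.display());

        Ok(AncillaryLocation::CloudStorage { uri })
    }
}
```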
4 changes: 4 additions & 0 deletions mithril-aggregator/src/artifact_builder/cardano_database_artifacts/mod.rs
@@ -0,0 +1,4 @@
//! The module is responsible for creating and uploading the archives of the Cardano database artifacts.
mod ancillary;

pub use ancillary::*;
2 changes: 2 additions & 0 deletions mithril-aggregator/src/artifact_builder/mod.rs
@@ -1,12 +1,14 @@
//! The module used for building artifact
mod cardano_database;
+mod cardano_database_artifacts;
mod cardano_immutable_files_full;
mod cardano_stake_distribution;
mod cardano_transactions;
mod interface;
mod mithril_stake_distribution;

pub use cardano_database::*;
+pub use cardano_database_artifacts::*;
pub use cardano_immutable_files_full::*;
pub use cardano_stake_distribution::*;
pub use cardano_transactions::*;
11 changes: 10 additions & 1 deletion mithril-aggregator/src/configuration.rs
@@ -284,7 +284,7 @@ impl Configuration {
            .map_err(|e| anyhow!(ConfigError::Message(e.to_string())))
    }

-    /// Return the file of the SQLite stores. If the directory does not exist, it is created.
+    /// Return the directory of the SQLite stores. If the directory does not exist, it is created.
    pub fn get_sqlite_dir(&self) -> PathBuf {
        let store_dir = &self.data_stores_directory;

@@ -295,6 +295,15 @@
        self.data_stores_directory.clone()
    }

+    /// Return the snapshots directory.
+    pub fn get_snapshot_dir(&self) -> StdResult<PathBuf> {
+        if !&self.snapshot_directory.exists() {
+            std::fs::create_dir_all(&self.snapshot_directory)?;
+        }
+
+        Ok(self.snapshot_directory.clone())
+    }
+
    /// Same as the [store retention limit][Configuration::store_retention_limit] but will never
    /// yield a value lower than 3.
    ///
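As a usage note for the new accessor, a hypothetical call site could look like the following; `prepare_snapshot_storage` and its logging are illustrative, only `get_snapshot_dir` comes from this diff:

```rust
use mithril_common::StdResult;

// Hypothetical helper illustrating the accessor's contract: the directory
// is created on first access, then its path is returned.
fn prepare_snapshot_storage(config: &Configuration) -> StdResult<()> {
    let snapshot_dir = config.get_snapshot_dir()?;
    println!("Snapshots will be stored in {}", snapshot_dir.display());

    Ok(())
}
```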