diff --git a/Cargo.Bazel.json.lock b/Cargo.Bazel.json.lock index 2bec0d397564..bd6f4954b06c 100644 --- a/Cargo.Bazel.json.lock +++ b/Cargo.Bazel.json.lock @@ -1,5 +1,5 @@ { - "checksum": "d154dd0e371170ad8d3c57a751b807c9e2920d800fdd658238acadec4f95a70d", + "checksum": "8cf087fb7348882bf3c64a1534deb497bd708e4a35ca265dce551011d2f13aa6", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -22175,7 +22175,7 @@ "target": "rusb" }, { - "id": "rusqlite 0.28.0", + "id": "rusqlite 0.37.0", "target": "rusqlite" }, { @@ -22435,6 +22435,10 @@ "id": "tokio-metrics 0.4.0", "target": "tokio_metrics" }, + { + "id": "tokio-rusqlite 0.7.0", + "target": "tokio_rusqlite" + }, { "id": "tokio-rustls 0.26.0", "target": "tokio_rustls" @@ -26814,7 +26818,6 @@ ], "crate_features": { "common": [ - "default", "std" ], "selects": {} @@ -26858,6 +26861,13 @@ "compile_data_glob": [ "**" ], + "crate_features": { + "common": [ + "alloc", + "default" + ], + "selects": {} + }, "edition": "2018", "version": "0.3.0" }, @@ -31396,6 +31406,54 @@ ], "license_file": "LICENSE-APACHE" }, + "hashlink 0.10.0": { + "name": "hashlink", + "version": "0.10.0", + "package_url": "https://github.com/kyren/hashlink", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/hashlink/0.10.0/download", + "sha256": "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" + } + }, + "targets": [ + { + "Library": { + "crate_name": "hashlink", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "hashlink", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "deps": { + "common": [ + { + "id": "hashbrown 0.15.2", + "target": "hashbrown" + } + ], + "selects": {} + }, + "edition": "2018", + "version": "0.10.0" + }, + "license": "MIT OR Apache-2.0", + "license_ids": [ + "Apache-2.0", + "MIT" + ], + "license_file": "LICENSE-APACHE" + }, "hdrhistogram 7.5.2": { "name": "hdrhistogram", "version": "7.5.2", @@ -45453,14 +45511,14 @@ ], "license_file": null }, - "libsqlite3-sys 0.25.2": { + "libsqlite3-sys 0.35.0": { "name": "libsqlite3-sys", - "version": "0.25.2", + "version": "0.35.0", "package_url": "https://github.com/rusqlite/rusqlite", "repository": { "Http": { - "url": "https://static.crates.io/crates/libsqlite3-sys/0.25.2/download", - "sha256": "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" + "url": "https://static.crates.io/crates/libsqlite3-sys/0.35.0/download", + "sha256": "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" } }, "targets": [ @@ -45500,7 +45558,7 @@ "bundled_bindings", "cc", "default", - "min_sqlite_version_3_6_8", + "min_sqlite_version_3_14_0", "pkg-config", "vcpkg" ], @@ -45509,14 +45567,14 @@ "deps": { "common": [ { - "id": "libsqlite3-sys 0.25.2", + "id": "libsqlite3-sys 0.35.0", "target": "build_script_build" } ], "selects": {} }, - "edition": "2018", - "version": "0.25.2" + "edition": "2021", + "version": "0.35.0" }, "build_script_attrs": { "compile_data_glob": [ @@ -68336,14 +68394,14 @@ ], "license_file": "LICENSE" }, - "rusqlite 0.28.0": { + "rusqlite 0.37.0": { "name": "rusqlite", - "version": "0.28.0", + "version": "0.37.0", "package_url": "https://github.com/rusqlite/rusqlite", "repository": { "Http": { - "url": "https://static.crates.io/crates/rusqlite/0.28.0/download", - "sha256": "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" + "url": "https://static.crates.io/crates/rusqlite/0.37.0/download", + "sha256": 
"165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" } }, "targets": [ @@ -68375,11 +68433,11 @@ "deps": { "common": [ { - "id": "bitflags 1.3.2", + "id": "bitflags 2.10.0", "target": "bitflags" }, { - "id": "fallible-iterator 0.2.0", + "id": "fallible-iterator 0.3.0", "target": "fallible_iterator" }, { @@ -68387,11 +68445,11 @@ "target": "fallible_streaming_iterator" }, { - "id": "hashlink 0.8.3", + "id": "hashlink 0.10.0", "target": "hashlink" }, { - "id": "libsqlite3-sys 0.25.2", + "id": "libsqlite3-sys 0.35.0", "target": "libsqlite3_sys" }, { @@ -68401,8 +68459,8 @@ ], "selects": {} }, - "edition": "2018", - "version": "0.28.0" + "edition": "2021", + "version": "0.37.0" }, "license": "MIT", "license_ids": [ @@ -82282,6 +82340,67 @@ ], "license_file": "LICENSE" }, + "tokio-rusqlite 0.7.0": { + "name": "tokio-rusqlite", + "version": "0.7.0", + "package_url": "https://github.com/programatik29/tokio-rusqlite", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/tokio-rusqlite/0.7.0/download", + "sha256": "302563ae4a2127f3d2c105f4f2f0bd7cae3609371755600ebc148e0ccd8510d6" + } + }, + "targets": [ + { + "Library": { + "crate_name": "tokio_rusqlite", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "tokio_rusqlite", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "crate_features": { + "common": [ + "default" + ], + "selects": {} + }, + "deps": { + "common": [ + { + "id": "crossbeam-channel 0.5.15", + "target": "crossbeam_channel" + }, + { + "id": "rusqlite 0.37.0", + "target": "rusqlite" + }, + { + "id": "tokio 1.48.0", + "target": "tokio" + } + ], + "selects": {} + }, + "edition": "2021", + "version": "0.7.0" + }, + "license": "MIT", + "license_ids": [ + "MIT" + ], + "license_file": "LICENSE" + }, "tokio-rustls 0.24.1": { "name": "tokio-rustls", "version": "0.24.1", @@ -98991,7 +99110,7 @@ "rsa 0.9.6", "rstest 0.19.0", "rusb 0.9.3", - "rusqlite 0.28.0", + "rusqlite 0.37.0", "rust-ini 0.21.2", "rust_decimal 1.36.0", "rust_decimal_macros 1.36.0", @@ -99060,6 +99179,7 @@ "tokio 1.48.0", "tokio-io-timeout 1.2.0", "tokio-metrics 0.4.0", + "tokio-rusqlite 0.7.0", "tokio-rustls 0.26.0", "tokio-serde 0.8.0", "tokio-socks 0.5.2", diff --git a/Cargo.Bazel.toml.lock b/Cargo.Bazel.toml.lock index 96df6a5471d2..adca7c67ce7e 100644 --- a/Cargo.Bazel.toml.lock +++ b/Cargo.Bazel.toml.lock @@ -3683,7 +3683,7 @@ dependencies = [ "goldenfile", "gpt", "group 0.13.0", - "hashlink", + "hashlink 0.8.3", "hex", "hex-literal 0.4.1", "hkdf", @@ -3911,6 +3911,7 @@ dependencies = [ "tokio", "tokio-io-timeout", "tokio-metrics", + "tokio-rusqlite", "tokio-rustls 0.26.0", "tokio-serde", "tokio-socks", @@ -5401,6 +5402,15 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "hdrhistogram" version = "7.5.2" @@ -7933,9 +7943,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -11568,14 +11578,14 @@ dependencies = [ [[package]] name = "rusqlite" 
-version = "0.28.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ - "bitflags 1.3.2", - "fallible-iterator 0.2.0", + "bitflags 2.10.0", + "fallible-iterator 0.3.0", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.10.0", "libsqlite3-sys", "smallvec", ] @@ -13812,6 +13822,17 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "tokio-rusqlite" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302563ae4a2127f3d2c105f4f2f0bd7cae3609371755600ebc148e0ccd8510d6" +dependencies = [ + "crossbeam-channel", + "rusqlite", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" diff --git a/Cargo.lock b/Cargo.lock index 43d11bbb1cbe..ca66b9427ec8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5746,6 +5746,15 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -7025,7 +7034,7 @@ dependencies = [ "criterion", "flate2", "futures", - "hashlink", + "hashlink 0.8.4", "hex", "http 1.3.1", "hyper-util", @@ -10401,6 +10410,7 @@ dependencies = [ "strum_macros 0.26.4", "tempfile", "tokio", + "tokio-rusqlite", "tower-http", "tower-request-id", "tracing", @@ -17492,9 +17502,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -21934,14 +21944,14 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ - "bitflags 1.3.2", - "fallible-iterator 0.2.0", + "bitflags 2.10.0", + "fallible-iterator 0.3.0", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.10.0", "libsqlite3-sys", "smallvec", ] @@ -24445,6 +24455,17 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "tokio-rusqlite" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302563ae4a2127f3d2c105f4f2f0bd7cae3609371755600ebc148e0ccd8510d6" +dependencies = [ + "crossbeam-channel", + "rusqlite", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" diff --git a/bazel/rust.MODULE.bazel b/bazel/rust.MODULE.bazel index ac149ead4103..eba44eb9a350 100644 --- a/bazel/rust.MODULE.bazel +++ b/bazel/rust.MODULE.bazel @@ -1324,7 +1324,7 @@ crate.spec( crate.spec( features = ["bundled"], package = "rusqlite", - version = "^0.28.0", + version = "^0.37.0", ) crate.spec( package = "rust_decimal", @@ -1687,6 +1687,10 @@ crate.spec( package = "tokio-test", version = "^0.4.4", ) +crate.spec( + package = "tokio-rusqlite", + version = "^0.7", +) crate.spec( package = "tokio-tungstenite", version = "^0.26.0", diff --git a/rs/rosetta-api/icp/Cargo.toml 
b/rs/rosetta-api/icp/Cargo.toml index 6c8994296e6a..9aaddc6bc15c 100644 --- a/rs/rosetta-api/icp/Cargo.toml +++ b/rs/rosetta-api/icp/Cargo.toml @@ -42,7 +42,7 @@ registry-canister = { path = "../../registry/canister" } reqwest = { workspace = true } rolling-file = { workspace = true } rosetta-core = { path = "../common/rosetta_core" } -rusqlite = { version = "~0.28.0", features = ["bundled"] } +rusqlite = { version = "0.37", features = ["bundled"] } serde = { workspace = true } serde_bytes = { workspace = true } serde_cbor = { workspace = true } diff --git a/rs/rosetta-api/icp/ledger_canister_blocks_synchronizer/Cargo.toml b/rs/rosetta-api/icp/ledger_canister_blocks_synchronizer/Cargo.toml index 07385bdf74df..85d06a984d4f 100644 --- a/rs/rosetta-api/icp/ledger_canister_blocks_synchronizer/Cargo.toml +++ b/rs/rosetta-api/icp/ledger_canister_blocks_synchronizer/Cargo.toml @@ -22,7 +22,7 @@ icp-ledger = { path = "../../../ledger_suite/icp" } on_wire = { path = "../../../rust_canisters/on_wire" } reqwest = { workspace = true } rosetta-core = { path = "../../common/rosetta_core" } -rusqlite = { version = "~0.28.0", features = ["bundled"] } +rusqlite = { version = "0.37", features = ["bundled"] } serde = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/rs/rosetta-api/icrc1/BUILD.bazel b/rs/rosetta-api/icrc1/BUILD.bazel index ea7825223d9e..35a59ead01af 100644 --- a/rs/rosetta-api/icrc1/BUILD.bazel +++ b/rs/rosetta-api/icrc1/BUILD.bazel @@ -49,6 +49,7 @@ DEPENDENCIES = [ "@crate_index//:strum", "@crate_index//:tempfile", "@crate_index//:tokio", + "@crate_index//:tokio-rusqlite", "@crate_index//:tower-http", "@crate_index//:tower-request-id", "@crate_index//:tracing", diff --git a/rs/rosetta-api/icrc1/Cargo.toml b/rs/rosetta-api/icrc1/Cargo.toml index c568250cad25..f7ad2efc75bf 100644 --- a/rs/rosetta-api/icrc1/Cargo.toml +++ b/rs/rosetta-api/icrc1/Cargo.toml @@ -41,7 +41,8 @@ rand = { workspace = true } reqwest = { workspace = true } rolling-file = "0.2.0" rosetta-core = { path = "../common/rosetta_core" } -rusqlite = { version = "~0.28.0", features = ["bundled"] } +rusqlite = { version = "0.37", features = ["bundled"] } +tokio-rusqlite = "0.7" serde = { workspace = true } serde_bytes = { workspace = true } serde_cbor = { workspace = true } diff --git a/rs/rosetta-api/icrc1/src/common/storage/error.rs b/rs/rosetta-api/icrc1/src/common/storage/error.rs new file mode 100644 index 000000000000..645e255e38c9 --- /dev/null +++ b/rs/rosetta-api/icrc1/src/common/storage/error.rs @@ -0,0 +1,54 @@ +//! Error types for storage operations. + +use std::fmt; + +/// Errors that can occur during storage operations. +#[derive(Debug)] +pub enum StorageError { + /// A rusqlite database operation failed. + Rusqlite(rusqlite::Error), + /// Multiple records were found when at most one was expected. + MultipleRecordsFound(String), + /// A data integrity constraint was violated. + DataIntegrity(String), + /// An internal error occurred (wraps anyhow::Error for complex operations). 
+ Internal(anyhow::Error), +} + +impl fmt::Display for StorageError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + StorageError::Rusqlite(e) => write!(f, "Database error: {}", e), + StorageError::MultipleRecordsFound(msg) => { + write!(f, "Multiple records found: {}", msg) + } + StorageError::DataIntegrity(msg) => write!(f, "Data integrity error: {}", msg), + StorageError::Internal(e) => write!(f, "{}", e), + } + } +} + +impl std::error::Error for StorageError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + StorageError::Rusqlite(e) => Some(e), + StorageError::Internal(e) => e.source(), + _ => None, + } + } +} + +impl From<rusqlite::Error> for StorageError { + fn from(e: rusqlite::Error) -> Self { + StorageError::Rusqlite(e) + } +} + +impl From<anyhow::Error> for StorageError { + fn from(e: anyhow::Error) -> Self { + StorageError::Internal(e) + } +} + +/// Result type alias for storage operations. +pub type Result<T> = std::result::Result<T, StorageError>; diff --git a/rs/rosetta-api/icrc1/src/common/storage/mod.rs b/rs/rosetta-api/icrc1/src/common/storage/mod.rs index 12ba2cd9c256..f73122dcf642 100644 --- a/rs/rosetta-api/icrc1/src/common/storage/mod.rs +++ b/rs/rosetta-api/icrc1/src/common/storage/mod.rs @@ -1,3 +1,4 @@ +pub mod error; pub mod schema; pub mod storage_client; pub mod storage_operations; diff --git a/rs/rosetta-api/icrc1/src/common/storage/schema.rs b/rs/rosetta-api/icrc1/src/common/storage/schema.rs index 9a82fb6181e6..b85a371ffdb3 100644 --- a/rs/rosetta-api/icrc1/src/common/storage/schema.rs +++ b/rs/rosetta-api/icrc1/src/common/storage/schema.rs @@ -1,17 +1,17 @@ use crate::common::storage::{ + error::{Result, StorageError}, storage_operations::{ METADATA_SCHEMA_VERSION, get_rosetta_metadata, initialize_counter_if_missing, }, types::RosettaCounter, }; -use anyhow::bail; use rusqlite::{Connection, params}; pub const SCHEMA_VERSION: u64 = 1; /// Creates all the necessary tables for the ICRC1 Rosetta storage system. /// This function is used by both production code and tests to ensure consistency. -pub fn create_tables(connection: &Connection) -> anyhow::Result<()> { +pub fn create_tables(connection: &Connection) -> Result<()> { // Metadata table connection.execute( r#" @@ -76,12 +76,7 @@ pub fn create_tables(connection: &Connection) -> anyhow::Result<()> { )?; // Initialize counters using the new counter management system - initialize_counter_if_missing(connection, &RosettaCounter::SyncedBlocks).map_err(|e| { - rusqlite::Error::SqliteFailure( - rusqlite::ffi::Error::new(rusqlite::ffi::SQLITE_ABORT), - Some(format!("Failed to initialize SyncedBlocks counter: {e}")), - ) - })?; + initialize_counter_if_missing(connection, &RosettaCounter::SyncedBlocks)?; // The trigger increments the counter of `SyncedBlocks` by 1 whenever a new block is // inserted into the blocks table. For transactions that call `INSERT OR IGNORE` and try to @@ -111,14 +106,16 @@ pub fn create_tables(connection: &Connection) -> anyhow::Result<()> { )?; let stored_schema_version = match get_rosetta_metadata(connection, METADATA_SCHEMA_VERSION)?
{ - Some(value) => u64::from_le_bytes(value.as_slice().try_into()?), + Some(value) => u64::from_le_bytes(value.as_slice().try_into().map_err(|_| { + StorageError::DataIntegrity("Invalid schema version bytes".to_string()) + })?), None => 0, }; if stored_schema_version > SCHEMA_VERSION { - bail!(format!( + return Err(StorageError::DataIntegrity(format!( "Selected database has schema version {stored_schema_version} which is incompatible with current schema version {SCHEMA_VERSION}." - )); + ))); } if stored_schema_version != SCHEMA_VERSION { @@ -132,7 +129,7 @@ pub fn create_tables(connection: &Connection) -> anyhow::Result<()> { } /// Creates all the necessary indexes for optimal query performance. -pub fn create_indexes(connection: &Connection) -> anyhow::Result<()> { +pub fn create_indexes(connection: &Connection) -> Result<()> { connection.execute( r#" CREATE INDEX IF NOT EXISTS block_idx_account_balances diff --git a/rs/rosetta-api/icrc1/src/common/storage/storage_client.rs b/rs/rosetta-api/icrc1/src/common/storage/storage_client.rs index 016e0e432f64..5a4e85611b69 100644 --- a/rs/rosetta-api/icrc1/src/common/storage/storage_client.rs +++ b/rs/rosetta-api/icrc1/src/common/storage/storage_client.rs @@ -1,14 +1,15 @@ +use super::error::StorageError; use super::storage_operations; use crate::common::storage::types::{MetadataEntry, RosettaBlock}; -use anyhow::{Result, bail}; +use anyhow::bail; use candid::Nat; use ic_base_types::CanisterId; use icrc_ledger_types::icrc1::account::Account; use rosetta_core::metrics::RosettaMetrics; -use rusqlite::{Connection, OpenFlags}; use serde_bytes::ByteBuf; use std::cmp::Ordering; -use std::{path::Path, sync::Mutex}; +use std::path::Path; +use tokio_rusqlite::Connection; use tracing::warn; const BALANCE_SYNC_BATCH_SIZE_DEFAULT: u64 = 100_000; @@ -55,7 +56,7 @@ impl TokenInfo { #[derive(Debug)] pub struct StorageClient { - storage_connection: Mutex<Connection>, + storage_connection: Connection, token_info: Option<TokenInfo>, flush_cache_and_shrink_memory: bool, balance_sync_batch_size: u64, } impl StorageClient { /// Constructs a new SQLite in-persistent store. - pub fn new_persistent(db_file_path: &Path) -> anyhow::Result<Self> { + pub async fn new_persistent(db_file_path: &Path) -> anyhow::Result<Self> { Self::new_persistent_with_cache_and_batch_size( db_file_path, None, false, Some(BALANCE_SYNC_BATCH_SIZE_DEFAULT), ) + .await } /// Constructs a new SQLite in-persistent store with custom cache size and batch size. - pub fn new_persistent_with_cache_and_batch_size( + pub async fn new_persistent_with_cache_and_batch_size( db_file_path: &Path, cache_size_kb: Option<i64>, flush_cache_shrink_mem: bool, balance_sync_batch_size: Option<u64>, ) -> anyhow::Result<Self> { std::fs::create_dir_all(db_file_path.parent().unwrap())?; - let connection = rusqlite::Connection::open(db_file_path)?; + let connection = Connection::open(db_file_path).await?; let batch_size = balance_sync_batch_size.unwrap_or(BALANCE_SYNC_BATCH_SIZE_DEFAULT); Self::new( connection, @@ -88,21 +90,21 @@ impl StorageClient { flush_cache_shrink_mem, batch_size, ) + .await } /// Constructs a new SQLite in-memory store.
- pub fn new_in_memory() -> anyhow::Result<Self> { - let connection = rusqlite::Connection::open_in_memory()?; - Self::new(connection, None, false, BALANCE_SYNC_BATCH_SIZE_DEFAULT) + pub async fn new_in_memory() -> anyhow::Result<Self> { + let connection = Connection::open_in_memory().await?; + Self::new(connection, None, false, BALANCE_SYNC_BATCH_SIZE_DEFAULT).await } - /// Constructs a new SQLite in-memory store with a named DB that can be shared across instances. - pub fn new_named_in_memory(name: &str) -> anyhow::Result<Self> { - let connection = Connection::open_with_flags( - format!("'file:{name}?mode=memory&cache=shared', uri=True"), - OpenFlags::default(), - )?; - Self::new(connection, None, false, BALANCE_SYNC_BATCH_SIZE_DEFAULT) + /// Constructs a new SQLite in-memory store with a name for shared access. + /// This allows multiple connections to access the same in-memory database. + pub async fn new_named_in_memory(name: &str) -> anyhow::Result<Self> { + let uri = format!("file:{}?mode=memory&cache=shared", name); + let connection = Connection::open(&uri).await?; + Self::new(connection, None, false, BALANCE_SYNC_BATCH_SIZE_DEFAULT).await } pub fn get_token_display_name(&self) -> String { @@ -121,53 +123,61 @@ impl StorageClient { } } - fn new( - connection: rusqlite::Connection, + async fn new( + connection: Connection, cache_size_kb: Option<i64>, flush_cache_and_shrink_memory: bool, balance_sync_batch_size: u64, ) -> anyhow::Result<Self> { + connection + .call(move |conn| { + conn.pragma_update(None, "foreign_keys", 1)?; + + match cache_size_kb { + None => { + tracing::info!("No cache size configured"); + } + Some(cache_kb) => { + let cache_size = -cache_kb; // Negative to specify KB + conn.pragma_update(None, "cache_size", cache_size)?; + tracing::info!("SQLite cache_size set to {} KB", cache_kb); + } + } + + match flush_cache_and_shrink_memory { + true => { + tracing::info!( + "Flushing cache and shrinking memory after updating balances." + ) + } + false => { + tracing::info!( + "Not flushing cache and shrinking memory after updating balances."
+ ) + } + } + + tracing::info!("Using balance sync batch size {}", balance_sync_batch_size); + + // Create tables + super::schema::create_tables(conn)?; + + Ok::<_, StorageError>(()) + }) + .await?; + let storage_client = Self { - storage_connection: Mutex::new(connection), + storage_connection: connection, token_info: None, flush_cache_and_shrink_memory, balance_sync_batch_size, }; - let conn = storage_client.storage_connection.lock().unwrap(); - - conn.pragma_update(None, "foreign_keys", 1)?; - - match cache_size_kb { - None => { - tracing::info!("No cache size configured"); - } - Some(cache_kb) => { - let cache_size = -cache_kb; // Negative to specify KB - conn.pragma_update(None, "cache_size", cache_size)?; - tracing::info!("SQLite cache_size set to {} KB", cache_kb); - } - } - - match flush_cache_and_shrink_memory { - true => { - tracing::info!("Flushing cache and shrinking memory after updating balances.") - } - false => { - tracing::info!("Not flushing cache and shrinking memory after updating balances.") - } - } - - tracing::info!("Using balance sync batch size {}", balance_sync_batch_size); - - drop(conn); - - storage_client.create_tables()?; // Run the fee collector balances repair if needed tracing::info!( "Storage initialization: Checking if fee collector balance repair is needed" ); - storage_client.repair_fee_collector_balances()?; + storage_client.repair_fee_collector_balances().await?; Ok(storage_client) } @@ -176,12 +186,12 @@ impl StorageClient { self.token_info = Some(token_info); } - pub fn does_blockchain_have_gaps(&self) -> anyhow::Result<bool> { - let Some(highest_block_idx) = self.get_highest_block_idx()? else { + pub async fn does_blockchain_have_gaps(&self) -> anyhow::Result<bool> { + let Some(highest_block_idx) = self.get_highest_block_idx().await? else { // If the blockchain is empty, there are no gaps. return Ok(false); }; - let block_count = self.get_block_count()?; + let block_count = self.get_block_count().await?; match block_count.cmp(&highest_block_idx.saturating_add(1)) { Ordering::Equal => Ok(false), Ordering::Less => { @@ -203,180 +213,240 @@ impl StorageClient { } // Gets a block with a certain index. Returns `None` if no block exists in the database with that index. Returns an error if multiple blocks with that index exist. - pub fn get_block_at_idx(&self, block_idx: u64) -> anyhow::Result<Option<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_block_at_idx(&open_connection, block_idx) + pub async fn get_block_at_idx(&self, block_idx: u64) -> anyhow::Result<Option<RosettaBlock>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_block_at_idx(conn, block_idx)) + .await?) } // Gets a block with a certain hash. Returns `None` if no block exists in the database with that hash. Returns an error if multiple blocks with that hash exist. - pub fn get_block_by_hash(&self, hash: ByteBuf) -> anyhow::Result<Option<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_block_by_hash(&open_connection, hash) + pub async fn get_block_by_hash(&self, hash: ByteBuf) -> anyhow::Result<Option<RosettaBlock>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_block_by_hash(conn, hash)) + .await?) } // Gets the block with the highest block index. Returns `None` if no block exists in the database.
- pub fn get_block_with_highest_block_idx(&self) -> anyhow::Result<Option<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_block_with_highest_block_idx(&open_connection) + pub async fn get_block_with_highest_block_idx(&self) -> anyhow::Result<Option<RosettaBlock>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_block_with_highest_block_idx(conn)) + .await?) } // Gets the block with the lowest block index. Returns `None` if no block exists in the database. - pub fn get_block_with_lowest_block_idx(&self) -> anyhow::Result<Option<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_block_with_lowest_block_idx(&open_connection) + pub async fn get_block_with_lowest_block_idx(&self) -> anyhow::Result<Option<RosettaBlock>> { + Ok(self + ..storage_connection + .call(move |conn| storage_operations::get_block_with_lowest_block_idx(conn)) + .await?) } // Returns a range of blocks including the start index and the end index. // Returns an empty vector if the start index is outside of the range of the database. // Returns a subsect of the blocks range [start_index,end_index] if the end_index is outside of the range of the database. - pub fn get_blocks_by_index_range( + pub async fn get_blocks_by_index_range( &self, start_index: u64, end_index: u64, ) -> anyhow::Result<Vec<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_blocks_by_index_range(&open_connection, start_index, end_index) + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::get_blocks_by_index_range(conn, start_index, end_index) + }) + .await?) } /// Returns all the gaps in the stored blockchain. /// Gaps are defined as a range of blocks with indices [a+1,b-1] where the Blocks Block(a) and Block(b) exist in the database but the blocks with indices in the range (a,b) do not. /// Exp.: If there exists exactly one gap between the indices [a+1,b-1], then this function will return a vector with a single entry that contains the tuple of blocks [(Block(a),Block(b))]. - pub fn get_blockchain_gaps(&self) -> anyhow::Result<Vec<(RosettaBlock, RosettaBlock)>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_blockchain_gaps(&open_connection) + pub async fn get_blockchain_gaps(&self) -> anyhow::Result<Vec<(RosettaBlock, RosettaBlock)>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_blockchain_gaps(conn)) + .await?) } - pub fn get_highest_block_idx(&self) -> Result<Option<u64>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_highest_block_idx_in_blocks_table(&open_connection) + pub async fn get_highest_block_idx(&self) -> anyhow::Result<Option<u64>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_highest_block_idx_in_blocks_table(conn)) + .await?) } // Gets a transaction with a certain hash. Returns [] if no transaction exists in the database with that hash. Returns a vector with multiple entries if more than one transaction // with the given transaction hash exists - pub fn get_transactions_by_hash( + pub async fn get_transactions_by_hash( &self, hash: ByteBuf, ) -> anyhow::Result<Vec<crate::common::storage::types::IcrcTransaction>> { Ok(self - .get_blocks_by_transaction_hash(hash)? + .get_blocks_by_transaction_hash(hash) + .await? .into_iter() .map(|block| block.get_transaction()) .collect::<Vec<_>>()) } - pub fn get_blocks_by_custom_query<P>( + &self, sql_query: String, - params: P, + params: Vec<(String, rusqlite::types::Value)>, - ) -> anyhow::Result<Vec<RosettaBlock>> - where - P: rusqlite::Params, - { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_blocks_by_custom_query(&open_connection, sql_query, params) - } - - pub fn get_blocks_by_transaction_hash( + pub async fn get_blocks_by_custom_query( &self, sql_query: String, params: Vec<(String, rusqlite::types::Value)>, + ) -> anyhow::Result<Vec<RosettaBlock>> { + Ok(self + .storage_connection + .call(move |conn| { + // Convert Vec<(String, Value)> to the format rusqlite expects + let params_refs: Vec<(&str, &dyn rusqlite::ToSql)> = params + .iter() + .map(|(k, v)| (k.as_str(), v as &dyn rusqlite::ToSql)) + .collect(); + storage_operations::get_blocks_by_custom_query( + conn, + sql_query, + params_refs.as_slice(), + ) + }) + .await?) + } + + pub async fn get_blocks_by_transaction_hash( &self, hash: ByteBuf, ) -> anyhow::Result<Vec<RosettaBlock>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_blocks_by_transaction_hash(&open_connection, hash) + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_blocks_by_transaction_hash(conn, hash)) + .await?) } // Gets a transaction with a certain index. Returns None if no transaction exists in the database with that index. Returns an error if multiple transactions with that index exist. - pub fn get_transaction_at_idx( + pub async fn get_transaction_at_idx( &self, block_idx: u64, ) -> anyhow::Result<Option<crate::common::storage::types::IcrcTransaction>> { Ok(self - .get_block_at_idx(block_idx)? + .get_block_at_idx(block_idx) + .await? .map(|block| block.get_transaction())) } - pub fn read_metadata(&self) -> anyhow::Result<Vec<MetadataEntry>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_metadata(&open_connection) - } - - pub fn write_metadata(&self, metadata: Vec<MetadataEntry>) -> anyhow::Result<()> { - let mut open_connection = self.storage_connection.lock().unwrap(); - storage_operations::store_metadata(&mut open_connection, metadata) + pub async fn read_metadata(&self) -> anyhow::Result<Vec<MetadataEntry>> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_metadata(conn)) + .await?) } - pub fn reset_blocks_counter(&self) -> Result<()> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::reset_blocks_counter(&open_connection) + pub async fn write_metadata(&self, metadata: Vec<MetadataEntry>) -> anyhow::Result<()> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::store_metadata(conn, metadata)) + .await?) } - fn create_tables(&self) -> anyhow::Result<()> { - let open_connection = self.storage_connection.lock().unwrap(); - super::schema::create_tables(&open_connection) + pub async fn reset_blocks_counter(&self) -> anyhow::Result<()> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::reset_blocks_counter(conn)) + .await?) } // Populates the blocks and transactions table by the Rosettablocks provided // This function does NOT populate the account_balance table. - pub fn store_blocks(&self, blocks: Vec<RosettaBlock>) -> anyhow::Result<()> { - let mut open_connection = self.storage_connection.lock().unwrap(); - storage_operations::store_blocks(&mut open_connection, blocks) + pub async fn store_blocks(&self, blocks: Vec<RosettaBlock>) -> anyhow::Result<()> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::store_blocks(conn, blocks)) + .await?)
} // Extracts the information from the transaction and blocks table and fills the account balance table with that information // Throws an error if there are gaps in the transaction or blocks table. - pub fn update_account_balances(&self) -> anyhow::Result<()> { - if self.does_blockchain_have_gaps()? { + pub async fn update_account_balances(&self) -> anyhow::Result<()> { + if self.does_blockchain_have_gaps().await? { bail!("Tried to update account balances but there exist gaps in the database.",); } - let mut open_connection = self.storage_connection.lock().unwrap(); - storage_operations::update_account_balances( - &mut open_connection, - self.flush_cache_and_shrink_memory, - self.balance_sync_batch_size, - ) + let flush_cache_and_shrink_memory = self.flush_cache_and_shrink_memory; + let balance_sync_batch_size = self.balance_sync_batch_size; + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::update_account_balances( + conn, + flush_cache_and_shrink_memory, + balance_sync_batch_size, + ) + }) + .await?) } /// Retrieves the highest block index in the account balance table. /// Returns None if the account balance table is empty. - pub fn get_highest_block_idx_in_account_balance_table(&self) -> Result<Option<u64>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_highest_block_idx_in_account_balance_table(&open_connection) + pub async fn get_highest_block_idx_in_account_balance_table( + &self, + ) -> anyhow::Result<Option<u64>> { + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::get_highest_block_idx_in_account_balance_table(conn) + }) + .await?) } // Retrieves the account balance at a certain block height // Returns None if the account does not exist in the database - pub fn get_account_balance_at_block_idx( + pub async fn get_account_balance_at_block_idx( &self, account: &Account, block_idx: u64, ) -> anyhow::Result<Option<Nat>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_account_balance_at_block_idx(&open_connection, account, block_idx) + let account = *account; + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::get_account_balance_at_block_idx(conn, &account, block_idx) + }) + .await?) } // Retrieves the account balance at the heighest block height in the database // Returns None if the account does not exist in the database - pub fn get_account_balance(&self, account: &Account) -> anyhow::Result<Option<Nat>> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_account_balance_at_highest_block_idx(&open_connection, account) + pub async fn get_account_balance(&self, account: &Account) -> anyhow::Result<Option<Nat>> { + let account = *account; + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::get_account_balance_at_highest_block_idx(conn, &account) + }) + .await?)
} // Retrieves the aggregated balance of all subaccounts for a given principal at a specific block height - pub fn get_aggregated_balance_for_principal_at_block_idx( + pub async fn get_aggregated_balance_for_principal_at_block_idx( &self, principal: &ic_base_types::PrincipalId, block_idx: u64, ) -> anyhow::Result<Nat> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_aggregated_balance_for_principal_at_block_idx( - &open_connection, - principal, - block_idx, - ) + let principal = *principal; + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::get_aggregated_balance_for_principal_at_block_idx( + conn, &principal, block_idx, + ) + }) + .await?) } - pub fn get_block_count(&self) -> anyhow::Result<u64> { - let open_connection = self.storage_connection.lock().unwrap(); - storage_operations::get_block_count(&open_connection) + pub async fn get_block_count(&self) -> anyhow::Result<u64> { + Ok(self + .storage_connection + .call(move |conn| storage_operations::get_block_count(conn)) + .await?) } /// Repairs account balances for databases created before the fee collector block index fix. @@ -388,12 +458,14 @@ impl StorageClient { /// # Returns /// /// Returns `Ok(())` if the repair was successful, or an error if the repair failed. - pub fn repair_fee_collector_balances(&self) -> anyhow::Result<()> { - let mut open_connection = self.storage_connection.lock().unwrap(); - storage_operations::repair_fee_collector_balances( - &mut open_connection, - self.balance_sync_batch_size, - ) + pub async fn repair_fee_collector_balances(&self) -> anyhow::Result<()> { + let balance_sync_batch_size = self.balance_sync_batch_size; + Ok(self + .storage_connection + .call(move |conn| { + storage_operations::repair_fee_collector_balances(conn, balance_sync_batch_size) + }) + .await?) } } @@ -422,18 +494,23 @@ mod tests { #[test] fn smoke_test() { - let storage_client_memory = StorageClient::new_in_memory(); - assert!(storage_client_memory.is_ok()); - let tmpdir = create_tmp_dir(); - let file_path = tmpdir.path().join("db.sqlite"); - let storage_client_persistent = StorageClient::new_persistent(&file_path); - assert!(storage_client_persistent.is_ok()); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await; + assert!(storage_client_memory.is_ok()); + let tmpdir = create_tmp_dir(); + let file_path = tmpdir.path().join("db.sqlite"); + let storage_client_persistent = StorageClient::new_persistent(&file_path).await; + assert!(storage_client_persistent.is_ok()); + }); } proptest!
{ #[test] fn test_read_and_write_blocks_u64(blockchain in prop::collection::vec(blocks_strategy::<U64>(arb_amount()),0..5)){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let mut rosetta_blocks = vec![]; for (index,block) in blockchain.into_iter().enumerate(){ // Make sure rosetta blocks store the correct transactions @@ -449,18 +526,21 @@ mod tests { rosetta_blocks.push(rosetta_block) } - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); for rosetta_block in rosetta_blocks.into_iter(){ - let block_read = storage_client_memory.get_block_at_idx(rosetta_block.clone().index).unwrap().unwrap(); + let block_read = storage_client_memory.get_block_at_idx(rosetta_block.clone().index).await.unwrap().unwrap(); assert_eq!(block_read,rosetta_block); - let block_read = storage_client_memory.get_block_by_hash(rosetta_block.clone().get_block_hash()).unwrap().unwrap(); + let block_read = storage_client_memory.get_block_by_hash(rosetta_block.clone().get_block_hash()).await.unwrap().unwrap(); assert_eq!(block_read,rosetta_block); } + }) } #[test] fn test_read_and_write_blocks_u256(blockchain in prop::collection::vec(blocks_strategy::<U256>(arb_amount()),0..5)){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let mut rosetta_blocks = vec![]; for (index,block) in blockchain.into_iter().enumerate(){ // Make sure rosetta blocks store the correct transactions @@ -475,27 +555,30 @@ mod tests { // Make sure the encoding and decoding works rosetta_blocks.push(rosetta_block) } - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); for rosetta_block in rosetta_blocks.into_iter(){ - let block_read = storage_client_memory.get_block_at_idx(rosetta_block.clone().index).unwrap().unwrap(); + let block_read = storage_client_memory.get_block_at_idx(rosetta_block.clone().index).await.unwrap().unwrap(); assert_eq!(block_read,rosetta_block); - let block_read = storage_client_memory.get_block_by_hash(rosetta_block.clone().get_block_hash()).unwrap().unwrap(); + let block_read = storage_client_memory.get_block_by_hash(rosetta_block.clone().get_block_hash()).await.unwrap().unwrap(); assert_eq!(block_read,rosetta_block); } + }) } #[test] fn test_read_and_write_transactions(blockchain in valid_blockchain_with_gaps_strategy::<U64>(1000)){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let rosetta_blocks: Vec<_> = blockchain.0.iter().zip(blockchain.1.iter()) .map(|(block, index)| RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.clone().encode()), *index as u64).unwrap()) .collect(); - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); for block in rosetta_blocks.clone(){ let tx0 = block.get_transaction(); - let tx1 = storage_client_memory.get_block_at_idx(block.index).unwrap().unwrap().get_transaction(); - let tx2 =
storage_client_memory.get_transaction_at_idx(block.index).unwrap().unwrap(); - let tx3 = &storage_client_memory.get_transactions_by_hash(block.clone().get_transaction_hash()).unwrap().clone()[0]; + let tx1 = storage_client_memory.get_block_at_idx(block.index).await.unwrap().unwrap().get_transaction(); + let tx2 = storage_client_memory.get_transaction_at_idx(block.index).await.unwrap().unwrap(); + let tx3 = &storage_client_memory.get_transactions_by_hash(block.clone().get_transaction_hash()).await.unwrap().clone()[0]; assert_eq!(tx0,tx1); assert_eq!(tx1,tx2); assert_eq!(tx2,*tx3); @@ -504,50 +587,56 @@ mod tests { if !rosetta_blocks.is_empty() { let last_block = &rosetta_blocks[rosetta_blocks.len().saturating_sub(1)]; // If the index is out of range the function should return `None`. - assert!(storage_client_memory.get_transaction_at_idx(last_block.index+1).unwrap().is_none()); + assert!(storage_client_memory.get_transaction_at_idx(last_block.index+1).await.unwrap().is_none()); // Duplicate the last transaction generated let duplicate_tx_block = RosettaBlock::from_generic_block(last_block.get_generic_block(), last_block.index + 1).unwrap(); - storage_client_memory.store_blocks([duplicate_tx_block.clone()].to_vec()).unwrap(); + storage_client_memory.store_blocks([duplicate_tx_block.clone()].to_vec()).await.unwrap(); // The hash of the duplicated transaction should still be the same --> There should be two transactions with the same transaction hash. - assert_eq!(storage_client_memory.get_transactions_by_hash(duplicate_tx_block.clone().get_transaction_hash()).unwrap().len(),2); + assert_eq!(storage_client_memory.get_transactions_by_hash(duplicate_tx_block.clone().get_transaction_hash()).await.unwrap().len(),2); } + }) } #[test] fn test_highest_lowest_block_index(blocks in prop::collection::vec(blocks_strategy::<U64>(arb_amount::<U64>()),1..100)){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let mut rosetta_blocks = vec![]; for (index,block) in blocks.clone().into_iter().enumerate(){ rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.encode()),index as u64).unwrap()); } - storage_client_memory.store_blocks(rosetta_blocks).unwrap(); - let block_read = storage_client_memory.get_block_with_highest_block_idx().unwrap().unwrap(); + storage_client_memory.store_blocks(rosetta_blocks).await.unwrap(); + let block_read = storage_client_memory.get_block_with_highest_block_idx().await.unwrap().unwrap(); // Indexing starts at 0. assert_eq!(block_read.index,(blocks.len() as u64)-1); - let block_read = storage_client_memory.get_block_with_lowest_block_idx().unwrap().unwrap(); + let block_read = storage_client_memory.get_block_with_lowest_block_idx().await.unwrap().unwrap(); assert_eq!(block_read.index,0); - let blocks_read = storage_client_memory.get_blocks_by_index_range(0,blocks.len() as u64).unwrap(); + let blocks_read = storage_client_memory.get_blocks_by_index_range(0,blocks.len() as u64).await.unwrap(); // Storage should return all blocks that are stored.
assert_eq!(blocks_read.len(),blocks.len()); - let blocks_read = storage_client_memory.get_blocks_by_index_range(blocks.len() as u64 +1,blocks.len() as u64 +2).unwrap(); + let blocks_read = storage_client_memory.get_blocks_by_index_range(blocks.len() as u64 +1,blocks.len() as u64 +2).await.unwrap(); // Start index is outside of the index range of the blocks stored in the database -> Should return an empty vector. assert!(blocks_read.is_empty()); - let blocks_read = storage_client_memory.get_blocks_by_index_range(1,blocks.len() as u64 + 2).unwrap(); + let blocks_read = storage_client_memory.get_blocks_by_index_range(1,blocks.len() as u64 + 2).await.unwrap(); // End index is outside of the blocks stored in the database --> Returns subset of blocks stored in the database. assert_eq!(blocks_read.len(),blocks.len().saturating_sub(1)); + }) } #[test] fn test_deriving_gaps_from_storage(blockchain in valid_blockchain_with_gaps_strategy::<U64>(1000).no_shrink()){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let mut rosetta_blocks = vec![]; for i in 0..blockchain.0.len() { rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&blockchain.0[i].clone().encode()),blockchain.1[i] as u64).unwrap()); } - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); // This function will return a list of all the non consecutive intervals. let non_consecutive_intervals = |blocks: Vec<u64>| { if blocks.is_empty() { return vec![]; } let mut intervals = vec![]; let mut previous_idx = blocks[0]; for block_idx in blocks.iter().skip(1) { if block_idx - previous_idx > 1 { intervals.push((previous_idx, *block_idx)); } previous_idx = *block_idx; } intervals }; // Fetch the database gaps and map them to indices tuples. - let derived_gaps = storage_client_memory.get_blockchain_gaps().unwrap().into_iter().map(|(a,b)| (a.index,b.index)).collect::<Vec<_>>(); + let derived_gaps = storage_client_memory.get_blockchain_gaps().await.unwrap().into_iter().map(|(a,b)| (a.index,b.index)).collect::<Vec<_>>(); // Does the blockchain have gaps? - let has_gaps = storage_client_memory.does_blockchain_have_gaps().unwrap(); + let has_gaps = storage_client_memory.does_blockchain_have_gaps().await.unwrap(); // If the database is empty the returned gaps vector should simply be empty.
if rosetta_blocks.last().is_some(){ @@ -582,34 +671,41 @@ mod tests { assert!(!has_gaps); } + }) } #[test] fn test_read_and_write_metadata(metadata in metadata_strategy()) { - let storage_client_memory = StorageClient::new_in_memory().unwrap(); - let entries_write = metadata.iter().map(|(key, value)| MetadataEntry::from_metadata_value(key, value)).collect::<anyhow::Result<Vec<MetadataEntry>>>().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); + let entries_write = metadata.iter().map(|(key, value)| MetadataEntry::from_metadata_value(key, value)).collect::<anyhow::Result<Vec<MetadataEntry>>>().unwrap(); let metadata_write = Metadata::from_metadata_entries(&entries_write).unwrap(); - storage_client_memory.write_metadata(entries_write).unwrap(); - let entries_read = storage_client_memory.read_metadata().unwrap(); + storage_client_memory.write_metadata(entries_write).await.unwrap(); + let entries_read = storage_client_memory.read_metadata().await.unwrap(); let metadata_read = Metadata::from_metadata_entries(&entries_read).unwrap(); assert_eq!(metadata_write, metadata_read); + }) } #[test] fn test_updating_account_balances_for_blockchain_with_gaps(blockchain in valid_blockchain_with_gaps_strategy::<U64>(1000)){ - let storage_client_memory = StorageClient::new_in_memory().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); let mut rosetta_blocks = vec![]; for i in 0..blockchain.0.len() { rosetta_blocks.push(RosettaBlock::from_encoded_block(&blockchain.0[i].clone().encode(),blockchain.1[i] as u64).unwrap()); } - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); - if !storage_client_memory.get_blockchain_gaps().unwrap().is_empty(){ + if !storage_client_memory.get_blockchain_gaps().await.unwrap().is_empty(){ // Updating of account balances should not be possible if the stored blockchain contains gaps - assert!(storage_client_memory.update_account_balances().is_err()) + assert!(storage_client_memory.update_account_balances().await.is_err()) } + }) } } } diff --git a/rs/rosetta-api/icrc1/src/common/storage/storage_operations.rs b/rs/rosetta-api/icrc1/src/common/storage/storage_operations.rs index 4d64b136c0d7..791e0e11ef4b 100644 --- a/rs/rosetta-api/icrc1/src/common/storage/storage_operations.rs +++ b/rs/rosetta-api/icrc1/src/common/storage/storage_operations.rs @@ -1,6 +1,6 @@ use crate::MetadataEntry; +use crate::common::storage::error::StorageError; use crate::common::storage::types::{RosettaBlock, RosettaCounter}; -use anyhow::{Context, bail}; use candid::Nat; use ic_base_types::PrincipalId; use ic_ledger_core::tokens::Zero; @@ -14,14 +14,14 @@ use std::collections::{BTreeMap, HashMap}; use std::str::FromStr; use tracing::{info, trace}; +/// Result type alias for storage operations. +pub type Result<T> = std::result::Result<T, StorageError>; + pub const METADATA_SCHEMA_VERSION: &str = "schema_version"; /// Gets the current value of a counter from the database. /// Returns None if the counter doesn't exist.
-pub fn get_counter_value( - connection: &Connection, - counter: &RosettaCounter, -) -> anyhow::Result<Option<i64>> { +pub fn get_counter_value(connection: &Connection, counter: &RosettaCounter) -> Result<Option<i64>> { let mut stmt = connection.prepare_cached("SELECT value FROM counters WHERE name = ?1")?; let mut rows = stmt.query(params![counter.name()])?; @@ -37,7 +37,7 @@ pub fn set_counter_value( connection: &Connection, counter: &RosettaCounter, value: i64, -) -> anyhow::Result<()> { +) -> Result<()> { connection .prepare_cached("INSERT OR REPLACE INTO counters (name, value) VALUES (?1, ?2)")? .execute(params![counter.name(), value])?; @@ -50,7 +50,7 @@ pub fn increment_counter( connection: &Connection, counter: &RosettaCounter, increment: i64, -) -> anyhow::Result<()> { +) -> Result<()> { connection .prepare_cached( "INSERT INTO counters (name, value) VALUES (?1, ?2) @@ -62,12 +62,12 @@ /// Checks if a counter flag is set (value > 0). /// Returns false if the counter doesn't exist. -pub fn is_counter_flag_set( - connection: &Connection, - counter: &RosettaCounter, -) -> anyhow::Result<bool> { +pub fn is_counter_flag_set(connection: &Connection, counter: &RosettaCounter) -> Result<bool> { if !counter.is_flag() { - bail!("Counter {} is not a flag counter", counter.name()); + return Err(StorageError::DataIntegrity(format!( + "Counter {} is not a flag counter", + counter.name() + ))); } Ok(get_counter_value(connection, counter)?.unwrap_or(0) > 0) @@ -75,9 +75,12 @@ /// Sets a counter flag to true (value = 1). /// Only works with flag counters. -pub fn set_counter_flag(connection: &Connection, counter: &RosettaCounter) -> anyhow::Result<()> { +pub fn set_counter_flag(connection: &Connection, counter: &RosettaCounter) -> Result<()> { if !counter.is_flag() { - bail!("Counter {} is not a flag counter", counter.name()); + return Err(StorageError::DataIntegrity(format!( + "Counter {} is not a flag counter", + counter.name() + ))); } set_counter_value(connection, counter, 1) @@ -88,7 +91,7 @@ pub fn initialize_counter_if_missing( connection: &Connection, counter: &RosettaCounter, -) -> anyhow::Result<()> { +) -> Result<()> { match counter { RosettaCounter::SyncedBlocks => { // Set to current block count if not exists @@ -112,7 +115,7 @@ pub fn get_fee_collector_from_block( rosetta_block: &RosettaBlock, connection: &Connection, -) -> anyhow::Result<Option<Account>> { +) -> Result<Option<Account>> { // First check if the fee collector is directly specified in the block if let Some(fee_collector) = rosetta_block.get_fee_collector() { return Ok(Some(fee_collector)); } // If not, check if there's a fee_collector_block_index that points to another block if let Some(fee_collector_block_index) = rosetta_block.get_fee_collector_block_index() { - let referenced_block = get_block_at_idx(connection, fee_collector_block_index)?
- .with_context(|| { - format!( + let referenced_block = + get_block_at_idx(connection, fee_collector_block_index)?.ok_or_else(|| { + StorageError::DataIntegrity(format!( "Block at index {} has fee_collector_block_index {} but there is no block at that index", rosetta_block.index, fee_collector_block_index - ) + )) })?; if let Some(fee_collector) = referenced_block.get_fee_collector() { return Ok(Some(fee_collector)); } else { - bail!( + return Err(StorageError::DataIntegrity(format!( "Block at index {} has fee_collector_block_index {} but that block has no fee_collector set", - rosetta_block.index, - fee_collector_block_index - ); + rosetta_block.index, fee_collector_block_index + ))); } } @@ -143,10 +145,7 @@ Ok(None) } -pub fn store_metadata( - connection: &mut Connection, - metadata: Vec<MetadataEntry>, -) -> anyhow::Result<()> { +pub fn store_metadata(connection: &mut Connection, metadata: Vec<MetadataEntry>) -> Result<()> { let insert_tx = connection.transaction()?; for entry in metadata.into_iter() { @@ -156,7 +155,7 @@ Ok(()) } -pub fn get_metadata(connection: &Connection) -> anyhow::Result<Vec<MetadataEntry>> { +pub fn get_metadata(connection: &Connection) -> Result<Vec<MetadataEntry>> { let mut stmt_metadata = connection.prepare_cached("SELECT key, value FROM metadata")?; let rows = stmt_metadata.query_map(params![], |row| { Ok(MetadataEntry { @@ -172,7 +171,7 @@ -pub fn get_rosetta_metadata(connection: &Connection, key: &str) -> anyhow::Result<Option<Vec<u8>>> { +pub fn get_rosetta_metadata(connection: &Connection, key: &str) -> Result<Option<Vec<u8>>> { let mut stmt_metadata = connection.prepare_cached(&format!( "SELECT value FROM rosetta_metadata WHERE key = '{key}'" ))?; @@ -185,7 +184,9 @@ match result.len() { 0 => Ok(None), 1 => Ok(Some(result.swap_remove(0))), - _ => bail!(format!("Multiple metadata entries found for key: {key}")), + _ => Err(StorageError::MultipleRecordsFound(format!( + "Multiple metadata entries found for key: {key}" + ))), } } @@ -193,262 +194,279 @@ pub fn update_account_balances( connection: &mut Connection, flush_cache_and_shrink_memory: bool, batch_size: u64, -) -> anyhow::Result<()> { - // Utility method that tries to fetch the balance from the cache first and, if - // no balance has been found, fetches it from the database - fn get_account_balance_with_cache( - account: &Account, - index: u64, - connection: &mut Connection, - account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, - ) -> anyhow::Result<Option<Nat>> { - // Either fetch the balance from the cache or from the database - match account_balances_cache.get(account).map(|balances| { - balances - .last_key_value() - .map(|(_, balance)| balance.clone()) - }) { - Some(balance) => Ok(balance), - None => get_account_balance_at_block_idx(connection, account, index), - } - } +) -> Result<()> { + use anyhow::Context; - fn debit( - account: Account, - amount: Nat, - index: u64, + // Inner function that uses anyhow for convenient error handling + fn inner( connection: &mut Connection, - account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, + flush_cache_and_shrink_memory: bool, + batch_size: u64, ) -> anyhow::Result<()> { - let new_balance = if let Some(balance) = - get_account_balance_with_cache(&account, index, connection, account_balances_cache)? - { - Nat(balance.0.checked_sub(&amount.0).with_context(|| { - format!( - "Underflow while debiting account {account} for amount {amount} at index {index} (balance: {balance})" - ) - })?)
- } else { - bail!( - "Trying to debit an account {} that has not yet been allocated any tokens (index: {})", - account, - index - ) - }; - account_balances_cache - .entry(account) - .or_default() - .insert(index, new_balance); - Ok(()) - } + // Utility method that tries to fetch the balance from the cache first and, if + // no balance has been found, fetches it from the database + fn get_account_balance_with_cache( + account: &Account, + index: u64, + connection: &mut Connection, + account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, + ) -> anyhow::Result<Option<Nat>> { + // Either fetch the balance from the cache or from the database + match account_balances_cache.get(account).map(|balances| { + balances + .last_key_value() + .map(|(_, balance)| balance.clone()) + }) { + Some(balance) => Ok(balance), + None => { + get_account_balance_at_block_idx(connection, account, index).map_err(Into::into) + } + } + } - fn credit( - account: Account, - amount: Nat, - index: u64, - connection: &mut Connection, - account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, - ) -> anyhow::Result<()> { - let new_balance = if let Some(balance) = - get_account_balance_with_cache(&account, index, connection, account_balances_cache)? - { - Nat(balance.0.checked_add(&amount.0).with_context(|| { - format!( - "Overflow while crediting an account {account} for amount {amount} at index {index} (balance: {balance})" + fn debit( + account: Account, + amount: Nat, + index: u64, + connection: &mut Connection, + account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, + ) -> anyhow::Result<()> { + let new_balance = if let Some(balance) = + get_account_balance_with_cache(&account, index, connection, account_balances_cache)? + { + Nat(balance.0.checked_sub(&amount.0).with_context(|| { + format!( + "Underflow while debiting account {account} for amount {amount} at index {index} (balance: {balance})" + ) + })?) + } else { + anyhow::bail!( + "Trying to debit an account {} that has not yet been allocated any tokens (index: {})", + account, + index ) - })?) - } else { - amount - }; - account_balances_cache - .entry(account) - .or_default() - .insert(index, new_balance); - Ok(()) - } + }; + account_balances_cache + .entry(account) + .or_default() + .insert(index, new_balance); + Ok(()) + } - // The next block to be updated is the highest block index in the account balance table + 1 if the table is not empty and 0 otherwise - let next_block_to_be_updated = - get_highest_block_idx_in_account_balance_table(connection)?.map_or(0, |idx| idx + 1); - let highest_block_idx = - get_block_with_highest_block_idx(connection)?.map_or(0, |block| block.index); + fn credit( + account: Account, + amount: Nat, + index: u64, + connection: &mut Connection, + account_balances_cache: &mut HashMap<Account, BTreeMap<u64, Nat>>, + ) -> anyhow::Result<()> { + let new_balance = if let Some(balance) = + get_account_balance_with_cache(&account, index, connection, account_balances_cache)? + { + Nat(balance.0.checked_add(&amount.0).with_context(|| { + format!( + "Overflow while crediting an account {account} for amount {amount} at index {index} (balance: {balance})" + ) + })?)
+ } else { + amount + }; + account_balances_cache + .entry(account) + .or_default() + .insert(index, new_balance); + Ok(()) + } - // If the blocks and account_balance tables show the same max block height then there is nothing that needs to be synced - if highest_block_idx < next_block_to_be_updated { - return Ok(()); - } - let mut batch_start_idx = next_block_to_be_updated; - let mut batch_end_idx = batch_start_idx + batch_size; - let mut rosetta_blocks = get_blocks_by_index_range(connection, batch_start_idx, batch_end_idx)?; - - // For faster inserts, keep a cache of the account balances within a batch range in memory - // This also makes the inserting of the account balances batchable and therefore faster - let mut account_balances_cache: HashMap> = HashMap::new(); - - // As long as there are blocks to be fetched, keep on iterating over the blocks in the database with the given BATCH_SIZE interval - while !rosetta_blocks.is_empty() { - for rosetta_block in rosetta_blocks { - match rosetta_block.get_transaction().operation { - crate::common::storage::types::IcrcOperation::Burn { - from, - amount, - fee: _, - spender: _, - } => { - let fee = rosetta_block - .get_fee_paid()? - .unwrap_or(Nat(BigUint::zero())); - let burn_amount = Nat(amount.0.checked_add(&fee.0) - .with_context(|| format!("Overflow while adding the fee {} to the amount {} for block at index {}", - fee, amount, rosetta_block.index - ))?); - debit( + // The next block to be updated is the highest block index in the account balance table + 1 if the table is not empty and 0 otherwise + let next_block_to_be_updated = + get_highest_block_idx_in_account_balance_table(connection)?.map_or(0, |idx| idx + 1); + let highest_block_idx = + get_block_with_highest_block_idx(connection)?.map_or(0, |block| block.index); + + // If the blocks and account_balance tables show the same max block height then there is nothing that needs to be synced + if highest_block_idx < next_block_to_be_updated { + return Ok(()); + } + let mut batch_start_idx = next_block_to_be_updated; + let mut batch_end_idx = batch_start_idx + batch_size; + let mut rosetta_blocks = + get_blocks_by_index_range(connection, batch_start_idx, batch_end_idx)?; + + // For faster inserts, keep a cache of the account balances within a batch range in memory + // This also makes the inserting of the account balances batchable and therefore faster + let mut account_balances_cache: HashMap> = HashMap::new(); + + // As long as there are blocks to be fetched, keep on iterating over the blocks in the database with the given BATCH_SIZE interval + while !rosetta_blocks.is_empty() { + for rosetta_block in rosetta_blocks { + match rosetta_block.get_transaction().operation { + crate::common::storage::types::IcrcOperation::Burn { from, - burn_amount, - rosetta_block.index, - connection, - &mut account_balances_cache, - )?; - if let Some(collector) = - get_fee_collector_from_block(&rosetta_block, connection)? - { - credit( - collector, - fee, + amount, + fee: _, + spender: _, + } => { + let fee = rosetta_block + .get_fee_paid()? + .unwrap_or(Nat(BigUint::zero())); + let burn_amount = Nat(amount.0.checked_add(&fee.0).with_context(|| { + format!( + "Overflow while adding the fee {} to the amount {} for block at index {}", + fee, amount, rosetta_block.index + ) + })?); + debit( + from, + burn_amount, rosetta_block.index, connection, &mut account_balances_cache, )?; + if let Some(collector) = + get_fee_collector_from_block(&rosetta_block, connection)? 
+ { + credit( + collector, + fee, + rosetta_block.index, + connection, + &mut account_balances_cache, + )?; + } } - } - crate::common::storage::types::IcrcOperation::Mint { to, amount, fee: _ } => { - let fee = rosetta_block - .get_fee_paid()? - .unwrap_or(Nat(BigUint::zero())); - let credit_amount = Nat(amount.0.checked_sub(&fee.0) - .with_context(|| format!("Underflow while subtracting the fee {} from the amount {} for block at index {}", - fee, amount, rosetta_block.index - ))?); - credit( - to, - credit_amount, - rosetta_block.index, - connection, - &mut account_balances_cache, - )?; - if let Some(collector) = - get_fee_collector_from_block(&rosetta_block, connection)? - { + crate::common::storage::types::IcrcOperation::Mint { to, amount, fee: _ } => { + let fee = rosetta_block + .get_fee_paid()? + .unwrap_or(Nat(BigUint::zero())); + let credit_amount = Nat(amount.0.checked_sub(&fee.0).with_context(|| { + format!( + "Underflow while subtracting the fee {} from the amount {} for block at index {}", + fee, amount, rosetta_block.index + ) + })?); credit( - collector, + to, + credit_amount, + rosetta_block.index, + connection, + &mut account_balances_cache, + )?; + if let Some(collector) = + get_fee_collector_from_block(&rosetta_block, connection)? + { + credit( + collector, + fee, + rosetta_block.index, + connection, + &mut account_balances_cache, + )?; + } + } + crate::common::storage::types::IcrcOperation::Approve { + from, + spender: _, + amount: _, + expected_allowance: _, + expires_at: _, + fee: _, + } => { + let fee = rosetta_block + .get_fee_paid()? + .unwrap_or(Nat(BigUint::zero())); + debit( + from, fee, rosetta_block.index, connection, &mut account_balances_cache, )?; } - } - crate::common::storage::types::IcrcOperation::Approve { - from, - spender: _, - amount: _, - expected_allowance: _, - expires_at: _, - fee: _, - } => { - let fee = rosetta_block - .get_fee_paid()? - .unwrap_or(Nat(BigUint::zero())); - debit( + crate::common::storage::types::IcrcOperation::Transfer { from, - fee, - rosetta_block.index, - connection, - &mut account_balances_cache, - )?; - } - crate::common::storage::types::IcrcOperation::Transfer { - from, - to, - amount, - spender: _, - fee: _, - } => { - let fee = rosetta_block - .get_fee_paid()? - .unwrap_or(Nat(BigUint::zero())); - let payable_amount = Nat(amount.0.checked_add(&fee.0) - .with_context(|| format!("Overflow while adding the fee {} to the amount {} for block at index {}", - fee, amount, rosetta_block.index - ))?); - - credit( to, amount, - rosetta_block.index, - connection, - &mut account_balances_cache, - )?; - debit( - from, - payable_amount, - rosetta_block.index, - connection, - &mut account_balances_cache, - )?; - - if let Some(collector) = - get_fee_collector_from_block(&rosetta_block, connection)? - { + spender: _, + fee: _, + } => { + let fee = rosetta_block + .get_fee_paid()? + .unwrap_or(Nat(BigUint::zero())); + let payable_amount = Nat(amount.0.checked_add(&fee.0).with_context(|| { + format!( + "Overflow while adding the fee {} to the amount {} for block at index {}", + fee, amount, rosetta_block.index + ) + })?); + credit( - collector, - fee, + to, + amount, + rosetta_block.index, + connection, + &mut account_balances_cache, + )?; + debit( + from, + payable_amount, rosetta_block.index, connection, &mut account_balances_cache, )?; + + if let Some(collector) = + get_fee_collector_from_block(&rosetta_block, connection)? 
+ { + credit( + collector, + fee, + rosetta_block.index, + connection, + &mut account_balances_cache, + )?; + } } } } - } - // Flush the cache - let insert_tx = connection.transaction()?; - for (account, block_idx_new_balances) in account_balances_cache.drain() { - for (block_idx, new_balance) in block_idx_new_balances { - insert_tx - .prepare_cached("INSERT INTO account_balances (block_idx, principal, subaccount, amount) VALUES (:block_idx, :principal, :subaccount, :amount)")? - .execute(named_params! { - ":block_idx": block_idx, - ":principal": account.owner.as_slice(), - ":subaccount": account.effective_subaccount().as_slice(), - ":amount": new_balance.to_string(), - })?; + // Flush the cache + let insert_tx = connection.transaction()?; + for (account, block_idx_new_balances) in account_balances_cache.drain() { + for (block_idx, new_balance) in block_idx_new_balances { + insert_tx + .prepare_cached("INSERT INTO account_balances (block_idx, principal, subaccount, amount) VALUES (:block_idx, :principal, :subaccount, :amount)")? + .execute(named_params! { + ":block_idx": block_idx, + ":principal": account.owner.as_slice(), + ":subaccount": account.effective_subaccount().as_slice(), + ":amount": new_balance.to_string(), + })?; + } } - } - insert_tx.commit()?; + insert_tx.commit()?; - if flush_cache_and_shrink_memory { - trace!("flushing cache and shrinking memory"); - connection.cache_flush()?; - connection.pragma_update(None, "shrink_memory", 1)?; - } + if flush_cache_and_shrink_memory { + trace!("flushing cache and shrinking memory"); + connection.cache_flush()?; + connection.pragma_update(None, "shrink_memory", 1)?; + } - // Fetch the next batch of blocks - batch_start_idx = get_highest_block_idx_in_account_balance_table(connection)? - .context("No blocks in account balance table after inserting")? - + 1; - batch_end_idx = batch_start_idx + batch_size; - rosetta_blocks = get_blocks_by_index_range(connection, batch_start_idx, batch_end_idx)?; + // Fetch the next batch of blocks + batch_start_idx = get_highest_block_idx_in_account_balance_table(connection)? + .context("No blocks in account balance table after inserting")? + + 1; + batch_end_idx = batch_start_idx + batch_size; + rosetta_blocks = get_blocks_by_index_range(connection, batch_start_idx, batch_end_idx)?; + } + Ok(()) } - Ok(()) + + inner(connection, flush_cache_and_shrink_memory, batch_size).map_err(StorageError::from) } // Stores a batch of RosettaBlocks -pub fn store_blocks( - connection: &mut Connection, - rosetta_blocks: Vec, -) -> anyhow::Result<()> { +pub fn store_blocks(connection: &mut Connection, rosetta_blocks: Vec) -> Result<()> { let insert_tx = connection.transaction()?; for rosetta_block in rosetta_blocks.into_iter() { let transaction: crate::common::storage::types::IcrcTransaction = @@ -588,10 +606,7 @@ fn convert_timestamp_to_db(timestamp: u64) -> i64 { // Returns a RosettaBlock if the block index exists in the database, else returns None. // Returns an Error if the query fails. -pub fn get_block_at_idx( - connection: &Connection, - block_idx: u64, -) -> anyhow::Result> { +pub fn get_block_at_idx(connection: &Connection, block_idx: u64) -> Result> { let command = format!("SELECT idx,serialized_block FROM blocks WHERE idx = {block_idx}"); let mut stmt = connection.prepare_cached(&command)?; read_single_block(&mut stmt, params![]) @@ -600,10 +615,7 @@ pub fn get_block_at_idx( // Returns a RosettaBlock with the smallest index larger than block_idx. // Returns None if there are no blocks with larger index. 
 // Returns an Error if the query fails.
-fn get_block_at_next_idx(
-    connection: &Connection,
-    block_idx: u64,
-) -> anyhow::Result<Option<RosettaBlock>> {
+fn get_block_at_next_idx(connection: &Connection, block_idx: u64) -> Result<Option<RosettaBlock>> {
     let command = format!(
         "SELECT idx,serialized_block FROM blocks WHERE idx > {block_idx} ORDER BY idx ASC LIMIT 1"
     );
@@ -613,27 +625,20 @@ fn get_block_at_next_idx(
 
 // Returns a RosettaBlock if the block hash exists in the database, else returns None.
 // Returns an Error if the query fails.
-pub fn get_block_by_hash(
-    connection: &Connection,
-    hash: ByteBuf,
-) -> anyhow::Result<Option<RosettaBlock>> {
+pub fn get_block_by_hash(connection: &Connection, hash: ByteBuf) -> Result<Option<RosettaBlock>> {
     let mut stmt =
         connection.prepare_cached("SELECT idx,serialized_block FROM blocks WHERE hash = ?1")?;
     read_single_block(&mut stmt, params![hash.as_slice().to_vec()])
 }
 
-pub fn get_block_with_highest_block_idx(
-    connection: &Connection,
-) -> anyhow::Result<Option<RosettaBlock>> {
+pub fn get_block_with_highest_block_idx(connection: &Connection) -> Result<Option<RosettaBlock>> {
     let command =
         "SELECT idx,serialized_block FROM blocks WHERE idx = (SELECT MAX(idx) FROM blocks)";
     let mut stmt = connection.prepare_cached(command)?;
     read_single_block(&mut stmt, params![])
 }
 
-pub fn get_block_with_lowest_block_idx(
-    connection: &Connection,
-) -> anyhow::Result<Option<RosettaBlock>> {
+pub fn get_block_with_lowest_block_idx(connection: &Connection) -> Result<Option<RosettaBlock>> {
     let command =
         "SELECT idx,serialized_block FROM blocks WHERE idx = (SELECT MIN(idx) FROM blocks)";
     let mut stmt = connection.prepare_cached(command)?;
@@ -644,15 +649,13 @@ pub fn get_blocks_by_index_range(
     connection: &Connection,
     start_index: u64,
     end_index: u64,
-) -> anyhow::Result<Vec<RosettaBlock>> {
+) -> Result<Vec<RosettaBlock>> {
     let command = "SELECT idx,serialized_block FROM blocks WHERE idx>= ?1 AND idx<=?2";
     let mut stmt = connection.prepare_cached(command)?;
     read_blocks(&mut stmt, params![start_index, end_index])
 }
 
-pub fn get_blockchain_gaps(
-    connection: &Connection,
-) -> anyhow::Result<Vec<(RosettaBlock, RosettaBlock)>> {
+pub fn get_blockchain_gaps(connection: &Connection) -> Result<Vec<(RosettaBlock, RosettaBlock)>> {
     // Search for blocks, such that there is no block with index+1.
     let command = "SELECT b1.idx,b1.serialized_block FROM blocks b1 WHERE not exists(select 1 from blocks b2 where b2.idx = b1.idx + 1)";
     let mut stmt = connection.prepare_cached(command)?;
@@ -669,7 +672,7 @@ pub fn get_blockchain_gaps(
     Ok(gap_limits)
 }
 
-pub fn get_block_count(connection: &Connection) -> anyhow::Result<u64> {
+pub fn get_block_count(connection: &Connection) -> Result<u64> {
     let count = get_counter_value(connection, &RosettaCounter::SyncedBlocks)?.unwrap_or(0);
     Ok(count as u64)
 }
@@ -679,7 +682,7 @@ pub fn get_block_count(connection: &Connection) -> anyhow::Result<u64> {
 pub fn get_blocks_by_transaction_hash(
     connection: &Connection,
     hash: ByteBuf,
-) -> anyhow::Result<Vec<RosettaBlock>> {
+) -> Result<Vec<RosettaBlock>> {
     let mut stmt =
         connection.prepare_cached("SELECT idx,serialized_block FROM blocks WHERE tx_hash = ?1")?;
     read_blocks(&mut stmt, params![hash.as_slice().to_vec()])
@@ -687,7 +690,7 @@ pub fn get_blocks_by_transaction_hash(
 
 pub fn get_highest_block_idx_in_account_balance_table(
     connection: &Connection,
-) -> anyhow::Result<Option<u64>> {
+) -> Result<Option<u64>> {
     match connection
         .prepare_cached("SELECT block_idx FROM account_balances WHERE block_idx = (SELECT MAX(block_idx) FROM account_balances)")?
         .query_map(params![], |row| row.get(0))?
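Note on the error-handling change running through this file: every getter above drops `anyhow::Result` for a crate-local `Result` whose error type is `StorageError`. The patch itself only shows two variants (`DataIntegrity`, `MultipleRecordsFound`), plus the facts that `?` is still applied directly to rusqlite calls and that an anyhow-based helper is converted with `StorageError::from`. The sketch below is consistent with those observations; the variant names `Sqlite` and `Internal` and the use of `thiserror` are assumptions, not taken from this patch:

```rust
// Sketch only -- not the crate's actual definitions. It mirrors the error
// pattern this patch applies: a storage-specific error enum plus a local
// Result alias, so `?` keeps working on rusqlite calls.
use thiserror::Error;

#[derive(Debug, Error)]
pub enum StorageError {
    // Wraps rusqlite errors so `connection.prepare_cached(..)?` compiles
    // unchanged inside functions returning this crate's `Result`.
    #[error("sqlite error: {0}")]
    Sqlite(#[from] rusqlite::Error),
    #[error("data integrity violation: {0}")]
    DataIntegrity(String),
    #[error("multiple records found: {0}")]
    MultipleRecordsFound(String),
    // The patch also calls `inner(..).map_err(StorageError::from)` on an
    // anyhow-based helper, implying a conversion along these lines.
    #[error("{0}")]
    Internal(#[from] anyhow::Error),
}

pub type Result<T> = std::result::Result<T, StorageError>;

// A getter in the style of the ones above: rusqlite errors convert via
// `From`, domain violations map to explicit variants.
pub fn get_max_block_idx(connection: &rusqlite::Connection) -> Result<Option<u64>> {
    let idx: Option<u64> = connection
        .prepare_cached("SELECT MAX(idx) FROM blocks")?
        .query_row([], |row| row.get(0))?;
    Ok(idx)
}
```

With the alias in scope, most getter bodies compile unchanged; only code that previously used `with_context` or `bail!` needs an explicit variant, which is exactly the shape of the edits in this file.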
@@ -698,9 +701,7 @@ pub fn get_highest_block_idx_in_account_balance_table(
     }
 }
 
-pub fn get_highest_block_idx_in_blocks_table(
-    connection: &Connection,
-) -> anyhow::Result<Option<u64>> {
+pub fn get_highest_block_idx_in_blocks_table(connection: &Connection) -> Result<Option<u64>> {
     match connection
         .prepare_cached("SELECT MAX(idx) FROM blocks")?
         .query_map(params![], |row| row.get(0))?
@@ -714,7 +715,7 @@ pub fn get_highest_block_idx_in_blocks_table(
 pub fn get_account_balance_at_highest_block_idx(
     connection: &Connection,
     account: &Account,
-) -> anyhow::Result<Option<Nat>> {
+) -> Result<Option<Nat>> {
     get_account_balance_at_block_idx(connection, account, i64::MAX as u64)
 }
 
@@ -722,8 +723,8 @@ pub fn get_account_balance_at_block_idx(
     connection: &Connection,
     account: &Account,
     block_idx: u64,
-) -> anyhow::Result<Option<Nat>> {
-    Ok(connection
+) -> Result<Option<Nat>> {
+    let amount_str: Option<String> = connection
         .prepare_cached(
             "SELECT amount \
             FROM account_balances \
@@ -740,12 +741,14 @@ pub fn get_account_balance_at_block_idx(
         })?
         .mapped(|row| row.get(0))
         .next()
-        .transpose()
-        .with_context(|| {
-            format!("Unable to fetch balance of account {account} at index {block_idx}")
-        })?
-        .map(|x: String| Nat::from_str(&x))
-        .transpose()?)
+        .transpose()?;
+
+    match amount_str {
+        Some(s) => Nat::from_str(&s)
+            .map(Some)
+            .map_err(|e| StorageError::DataIntegrity(format!("Invalid balance format: {}", e))),
+        None => Ok(None),
+    }
 }
 
 /// Gets the aggregated balance of all subaccounts for a given principal at a specific block index.
@@ -754,7 +757,7 @@ pub fn get_aggregated_balance_for_principal_at_block_idx(
     connection: &Connection,
     principal: &PrincipalId,
     block_idx: u64,
-) -> anyhow::Result<Nat> {
+) -> Result<Nat> {
     // Query to get the latest balance for each subaccount of the principal at or before the given block index
     let mut stmt = connection.prepare_cached(
         "SELECT a1.subaccount, a1.amount
@@ -789,10 +792,9 @@ pub fn get_aggregated_balance_for_principal_at_block_idx(
     let mut total_balance = Nat(BigUint::zero());
     for balance_result in rows {
         let balance = balance_result?;
-        total_balance = Nat(total_balance
-            .0
-            .checked_add(&balance.0)
-            .with_context(|| "Overflow while aggregating balances")?);
+        total_balance = Nat(total_balance.0.checked_add(&balance.0).ok_or_else(|| {
+            StorageError::DataIntegrity("Overflow while aggregating balances".to_string())
+        })?);
     }
 
     Ok(total_balance)
@@ -802,7 +804,7 @@ pub fn get_blocks_by_custom_query<P>(
     connection: &Connection,
     sql_query: String,
     params: P,
-) -> anyhow::Result<Vec<RosettaBlock>>
+) -> Result<Vec<RosettaBlock>>
 where
     P: Params,
 {
@@ -810,7 +812,7 @@ where
     read_blocks(&mut stmt, params)
 }
 
-pub fn reset_blocks_counter(connection: &Connection) -> anyhow::Result<()> {
+pub fn reset_blocks_counter(connection: &Connection) -> Result<()> {
     let block_count: i64 = connection
         .prepare_cached("SELECT COUNT(*) FROM blocks")?
         .query_row(params![], |row| row.get(0))?;
@@ -818,10 +820,7 @@ pub fn reset_blocks_counter(connection: &Connection) -> anyhow::Result<()> {
     set_counter_value(connection, &RosettaCounter::SyncedBlocks, block_count)
 }
 
-fn read_single_block<P>(
-    stmt: &mut CachedStatement,
-    params: P,
-) -> anyhow::Result<Option<RosettaBlock>>
+fn read_single_block<P>(stmt: &mut CachedStatement, params: P) -> Result<Option<RosettaBlock>>
 where
     P: Params,
 {
@@ -834,12 +833,14 @@ where
         Ok(None)
     } else {
         // If more than one block was found return an error
-        bail!("Multiple blocks found with given parameters".to_owned(),)
+        Err(StorageError::MultipleRecordsFound(
+            "Multiple blocks found with given parameters".to_owned(),
+        ))
     }
 }
 
 // Executes the constructed statement that reads blocks. The statement expects two values: The serialized Block and the index of that block
-fn read_blocks<P>(stmt: &mut CachedStatement, params: P) -> anyhow::Result<Vec<RosettaBlock>>
+fn read_blocks<P>
(stmt: &mut CachedStatement, params: P) -> Result> where P: Params, { @@ -868,7 +869,7 @@ where pub fn repair_fee_collector_balances( connection: &mut Connection, balance_sync_batch_size: u64, -) -> anyhow::Result<()> { +) -> Result<()> { // Check if the repair has already been performed if is_counter_flag_set(connection, &RosettaCounter::CollectorBalancesFixed)? { // Repair has already been performed, skip it diff --git a/rs/rosetta-api/icrc1/src/common/utils/utils.rs b/rs/rosetta-api/icrc1/src/common/utils/utils.rs index e522537b919e..f6db18048a63 100644 --- a/rs/rosetta-api/icrc1/src/common/utils/utils.rs +++ b/rs/rosetta-api/icrc1/src/common/utils/utils.rs @@ -61,7 +61,7 @@ pub fn convert_timestamp_to_millis(timestamp_nanos: u64) -> anyhow::Result )) } -pub fn get_rosetta_block_from_block_identifier( +pub async fn get_rosetta_block_from_block_identifier( block_identifier: BlockIdentifier, storage_client: &StorageClient, ) -> anyhow::Result { @@ -69,9 +69,10 @@ pub fn get_rosetta_block_from_block_identifier( &PartialBlockIdentifier::from(block_identifier), storage_client, ) + .await } -pub fn get_rosetta_block_from_partial_block_identifier( +pub async fn get_rosetta_block_from_partial_block_identifier( partial_block_identifier: &PartialBlockIdentifier, storage_client: &StorageClient, ) -> anyhow::Result { @@ -86,17 +87,20 @@ pub fn get_rosetta_block_from_partial_block_identifier( let hash_buf = ByteBuf::from(hash_bytes); storage_client .get_block_by_hash(hash_buf.clone()) + .await .with_context(|| format!("Unable to retrieve block with hash: {hash_buf:?}"))? .with_context(|| format!("Block with hash {hash} could not be found"))? } (Some(block_idx), None) => storage_client .get_block_at_idx(block_idx) + .await .with_context(|| format!("Unable to retrieve block with idx: {block_idx}"))? .with_context(|| format!("Block at index {block_idx} could not be found"))?, (Some(block_idx), Some(hash)) => { let rosetta_block = storage_client .get_block_at_idx(block_idx) + .await .with_context(|| format!("Unable to retrieve block with idx: {block_idx}"))? .with_context(|| format!("Block at index {block_idx} could not be found"))?; if &hex::encode(rosetta_block.clone().get_block_hash()) != hash { @@ -112,6 +116,7 @@ pub fn get_rosetta_block_from_partial_block_identifier( } (None, None) => storage_client .get_block_with_highest_block_idx() + .await .with_context(|| "Unable to retrieve the latest block".to_string())? .with_context(|| { "Latest block could not be found, the blockchain is empty".to_string() diff --git a/rs/rosetta-api/icrc1/src/data_api/endpoints.rs b/rs/rosetta-api/icrc1/src/data_api/endpoints.rs index d2d5ada92907..aa51a91451ec 100644 --- a/rs/rosetta-api/icrc1/src/data_api/endpoints.rs +++ b/rs/rosetta-api/icrc1/src/data_api/endpoints.rs @@ -8,19 +8,16 @@ use ic_rosetta_api::models::MempoolResponse; use rosetta_core::{request_types::*, response_types::*}; use std::sync::Arc; -// This endpoint is used to determine whether ICRC Rosetta is ready to be querried for data. +// This endpoint is used to determine whether ICRC Rosetta is ready to be queried for data. 
// It returns Status Code 200 if an initial sync of the blockchain has been done // This means that no gaps in the blockchain exist and the genesis block has already been fetched pub async fn ready(State(state): State>) -> (StatusCode, Json<()>) { - if state - .token_states - .values() - .all(|state| initial_sync_is_completed(&state.storage, state.synched.clone())) - { - (StatusCode::OK, Json(())) - } else { - (StatusCode::SERVICE_UNAVAILABLE, Json(())) + for token_state in state.token_states.values() { + if !initial_sync_is_completed(&token_state.storage, &token_state.synched).await { + return (StatusCode::SERVICE_UNAVAILABLE, Json(())); + } } + (StatusCode::OK, Json(())) } pub async fn health() -> (StatusCode, Json<()>) { @@ -57,7 +54,7 @@ pub async fn network_status( ) -> Result> { let state = get_state_from_network_id(&request.0.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::network_status(&state.storage)?)) + Ok(Json(services::network_status(&state.storage).await?)) } pub async fn block( @@ -66,12 +63,15 @@ pub async fn block( ) -> Result> { let state = get_state_from_network_id(&request.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::block( - &state.storage, - &request.0.block_identifier, - state.metadata.decimals, - state.metadata.symbol.clone(), - )?)) + Ok(Json( + services::block( + &state.storage, + &request.0.block_identifier, + state.metadata.decimals, + state.metadata.symbol.clone(), + ) + .await?, + )) } pub async fn block_transaction( @@ -80,13 +80,16 @@ pub async fn block_transaction( ) -> Result> { let state = get_state_from_network_id(&request.0.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::block_transaction( - &state.storage, - &request.0.block_identifier, - &request.0.transaction_identifier, - state.metadata.decimals, - state.metadata.symbol.clone(), - )?)) + Ok(Json( + services::block_transaction( + &state.storage, + &request.0.block_identifier, + &request.0.transaction_identifier, + state.metadata.decimals, + state.metadata.symbol.clone(), + ) + .await?, + )) } pub async fn mempool( @@ -113,14 +116,17 @@ pub async fn account_balance( ) -> Result> { let state = get_state_from_network_id(&request.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::account_balance_with_metadata( - &state.storage, - &request.account_identifier, - &request.block_identifier, - &request.metadata, - state.metadata.decimals, - state.metadata.symbol.clone(), - )?)) + Ok(Json( + services::account_balance_with_metadata( + &state.storage, + &request.account_identifier, + &request.block_identifier, + &request.metadata, + state.metadata.decimals, + state.metadata.symbol.clone(), + ) + .await?, + )) } pub async fn search_transactions( @@ -129,12 +135,15 @@ pub async fn search_transactions( ) -> Result> { let state = get_state_from_network_id(&request.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::search_transactions( - &state.storage, - request, - state.metadata.symbol.clone(), - state.metadata.decimals, - )?)) + Ok(Json( + services::search_transactions( + &state.storage, + request, + state.metadata.symbol.clone(), + state.metadata.decimals, + ) + .await?, + )) } pub async fn call( @@ -143,13 +152,16 @@ pub async fn call( ) -> Result> { let state = 
get_state_from_network_id(&request.network_identifier, &state) .map_err(|err| Error::invalid_network_id(&format!("{err:?}")))?; - Ok(Json(services::call( - &state.storage, - &request.method_name, - request.parameters, - rosetta_core::objects::Currency::new( - state.metadata.symbol.clone(), - state.metadata.decimals.into(), - ), - )?)) + Ok(Json( + services::call( + &state.storage, + &request.method_name, + request.parameters, + rosetta_core::objects::Currency::new( + state.metadata.symbol.clone(), + state.metadata.decimals.into(), + ), + ) + .await?, + )) } diff --git a/rs/rosetta-api/icrc1/src/data_api/services.rs b/rs/rosetta-api/icrc1/src/data_api/services.rs index d622f755177e..61cfd4c38220 100644 --- a/rs/rosetta-api/icrc1/src/data_api/services.rs +++ b/rs/rosetta-api/icrc1/src/data_api/services.rs @@ -1,5 +1,4 @@ -use std::sync::Arc; -use std::sync::Mutex; +use tokio::sync::Mutex; use crate::common::constants::DEFAULT_BLOCKCHAIN; use crate::common::constants::MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST; @@ -84,9 +83,12 @@ pub fn network_options(ledger_id: &Principal) -> NetworkOptionsResponse { } } -pub fn network_status(storage_client: &StorageClient) -> Result { +pub async fn network_status( + storage_client: &StorageClient, +) -> Result { let highest_processed_block = storage_client .get_highest_block_idx_in_account_balance_table() + .await .map_err(|e| Error::unable_to_find_block(&e))? .ok_or_else(|| { Error::unable_to_find_block(&"Highest processed block not found".to_owned()) @@ -94,11 +96,13 @@ pub fn network_status(storage_client: &StorageClient) -> Result Result Result { let rosetta_block = get_rosetta_block_from_block_identifier(block_identifier.clone(), storage_client) + .await .map_err(|err| Error::invalid_block_identifier(&err))?; if &rosetta_block.clone().get_block_identifier() != block_identifier { @@ -159,7 +164,7 @@ pub fn block_transaction( Ok(rosetta_core::response_types::BlockTransactionResponse { transaction }) } -pub fn block( +pub async fn block( storage_client: &StorageClient, partial_block_identifier: &PartialBlockIdentifier, decimals: u8, @@ -167,6 +172,7 @@ pub fn block( ) -> Result { let rosetta_block = get_rosetta_block_from_partial_block_identifier(partial_block_identifier, storage_client) + .await .map_err(|err| Error::invalid_block_identifier(&err))?; let currency = Currency { symbol, @@ -185,7 +191,7 @@ pub fn block( Ok(BlockResponse::new(Some(block))) } -pub fn account_balance( +pub async fn account_balance( storage_client: &StorageClient, account_identifier: &AccountIdentifier, partial_block_identifier: &Option, @@ -194,9 +200,11 @@ pub fn account_balance( ) -> Result { let rosetta_block = match partial_block_identifier { Some(block_id) => get_rosetta_block_from_partial_block_identifier(block_id, storage_client) + .await .map_err(|err| Error::invalid_block_identifier(&err))?, None => storage_client .get_block_with_highest_block_idx() + .await .map_err(|e| Error::unable_to_find_block(&e))? .ok_or_else(|| Error::unable_to_find_block(&"Current block not found".to_owned()))?, }; @@ -207,6 +215,7 @@ pub fn account_balance( .map_err(|err| Error::parsing_unsuccessful(&err))?), rosetta_block.index, ) + .await .map_err(|e| Error::unable_to_find_account_balance(&e))? 
.unwrap_or(Nat(BigUint::zero())); @@ -224,7 +233,7 @@ pub fn account_balance( }) } -pub fn account_balance_with_metadata( +pub async fn account_balance_with_metadata( storage_client: &StorageClient, account_identifier: &AccountIdentifier, partial_block_identifier: &Option, @@ -234,9 +243,11 @@ pub fn account_balance_with_metadata( ) -> Result { let rosetta_block = match partial_block_identifier { Some(block_id) => get_rosetta_block_from_partial_block_identifier(block_id, storage_client) + .await .map_err(|err| Error::invalid_block_identifier(&err))?, None => storage_client .get_block_with_highest_block_idx() + .await .map_err(|e| Error::unable_to_find_block(&e))? .ok_or_else(|| Error::unable_to_find_block(&"Current block not found".to_owned()))?, }; @@ -272,6 +283,7 @@ pub fn account_balance_with_metadata( &account.owner.into(), rosetta_block.index, ) + .await .map_err(|e| Error::unable_to_find_account_balance(&e))? } else { // Get balance for the specific account (principal + subaccount) @@ -281,6 +293,7 @@ pub fn account_balance_with_metadata( .map_err(|err| Error::parsing_unsuccessful(&err))?), rosetta_block.index, ) + .await .map_err(|e| Error::unable_to_find_account_balance(&e))? .unwrap_or(Nat(BigUint::zero())) }; @@ -299,7 +312,7 @@ pub fn account_balance_with_metadata( }) } -pub fn search_transactions( +pub async fn search_transactions( storage_client: &StorageClient, request: SearchTransactionsRequest, symbol: String, @@ -349,6 +362,7 @@ pub fn search_transactions( let rosetta_block_with_highest_block_index = storage_client .get_block_with_highest_block_idx() + .await .map_err(|e| Error::unable_to_find_block(&e))?; let Some(rosetta_block_with_highest_block_index) = rosetta_block_with_highest_block_index @@ -425,9 +439,12 @@ pub fn search_transactions( // Base query to fetch the blocks let mut command = String::from("SELECT idx,serialized_block FROM blocks WHERE idx <= :max_block_idx "); - let mut parameters: Vec<(&str, Box)> = Vec::new(); + let mut parameters: Vec<(String, rusqlite::types::Value)> = Vec::new(); - parameters.push((":max_block_idx", Box::new(start_idx))); + parameters.push(( + ":max_block_idx".to_string(), + rusqlite::types::Value::Integer(start_idx as i64), + )); if let Some(transaction_identifier) = request.transaction_identifier.clone() { command.push_str("AND tx_hash = :tx_hash "); @@ -439,46 +456,43 @@ pub fn search_transactions( })? 
.as_slice() .to_vec(); - parameters.push((":tx_hash", Box::new(tx_hash))); + parameters.push(( + ":tx_hash".to_string(), + rusqlite::types::Value::Blob(tx_hash), + )); } if let Some(operation_type) = operation_type { command.push_str("AND operation_type = :operation_type "); parameters.push(( - ":operation_type", - Box::new(operation_type.to_string().to_lowercase()), + ":operation_type".to_string(), + rusqlite::types::Value::Text(operation_type.to_string().to_lowercase()), )); } if let Some(account) = account { command.push_str("AND ((from_principal = :account_principal AND from_subaccount = :account_subaccount) OR (to_principal = :account_principal AND to_subaccount = :account_subaccount) OR (spender_principal = :account_principal AND spender_subaccount = :account_subaccount)) "); parameters.push(( - ":account_principal", - Box::new(account.owner.as_slice().to_vec()), + ":account_principal".to_string(), + rusqlite::types::Value::Blob(account.owner.as_slice().to_vec()), )); parameters.push(( - ":account_subaccount", - Box::new(*account.effective_subaccount()), + ":account_subaccount".to_string(), + rusqlite::types::Value::Blob(account.effective_subaccount().to_vec()), )); } command.push_str("ORDER BY idx DESC "); command.push_str("LIMIT :limit "); - parameters.push((":limit", Box::new(limit))); + parameters.push(( + ":limit".to_string(), + rusqlite::types::Value::Integer(limit as i64), + )); let mut rosetta_blocks = storage_client - .get_blocks_by_custom_query( - command, - parameters - .iter() - .map(|(key, param)| { - let param_ref: &dyn rusqlite::ToSql = param.as_ref(); - (key.to_owned(), param_ref) - }) - .collect::>() - .as_slice(), - ) + .get_blocks_by_custom_query(command, parameters) + .await .map_err(|e| Error::unable_to_find_block(&format!("Error fetching blocks: {e:?}")))?; let mut transactions = vec![]; @@ -529,27 +543,40 @@ pub fn search_transactions( }) } -pub fn initial_sync_is_completed( +pub async fn initial_sync_is_completed( storage_client: &StorageClient, - sync_state: Arc>>, + sync_state: &Mutex>, ) -> bool { - let mut synched = sync_state.lock().unwrap(); - if synched.is_some() && synched.unwrap() { - synched.unwrap() - } else { - let block_count = storage_client.get_block_count(); - let highest_index = storage_client.get_highest_block_idx_in_account_balance_table(); - *synched = Some(match (block_count, highest_index) { - // If the blockchain contains no blocks we mark it as not completed - (Ok(block_count), Ok(Some(highest_index))) if block_count == highest_index + 1 => true, - _ => false, - }); - // Unwrap is safe because it was just set - (*synched).unwrap() + // Check if already synced (without holding lock across await) + { + let synched = sync_state.lock().await; + if synched.is_some() && synched.unwrap() { + return synched.unwrap(); + } } + + // Need to check sync status - release lock before await + let block_count = storage_client.get_block_count().await; + let highest_index = storage_client + .get_highest_block_idx_in_account_balance_table() + .await; + + let is_synced = match (block_count, highest_index) { + // If the blockchain contains no blocks we mark it as not completed + (Ok(block_count), Ok(Some(highest_index))) if block_count == highest_index + 1 => true, + _ => false, + }; + + // Update cached state + { + let mut synched = sync_state.lock().await; + *synched = Some(is_synced); + } + + is_synced } -pub fn call( +pub async fn call( storage_client: &StorageClient, method_name: &str, parameters: ObjectMap, @@ -572,6 +599,7 @@ pub fn call( 
blocks.extend( storage_client .get_blocks_by_index_range(lowest_index, highest_index) + .await .map_err(|err| Error::unable_to_find_block(&err))? .into_iter() .map(|block| { @@ -632,262 +660,271 @@ mod test { })] #[test] fn test_network_status_service(blockchain in valid_blockchain_strategy::(BLOCKCHAIN_LENGTH)){ - let storage_client_memory = Arc::new(StorageClient::new_in_memory().unwrap()); - let mut rosetta_blocks = vec![]; - let mut added_index = 0; - for block in blockchain.clone().into_iter() { - // We only push Mint blocks since `update_account_balances` will - // complain if we e.g., transfer from an account with no balance. - if let ic_icrc1::Operation::Mint{..} = block.transaction.operation { - // Since we skip some blocks, the fee collector block index is not correct anymore. - let mut block_no_fc = block; - block_no_fc.fee_collector_block_index = None; - rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block_no_fc.encode()), added_index as u64).unwrap()); - added_index += 1; + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = Arc::new(StorageClient::new_in_memory().await.unwrap()); + let mut rosetta_blocks = vec![]; + let mut added_index = 0; + for block in blockchain.clone().into_iter() { + // We only push Mint blocks since `update_account_balances` will + // complain if we e.g., transfer from an account with no balance. + if let ic_icrc1::Operation::Mint{..} = block.transaction.operation { + // Since we skip some blocks, the fee collector block index is not correct anymore. + let mut block_no_fc = block; + block_no_fc.fee_collector_block_index = None; + rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block_no_fc.encode()), added_index as u64).unwrap()); + added_index += 1; + } } - } - // If there is no block in the database the service should return an error - let network_status_err = network_status(&storage_client_memory).unwrap_err(); - assert!(network_status_err.0.message.contains("Unable to find block")); - if !rosetta_blocks.is_empty() { - - storage_client_memory.store_blocks(rosetta_blocks).unwrap(); - storage_client_memory.update_account_balances().unwrap(); - let block_with_highest_idx = storage_client_memory.get_block_with_highest_block_idx().unwrap().unwrap(); - let genesis_block = storage_client_memory.get_block_with_lowest_block_idx().unwrap().unwrap(); - - let network_status_response = network_status(&storage_client_memory).unwrap(); - - assert_eq!(NetworkStatusResponse { - current_block_identifier: BlockIdentifier::from(block_with_highest_idx.clone()), - current_block_timestamp: convert_timestamp_to_millis(block_with_highest_idx.get_timestamp()).map_err(|err| Error::parsing_unsuccessful(&err)).unwrap(), - genesis_block_identifier: BlockIdentifier::from(genesis_block.clone()), - oldest_block_identifier: Some(BlockIdentifier::from(genesis_block)), - sync_status: None, - peers: vec![], - },network_status_response) - } + // If there is no block in the database the service should return an error + let network_status_err = network_status(&storage_client_memory).await.unwrap_err(); + assert!(network_status_err.0.message.contains("Unable to find block")); + if !rosetta_blocks.is_empty() { + + storage_client_memory.store_blocks(rosetta_blocks).await.unwrap(); + storage_client_memory.update_account_balances().await.unwrap(); + let block_with_highest_idx = storage_client_memory.get_block_with_highest_block_idx().await.unwrap().unwrap(); + let 
genesis_block = storage_client_memory.get_block_with_lowest_block_idx().await.unwrap().unwrap(); + + let network_status_response = network_status(&storage_client_memory).await.unwrap(); + + assert_eq!(NetworkStatusResponse { + current_block_identifier: BlockIdentifier::from(block_with_highest_idx.clone()), + current_block_timestamp: convert_timestamp_to_millis(block_with_highest_idx.get_timestamp()).map_err(|err| Error::parsing_unsuccessful(&err)).unwrap(), + genesis_block_identifier: BlockIdentifier::from(genesis_block.clone()), + oldest_block_identifier: Some(BlockIdentifier::from(genesis_block)), + sync_status: None, + peers: vec![], + },network_status_response) + } + }); } #[test] fn test_block_service(blockchain in valid_blockchain_strategy::(BLOCKCHAIN_LENGTH)){ - let storage_client_memory = Arc::new(StorageClient::new_in_memory().unwrap()); - let invalid_block_hash = "0x1234".to_string(); - let invalid_block_idx = blockchain.len() as u64 + 1; - let valid_block_idx = (blockchain.len() as u64).saturating_sub(1); - let mut rosetta_blocks = vec![]; - - for (index,block) in blockchain.clone().into_iter().enumerate(){ - rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.encode()),index as u64).unwrap()); - } - - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); - - let metadata = Metadata{ - symbol: "ICP".to_string(), - decimals: 8 - }; - - let mut block_identifier = PartialBlockIdentifier{ - index: Some(invalid_block_idx), - hash: None - }; - - // If the block identifier index does not exist the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); - if blockchain.is_empty() { - assert!(block_res.is_err()); - } else { - assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {invalid_block_idx} could not be found"))); - } - - block_identifier = PartialBlockIdentifier{ - index: None, - hash: Some(hex::encode(invalid_block_hash.clone())) - }; - - // If the block identifier hash does not exist the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); - - if blockchain.is_empty() { - assert!(block_res.is_err()); - } else { - assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block with hash {} could not be found",hex::encode(invalid_block_hash.clone())))); - } - - block_identifier = PartialBlockIdentifier{ - index: None, - hash: Some(invalid_block_hash.clone()) - }; - - // If the block identifier hash is invalid the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = Arc::new(StorageClient::new_in_memory().await.unwrap()); + let invalid_block_hash = "0x1234".to_string(); + let invalid_block_idx = blockchain.len() as u64 + 1; + let valid_block_idx = (blockchain.len() as u64).saturating_sub(1); + let mut rosetta_blocks = vec![]; + + for (index,block) in blockchain.clone().into_iter().enumerate(){ + rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.encode()),index as u64).unwrap()); + } - if blockchain.is_empty() { - assert!(block_res.is_err()); - } else { - assert!(block_res.unwrap_err().0.description.unwrap().contains("Invalid block hash provided")); - } + 
storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); - if !blockchain.is_empty() { - let valid_block_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_block_hash()); + let metadata = Metadata{ + symbol: "ICP".to_string(), + decimals: 8 + }; - block_identifier = PartialBlockIdentifier{ - index: Some(valid_block_idx), + let mut block_identifier = PartialBlockIdentifier{ + index: Some(invalid_block_idx), hash: None }; - // If the block identifier index is valid the service should return the block - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).unwrap(); - let mut expected_block_res = BlockResponse { - block: Some( - icrc1_rosetta_block_to_rosetta_core_block(rosetta_blocks[valid_block_idx as usize].clone(), Currency { - symbol: metadata.symbol.clone(), - decimals: metadata.decimals.into(), - ..Default::default() - }).unwrap(), - ), - other_transactions:None}; - expected_block_res.block.iter_mut().for_each(|block| block.transactions.iter_mut().for_each(|tx| { - tx.operations.iter_mut().for_each(|op| { - op.status = Some(STATUS_COMPLETED.to_string()); - }) - })); - - compare_blocks(block_res.block.unwrap(),expected_block_res.clone().block.unwrap()); + // If the block identifier index does not exist the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; + if blockchain.is_empty() { + assert!(block_res.is_err()); + } else { + assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {invalid_block_idx} could not be found"))); + } block_identifier = PartialBlockIdentifier{ index: None, - hash: Some(valid_block_hash.clone()) + hash: Some(hex::encode(invalid_block_hash.clone())) }; - // If the block identifier hash is valid the service should return the block - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).unwrap(); - compare_blocks(block_res.block.unwrap(),expected_block_res.clone().block.unwrap()); - - block_identifier = PartialBlockIdentifier{ - index: Some(valid_block_idx), - hash: Some(invalid_block_hash.clone()) - }; + // If the block identifier hash does not exist the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; - // If the block identifier index and hash are provided but do not match the same block the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); - assert!(block_res.unwrap_err().0.description.unwrap().contains(format!("Both index {} and hash {} were provided but they do not match the same block",valid_block_idx.clone(),invalid_block_hash.clone()).as_str())); + if blockchain.is_empty() { + assert!(block_res.is_err()); + } else { + assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block with hash {} could not be found",hex::encode(invalid_block_hash.clone())))); + } block_identifier = PartialBlockIdentifier{ - index: Some(invalid_block_idx), + index: None, hash: Some(invalid_block_hash.clone()) }; - // If the block identifier index and hash are provided but neither of them match a block the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); - 
assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {} could not be found",invalid_block_idx.clone()))); - - block_identifier = PartialBlockIdentifier{ - index: Some(invalid_block_idx), - hash: Some(valid_block_hash.clone()) - }; + // If the block identifier hash is invalid the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; - // If the block identifier index is invalid and the hash is valid the service should return an error - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()); - assert!(block_res.unwrap_err().0.description.unwrap().contains(format!("Block at index {invalid_block_idx} could not be found").as_str())); + if blockchain.is_empty() { + assert!(block_res.is_err()); + } else { + assert!(block_res.unwrap_err().0.description.unwrap().contains("Invalid block hash provided")); + } - block_identifier = PartialBlockIdentifier{ - index: None, - hash: None - }; - // If neither block index nor hash is provided, the service should return the last block - let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).unwrap(); - compare_blocks(block_res.block.unwrap(),expected_block_res.block.unwrap()); - } + if !blockchain.is_empty() { + let valid_block_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_block_hash()); + + block_identifier = PartialBlockIdentifier{ + index: Some(valid_block_idx), + hash: None + }; + + // If the block identifier index is valid the service should return the block + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await.unwrap(); + let mut expected_block_res = BlockResponse { + block: Some( + icrc1_rosetta_block_to_rosetta_core_block(rosetta_blocks[valid_block_idx as usize].clone(), Currency { + symbol: metadata.symbol.clone(), + decimals: metadata.decimals.into(), + ..Default::default() + }).unwrap(), + ), + other_transactions:None}; + expected_block_res.block.iter_mut().for_each(|block| block.transactions.iter_mut().for_each(|tx| { + tx.operations.iter_mut().for_each(|op| { + op.status = Some(STATUS_COMPLETED.to_string()); + }) + })); + + compare_blocks(block_res.block.unwrap(),expected_block_res.clone().block.unwrap()); + + block_identifier = PartialBlockIdentifier{ + index: None, + hash: Some(valid_block_hash.clone()) + }; + + // If the block identifier hash is valid the service should return the block + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await.unwrap(); + compare_blocks(block_res.block.unwrap(),expected_block_res.clone().block.unwrap()); + + block_identifier = PartialBlockIdentifier{ + index: Some(valid_block_idx), + hash: Some(invalid_block_hash.clone()) + }; + + // If the block identifier index and hash are provided but do not match the same block the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_res.unwrap_err().0.description.unwrap().contains(format!("Both index {} and hash {} were provided but they do not match the same block",valid_block_idx.clone(),invalid_block_hash.clone()).as_str())); + + block_identifier = PartialBlockIdentifier{ + index: Some(invalid_block_idx), + hash: Some(invalid_block_hash.clone()) + }; + + // If the block identifier index and hash 
are provided but neither of them match a block the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {} could not be found",invalid_block_idx.clone()))); + + block_identifier = PartialBlockIdentifier{ + index: Some(invalid_block_idx), + hash: Some(valid_block_hash.clone()) + }; + + // If the block identifier index is invalid and the hash is valid the service should return an error + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_res.unwrap_err().0.description.unwrap().contains(format!("Block at index {invalid_block_idx} could not be found").as_str())); + + block_identifier = PartialBlockIdentifier{ + index: None, + hash: None + }; + // If neither block index nor hash is provided, the service should return the last block + let block_res = block(&storage_client_memory,&block_identifier,metadata.decimals,metadata.symbol.clone()).await.unwrap(); + compare_blocks(block_res.block.unwrap(),expected_block_res.block.unwrap()); + } + }); } #[test] fn test_block_transaction_service(blockchain in valid_blockchain_strategy::((MAX_TRANSACTIONS_PER_SEARCH_TRANSACTIONS_REQUEST*5).try_into().unwrap())){ - let storage_client_memory = Arc::new(StorageClient::new_in_memory().unwrap()); - let invalid_block_hash = "0x1234".to_string(); - let invalid_block_idx = blockchain.len() as u64 + 1; - let valid_block_idx = (blockchain.len() as u64).saturating_sub(1); - let mut rosetta_blocks = vec![]; - - for (index,block) in blockchain.clone().into_iter().enumerate(){ - rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.encode()),index as u64).unwrap()); - } + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = Arc::new(StorageClient::new_in_memory().await.unwrap()); + let invalid_block_hash = "0x1234".to_string(); + let invalid_block_idx = blockchain.len() as u64 + 1; + let valid_block_idx = (blockchain.len() as u64).saturating_sub(1); + let mut rosetta_blocks = vec![]; - let metadata = Metadata{ - symbol: "ICP".to_string(), - decimals: 8 - }; + for (index,block) in blockchain.clone().into_iter().enumerate(){ + rosetta_blocks.push(RosettaBlock::from_generic_block(encoded_block_to_generic_block(&block.encode()),index as u64).unwrap()); + } - let mut block_identifier = BlockIdentifier{ - index: invalid_block_idx, - hash: invalid_block_hash.clone() - }; + let metadata = Metadata{ + symbol: "ICP".to_string(), + decimals: 8 + }; - let mut transaction_identifier = TransactionIdentifier{ - hash: invalid_block_hash.clone() - }; + let mut block_identifier = BlockIdentifier{ + index: invalid_block_idx, + hash: invalid_block_hash.clone() + }; - // If the storage is empty the service should return an error - let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()); - assert!(block_transaction_res.is_err()); + let mut transaction_identifier = TransactionIdentifier{ + hash: invalid_block_hash.clone() + }; - storage_client_memory.store_blocks(rosetta_blocks.clone()).unwrap(); + // If the storage is empty the service should return an error + let block_transaction_res = 
block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_transaction_res.is_err()); - // If the block identifier index is invalid the service should return an error - let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()); + storage_client_memory.store_blocks(rosetta_blocks.clone()).await.unwrap(); - if blockchain.is_empty() { - assert!(block_transaction_res.is_err()); - } else { - assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {invalid_block_idx} could not be found"))); - } + // If the block identifier index is invalid the service should return an error + let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).await; - if !blockchain.is_empty() { - let valid_block_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_block_hash()); - let valid_tx_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_transaction_hash().as_ref()); + if blockchain.is_empty() { + assert!(block_transaction_res.is_err()); + } else { + assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains(&format!("Block at index {invalid_block_idx} could not be found"))); + } - block_identifier = BlockIdentifier{ - index: valid_block_idx, - hash: valid_block_hash.clone() - }; + if !blockchain.is_empty() { + let valid_block_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_block_hash()); + let valid_tx_hash = hex::encode(rosetta_blocks[valid_block_idx as usize].clone().get_transaction_hash().as_ref()); - transaction_identifier = TransactionIdentifier{ - hash: valid_tx_hash.clone() - }; + block_identifier = BlockIdentifier{ + index: valid_block_idx, + hash: valid_block_hash.clone() + }; - // If the block identifier index and hash are valid the service should return the block - let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).unwrap(); - let mut expected_block_transaction_res = rosetta_core::response_types::BlockTransactionResponse { transaction: icrc1_rosetta_block_to_rosetta_core_transaction(rosetta_blocks[valid_block_idx as usize].clone(), Currency { - symbol: metadata.symbol.clone(), - decimals: metadata.decimals.into(), - ..Default::default() - }).unwrap() }; - expected_block_transaction_res.transaction.operations.iter_mut().for_each(|op| { - op.status = Some(STATUS_COMPLETED.to_string()); - }); + transaction_identifier = TransactionIdentifier{ + hash: valid_tx_hash.clone() + }; - // Sort the related operations so the equality check passes - assert_eq!(block_transaction_res.transaction, expected_block_transaction_res.transaction); + // If the block identifier index and hash are valid the service should return the block + let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).await.unwrap(); + let mut expected_block_transaction_res = rosetta_core::response_types::BlockTransactionResponse { transaction: icrc1_rosetta_block_to_rosetta_core_transaction(rosetta_blocks[valid_block_idx as usize].clone(), Currency { + symbol: metadata.symbol.clone(), + decimals: metadata.decimals.into(), + ..Default::default() + 
}).unwrap() }; + expected_block_transaction_res.transaction.operations.iter_mut().for_each(|op| { + op.status = Some(STATUS_COMPLETED.to_string()); + }); - transaction_identifier = TransactionIdentifier{ - hash: invalid_block_hash.clone() - }; + // Sort the related operations so the equality check passes + assert_eq!(block_transaction_res.transaction, expected_block_transaction_res.transaction); - // If the transaction identifier hash does not match a transaction in the block the service should return an error - let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()); - assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains("Invalid transaction identifier provided")); + transaction_identifier = TransactionIdentifier{ + hash: invalid_block_hash.clone() + }; - block_identifier = BlockIdentifier{ - index: valid_block_idx, - hash: invalid_block_hash.clone() - }; + // If the transaction identifier hash does not match a transaction in the block the service should return an error + let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains("Invalid transaction identifier provided")); - // If the block identifier hash is invalid the service should return an error - let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()); - assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains(format!("Both index {} and hash {} were provided but they do not match the same block",valid_block_idx.clone(),invalid_block_hash.clone()).as_str())); - } + block_identifier = BlockIdentifier{ + index: valid_block_idx, + hash: invalid_block_hash.clone() + }; + + // If the block identifier hash is invalid the service should return an error + let block_transaction_res = block_transaction(&storage_client_memory,&block_identifier,&transaction_identifier,metadata.decimals,metadata.symbol.clone()).await; + assert!(block_transaction_res.unwrap_err().0.description.unwrap().contains(format!("Both index {} and hash {} were provided but they do not match the same block",valid_block_idx.clone(),invalid_block_hash.clone()).as_str())); + } + }); } } @@ -903,381 +940,405 @@ mod test { .run( &(valid_blockchain_strategy::(BLOCKCHAIN_LENGTH).no_shrink()), |blockchain| { - let storage_client_memory = StorageClient::new_in_memory().unwrap(); - let mut rosetta_blocks = vec![]; + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); + let mut rosetta_blocks = vec![]; - for (index, block) in blockchain.clone().into_iter().enumerate() { - rosetta_blocks.push( - RosettaBlock::from_generic_block( - encoded_block_to_generic_block(&block.encode()), - index as u64, - ) - .unwrap(), - ); - } + for (index, block) in blockchain.clone().into_iter().enumerate() { + rosetta_blocks.push( + RosettaBlock::from_generic_block( + encoded_block_to_generic_block(&block.encode()), + index as u64, + ) + .unwrap(), + ); + } - storage_client_memory - .store_blocks(rosetta_blocks.clone()) - .unwrap(); - let mut search_transactions_request = SearchTransactionsRequest { - ..Default::default() - }; + storage_client_memory + .store_blocks(rosetta_blocks.clone()) + .await + 
.unwrap(); + let mut search_transactions_request = SearchTransactionsRequest { + ..Default::default() + }; - fn traverse_all_transactions( - storage_client: &StorageClient, - mut search_transactions_request: SearchTransactionsRequest, - ) -> Vec { - let mut transactions = vec![]; - loop { + async fn traverse_all_transactions( + storage_client: &StorageClient, + mut search_transactions_request: SearchTransactionsRequest, + ) -> Vec { + let mut transactions = vec![]; + loop { + let result = search_transactions( + storage_client, + search_transactions_request.clone(), + "ICP".to_string(), + 8, + ) + .await + .unwrap(); + transactions.extend(result.clone().transactions); + search_transactions_request.offset = result.next_offset; + + if search_transactions_request.offset.is_none() { + break; + } + } + + transactions + } + + if !blockchain.is_empty() { + // The maximum number of transactions that can be returned is the minimum between the maximum number of transactions per request or the entire blockchain + let maximum_number_returnable_transactions = rosetta_blocks + .len() + .min(MAX_TRANSACTIONS_PER_SEARCH_TRANSACTIONS_REQUEST as usize); + + // If no filters are provided the service should return all transactions or the maximum of transactions per request let result = search_transactions( - storage_client, + &storage_client_memory, search_transactions_request.clone(), "ICP".to_string(), 8, ) + .await .unwrap(); - transactions.extend(result.clone().transactions); - search_transactions_request.offset = result.next_offset; + assert_eq!( + result.total_count, + maximum_number_returnable_transactions as i64 + ); + assert_eq!(result.transactions.len() as i64, result.total_count); + + // We traverse through all the blocks and check if the transactions are returned correctly if the transaction identifier is provided + for rosetta_block in rosetta_blocks.iter() { + search_transactions_request.transaction_identifier = + Some(rosetta_block.clone().get_transaction_identifier()); + let result = search_transactions( + &storage_client_memory, + search_transactions_request.clone(), + "ICP".to_string(), + 8, + ) + .await + .unwrap(); - if search_transactions_request.offset.is_none() { - break; + let num_of_transactions_with_hash = rosetta_blocks + .iter() + .filter(|block| { + (*block).clone().get_transaction_hash() + == rosetta_block.clone().get_transaction_hash() + }) + .count(); + + // The total count should be the number of transactions with the same transaction identifier + assert_eq!( + result.total_count, + num_of_transactions_with_hash as i64 + ); + // If we provide a transaction identifier the service should return the transactions that match the transaction identifier + let mut expected_transaction = + icrc1_rosetta_block_to_rosetta_core_transaction( + rosetta_block.clone(), + Currency { + symbol: "ICP".to_string(), + decimals: 8, + metadata: None, + }, + ) + .unwrap(); + expected_transaction.operations.iter_mut().for_each(|op| { + op.status = Some(STATUS_COMPLETED.to_string()); + }); + assert_eq!( + result.transactions[0].transaction, + expected_transaction + ); + // If the transaction identifier is provided the next offset should be None + assert_eq!(result.next_offset, None); } - } - - transactions - } - - if !blockchain.is_empty() { - // The maximum number of transactions that can be returned is the minimum between the maximum number of transactions per request or the entire blockchain - let maximum_number_returnable_transactions = rosetta_blocks - .len() - 
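The new `traverse_all_transactions` helper drains the paginated endpoint by feeding `next_offset` back into the request until it comes back `None`. The same loop reduced to its essentials; `Page` and `fetch_page` are illustrative stand-ins, not names from this crate:

// Illustrative page type: `next_offset == None` marks the last page.
struct Page {
    items: Vec<u64>,
    next_offset: Option<i64>,
}

// Stand-in for an async offset-based query such as `search_transactions`.
async fn fetch_page(offset: Option<i64>) -> Page {
    let start = offset.unwrap_or(0);
    Page {
        items: (start..start + 2).map(|i| i as u64).collect(),
        next_offset: if start + 2 < 6 { Some(start + 2) } else { None },
    }
}

async fn drain_all() -> Vec<u64> {
    let mut items = vec![];
    let mut offset = None;
    loop {
        let page = fetch_page(offset).await;
        items.extend(page.items);
        offset = page.next_offset;
        if offset.is_none() {
            break;
        }
    }
    items
}

#[tokio::main]
async fn main() {
    // Three pages of two items each are stitched back together in order.
    assert_eq!(drain_all().await, vec![0, 1, 2, 3, 4, 5]);
}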
.min(MAX_TRANSACTIONS_PER_SEARCH_TRANSACTIONS_REQUEST as usize); - // If no filters are provided the service should return all transactions or the maximum of transactions per request - let result = search_transactions( - &storage_client_memory, - search_transactions_request.clone(), - "ICP".to_string(), - 8, - ) - .unwrap(); - assert_eq!( - result.total_count, - maximum_number_returnable_transactions as i64 - ); - assert_eq!(result.transactions.len() as i64, result.total_count); + search_transactions_request = SearchTransactionsRequest { + ..Default::default() + }; - // We traverse through all the blocks and check if the transactions are returned correctly if the transaction identifier is provided - for rosetta_block in rosetta_blocks.iter() { - search_transactions_request.transaction_identifier = - Some(rosetta_block.clone().get_transaction_identifier()); + // Let's check that setting the max_block option works as intended + search_transactions_request.max_block = + Some(rosetta_blocks.last().unwrap().index as i64); let result = search_transactions( &storage_client_memory, search_transactions_request.clone(), "ICP".to_string(), 8, ) + .await .unwrap(); + assert_eq!( + result.transactions.len(), + maximum_number_returnable_transactions + ); - let num_of_transactions_with_hash = rosetta_blocks - .iter() - .filter(|block| { - (*block).clone().get_transaction_hash() - == rosetta_block.clone().get_transaction_hash() - }) - .count(); - - // The total count should be the number of transactions with the same transaction identifier - assert_eq!(result.total_count, num_of_transactions_with_hash as i64); - // If we provide a transaction identifier the service should return the transactions that match the transaction identifier - let mut expected_transaction = - icrc1_rosetta_block_to_rosetta_core_transaction( - rosetta_block.clone(), - Currency { - symbol: "ICP".to_string(), - decimals: 8, - metadata: None, - }, - ) - .unwrap(); - expected_transaction.operations.iter_mut().for_each(|op| { - op.status = Some(STATUS_COMPLETED.to_string()); - }); - assert_eq!(result.transactions[0].transaction, expected_transaction); - // If the transaction identifier is provided the next offset should be None - assert_eq!(result.next_offset, None); - } + // The transactions should be returned in descending order of block index + assert_eq!( + result.transactions.first().unwrap().block_identifier, + rosetta_blocks + .last() + .unwrap() + .clone() + .get_block_identifier() + ); - search_transactions_request = SearchTransactionsRequest { - ..Default::default() - }; + // If we set the limit to something below the maximum number of blocks we should only receive that number of blocks + search_transactions_request.max_block = None; + search_transactions_request.limit = Some(1); + let result = search_transactions( + &storage_client_memory, + search_transactions_request.clone(), + "ICP".to_string(), + 8, + ) + .await + .unwrap(); + assert_eq!(result.transactions.len(), 1); - // Let's check that setting the max_block option works as intended - search_transactions_request.max_block = - Some(rosetta_blocks.last().unwrap().index as i64); - let result = search_transactions( - &storage_client_memory, - search_transactions_request.clone(), - "ICP".to_string(), - 8, - ) - .unwrap(); - assert_eq!( - result.transactions.len(), - maximum_number_returnable_transactions - ); + // The expected offset is the index of the highest block fetched minus the limit + let expected_offset = 1; + assert_eq!( + result.next_offset, + if
rosetta_blocks.len() > 1 { + Some(expected_offset) + } else { + None + } + ); - // The transactiosn should be returned in descending order of block index - assert_eq!( - result.transactions.first().unwrap().block_identifier, - rosetta_blocks - .last() - .unwrap() - .clone() - .get_block_identifier() - ); + search_transactions_request.limit = None; - // If we set the limit to something below the maximum number of blocks we should only receive that number of blocks - search_transactions_request.max_block = None; - search_transactions_request.limit = Some(1); - let result = search_transactions( - &storage_client_memory, - search_transactions_request.clone(), - "ICP".to_string(), - 8, - ) - .unwrap(); - assert_eq!(result.transactions.len(), 1); + // Setting the offset to greater than 0 only makes sense if the storage contains more than 1 block + search_transactions_request.offset = + Some(rosetta_blocks.len().saturating_sub(1).min(1) as i64); - // The expected offset is the index of the highest block fetched minus the limit - let expected_offset = 1; - assert_eq!( - result.next_offset, - if rosetta_blocks.len() > 1 { - Some(expected_offset) - } else { - None - } - ); + let result = search_transactions( + &storage_client_memory, + search_transactions_request.clone(), + "ICP".to_string(), + 8, + ) + .await + .unwrap(); + assert_eq!( + result.transactions.len(), + if rosetta_blocks.len() == 1 { + 1 + } else { + rosetta_blocks.len().saturating_sub(1) + } + .min(MAX_TRANSACTIONS_PER_SEARCH_TRANSACTIONS_REQUEST as usize) + ); - search_transactions_request.limit = None; + search_transactions_request.offset = None; + search_transactions_request.max_block = Some(10); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; - // Setting the offset to greater than 0 only makes sense if the storage contains more than 1 block - search_transactions_request.offset = - Some(rosetta_blocks.len().saturating_sub(1).min(1) as i64); + // The service should return the correct number of transactions if the max block is set, max block is an index so if the index is 10 there are 11 blocks/transactions to search through + assert_eq!(result.len(), rosetta_blocks.len().min(10 + 1)); - let result = search_transactions( - &storage_client_memory, - search_transactions_request.clone(), - "ICP".to_string(), - 8, - ) - .unwrap(); - assert_eq!( - result.transactions.len(), - if rosetta_blocks.len() == 1 { - 1 - } else { - rosetta_blocks.len().saturating_sub(1) - } - .min(MAX_TRANSACTIONS_PER_SEARCH_TRANSACTIONS_REQUEST as usize) - ); + search_transactions_request = SearchTransactionsRequest { + ..Default::default() + }; - search_transactions_request.offset = None; - search_transactions_request.max_block = Some(10); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); + // We make sure that the service returns the correct number of transactions for each operation type + search_transactions_request.type_ = Some("TRANSFER".to_string()); + let num_of_transfer_transactions = rosetta_blocks + .iter() + .filter(|block| { + matches!( + block.block.transaction.operation, + IcrcOperation::Transfer { .. 
} + ) + }) + .count(); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; + assert_eq!(result.len(), num_of_transfer_transactions); - // The service should return the correct number of transactions if the max block is set, max block is an index so if the index is 10 there are 11 blocks/transactions to search through - assert_eq!(result.len(), rosetta_blocks.len().min(10 + 1)); + search_transactions_request.type_ = Some("BURN".to_string()); + let num_of_burn_transactions = rosetta_blocks + .iter() + .filter(|block| { + matches!( + block.block.transaction.operation, + IcrcOperation::Burn { .. } + ) + }) + .count(); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; + assert_eq!(result.len(), num_of_burn_transactions); - search_transactions_request = SearchTransactionsRequest { - ..Default::default() - }; + search_transactions_request.type_ = Some("MINT".to_string()); + let num_of_mint_transactions = rosetta_blocks + .iter() + .filter(|block| { + matches!( + block.block.transaction.operation, + IcrcOperation::Mint { .. } + ) + }) + .count(); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; + assert_eq!(result.len(), num_of_mint_transactions); - // We make sure that the service returns the correct number of transactions for each operation type - search_transactions_request.type_ = Some("TRANSFER".to_string()); - let num_of_transfer_transactions = rosetta_blocks - .iter() - .filter(|block| { - matches!( - block.block.transaction.operation, - IcrcOperation::Transfer { .. } - ) - }) - .count(); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - assert_eq!(result.len(), num_of_transfer_transactions); - - search_transactions_request.type_ = Some("BURN".to_string()); - let num_of_burn_transactions = rosetta_blocks - .iter() - .filter(|block| { - matches!( - block.block.transaction.operation, - IcrcOperation::Burn { .. } - ) - }) - .count(); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - assert_eq!(result.len(), num_of_burn_transactions); - - search_transactions_request.type_ = Some("MINT".to_string()); - let num_of_mint_transactions = rosetta_blocks - .iter() - .filter(|block| { - matches!( - block.block.transaction.operation, - IcrcOperation::Mint { .. } - ) - }) - .count(); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - assert_eq!(result.len(), num_of_mint_transactions); - - search_transactions_request.type_ = Some("APPROVE".to_string()); - let num_of_approve_transactions = rosetta_blocks - .iter() - .filter(|block| { - matches!( - block.block.transaction.operation, - IcrcOperation::Approve { .. } - ) - }) - .count(); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - assert_eq!(result.len(), num_of_approve_transactions); + search_transactions_request.type_ = Some("APPROVE".to_string()); + let num_of_approve_transactions = rosetta_blocks + .iter() + .filter(|block| { + matches!( + block.block.transaction.operation, + IcrcOperation::Approve { .. 
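Each operation-type assertion above follows one idiom: count the blocks whose operation is a given enum variant with `matches!`, then compare against what the service returned. Self-contained, with a simplified stand-in for `IcrcOperation`:

// Simplified stand-in for the ledger's operation enum.
enum Operation {
    Mint { amount: u64 },
    Burn { amount: u64 },
    Transfer { amount: u64 },
}

fn count_mints(ops: &[Operation]) -> usize {
    ops.iter()
        // `{ .. }` ignores every field; only the variant is matched.
        .filter(|op| matches!(op, Operation::Mint { .. }))
        .count()
}

fn main() {
    let ops = vec![
        Operation::Mint { amount: 1 },
        Operation::Transfer { amount: 2 },
        Operation::Mint { amount: 3 },
        Operation::Burn { amount: 4 },
    ];
    assert_eq!(count_mints(&ops), 2);
}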
} + ) + }) + .count(); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; + assert_eq!(result.len(), num_of_approve_transactions); - search_transactions_request = SearchTransactionsRequest { - ..Default::default() - }; + search_transactions_request = SearchTransactionsRequest { + ..Default::default() + }; - // We make sure that the service returns the correct number of transactions for each account - search_transactions_request.account_identifier = Some( - match rosetta_blocks[0].block.transaction.operation { - IcrcOperation::Transfer { from, .. } => from, - IcrcOperation::Mint { to, .. } => to, - IcrcOperation::Burn { from, .. } => from, - IcrcOperation::Approve { from, .. } => from, - } - .into(), - ); + // We make sure that the service returns the correct number of transactions for each account + search_transactions_request.account_identifier = Some( + match rosetta_blocks[0].block.transaction.operation { + IcrcOperation::Transfer { from, .. } => from, + IcrcOperation::Mint { to, .. } => to, + IcrcOperation::Burn { from, .. } => from, + IcrcOperation::Approve { from, .. } => from, + } + .into(), + ); - let num_of_transactions_with_account = rosetta_blocks - .iter() - .filter(|block| match block.block.transaction.operation { - IcrcOperation::Transfer { - from, to, spender, .. - } => spender - .map_or(vec![from, to], |spender| vec![from, to, spender]) - .contains( - &search_transactions_request + let num_of_transactions_with_account = rosetta_blocks + .iter() + .filter(|block| match block.block.transaction.operation { + IcrcOperation::Transfer { + from, to, spender, .. + } => spender + .map_or(vec![from, to], |spender| vec![from, to, spender]) + .contains( + &search_transactions_request + .account_identifier + .clone() + .unwrap() + .try_into() + .unwrap(), + ), + IcrcOperation::Mint { to, .. } => { + to == search_transactions_request .account_identifier .clone() .unwrap() .try_into() - .unwrap(), - ), - IcrcOperation::Mint { to, .. } => { - to == search_transactions_request + .unwrap() + } + IcrcOperation::Burn { from, spender, .. } => spender + .map_or(vec![from], |spender| vec![from, spender]) + .contains( + &search_transactions_request + .account_identifier + .clone() + .unwrap() + .try_into() + .unwrap(), + ), + IcrcOperation::Approve { from, spender, .. } => [from, spender] + .contains( + &search_transactions_request + .account_identifier + .clone() + .unwrap() + .try_into() + .unwrap(), + ), + }) + .count(); + + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), + ) + .await; + assert_eq!(result.len(), num_of_transactions_with_account); + let involved_accounts = result[0] + .transaction + .operations + .iter() + .map(|op| op.account.clone().unwrap()) + .collect::>(); + assert!( + involved_accounts.contains( + &search_transactions_request .account_identifier .clone() .unwrap() - .try_into() - .unwrap() - } - IcrcOperation::Burn { from, spender, .. } => spender - .map_or(vec![from], |spender| vec![from, spender]) - .contains( - &search_transactions_request - .account_identifier - .clone() - .unwrap() - .try_into() - .unwrap(), - ), - IcrcOperation::Approve { from, spender, .. 
} => [from, spender] - .contains( - &search_transactions_request - .account_identifier - .clone() - .unwrap() - .try_into() - .unwrap(), - ), - }) - .count(); + ) + ); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - assert_eq!(result.len(), num_of_transactions_with_account); - let involved_accounts = result[0] - .transaction - .operations - .iter() - .map(|op| op.account.clone().unwrap()) - .collect::>(); - assert!( - involved_accounts.contains( - &search_transactions_request - .account_identifier - .clone() - .unwrap() + search_transactions_request.account_identifier = Some( + Account { + owner: ic_base_types::PrincipalId::new_anonymous().into(), + subaccount: Some([9; 32]), + } + .into(), + ); + let result = traverse_all_transactions( + &storage_client_memory, + search_transactions_request.clone(), ) - ); + .await; + // If the account does not exist the service should return an empty list + assert_eq!(result.len(), 0); - search_transactions_request.account_identifier = Some( - Account { - owner: ic_base_types::PrincipalId::new_anonymous().into(), - subaccount: Some([9; 32]), - } - .into(), - ); - let result = traverse_all_transactions( - &storage_client_memory, - search_transactions_request.clone(), - ); - // If the account does not exist the service should return an empty list - assert_eq!(result.len(), 0); - - search_transactions_request = SearchTransactionsRequest { - ..Default::default() - }; + search_transactions_request = SearchTransactionsRequest { + ..Default::default() + }; - search_transactions_request.type_ = Some("INVALID_OPS".to_string()); - let result = search_transactions( - &storage_client_memory, - search_transactions_request.clone(), - "ICP".to_string(), - 8, - ); - assert!(result.is_err()); - } + search_transactions_request.type_ = Some("INVALID_OPS".to_string()); + let result = search_transactions( + &storage_client_memory, + search_transactions_request.clone(), + "ICP".to_string(), + 8, + ) + .await; + assert!(result.is_err()); + } + }); Ok(()) }, ) .unwrap() } - #[test] - fn test_fetch_block_from_empty_blockchain() { - let storage_client_memory = Arc::new(StorageClient::new_in_memory().unwrap()); + #[tokio::test] + async fn test_fetch_block_from_empty_blockchain() { + let storage_client_memory = Arc::new(StorageClient::new_in_memory().await.unwrap()); let metadata = Metadata { symbol: "ICP".to_string(), @@ -1294,7 +1355,8 @@ mod test { &block_identifier, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(block_res.is_err()); let block_identifier = PartialBlockIdentifier { @@ -1306,7 +1368,8 @@ mod test { &block_identifier, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(block_res.is_err()); let block_identifier = PartialBlockIdentifier { @@ -1318,7 +1381,8 @@ mod test { &block_identifier, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(block_res.is_err()); let block_identifier = PartialBlockIdentifier { @@ -1330,7 +1394,8 @@ mod test { &block_identifier, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(block_res.is_err()); } @@ -1346,162 +1411,174 @@ mod test { .run( &(valid_blockchain_strategy::(BLOCKCHAIN_LENGTH * 25).no_shrink()), |blockchain| { - let storage_client_memory = StorageClient::new_in_memory().unwrap(); - let mut rosetta_blocks = vec![]; + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let storage_client_memory = StorageClient::new_in_memory().await.unwrap(); + let mut 
rosetta_blocks = vec![]; - let currency = Currency::new("ICP".to_string(), 8); + let currency = Currency::new("ICP".to_string(), 8); - // Call on an empty database - let response: QueryBlockRangeResponse = call( - &storage_client_memory, - "query_block_range", - ObjectMap::try_from(QueryBlockRangeRequest { - highest_block_index: 100, - number_of_blocks: 10, - }) - .unwrap(), - currency.clone(), - ) - .unwrap() - .result - .try_into() - .unwrap(); - assert!(response.blocks.is_empty()); - - for (index, block) in blockchain.clone().into_iter().enumerate() { - rosetta_blocks.push( - RosettaBlock::from_generic_block( - encoded_block_to_generic_block(&block.encode()), - index as u64, - ) + // Call on an empty database + let response: QueryBlockRangeResponse = call( + &storage_client_memory, + "query_block_range", + ObjectMap::try_from(QueryBlockRangeRequest { + highest_block_index: 100, + number_of_blocks: 10, + }) .unwrap(), - ); - } + currency.clone(), + ) + .await + .unwrap() + .result + .try_into() + .unwrap(); + assert!(response.blocks.is_empty()); + + for (index, block) in blockchain.clone().into_iter().enumerate() { + rosetta_blocks.push( + RosettaBlock::from_generic_block( + encoded_block_to_generic_block(&block.encode()), + index as u64, + ) + .unwrap(), + ); + } - storage_client_memory - .store_blocks(rosetta_blocks.clone()) + storage_client_memory + .store_blocks(rosetta_blocks.clone()) + .await + .unwrap(); + let highest_block_index = rosetta_blocks.len().saturating_sub(1) as u64; + // Call with 0 numbers of blocks + let response: QueryBlockRangeResponse = call( + &storage_client_memory, + "query_block_range", + ObjectMap::try_from(QueryBlockRangeRequest { + highest_block_index, + number_of_blocks: 0, + }) + .unwrap(), + currency.clone(), + ) + .await + .unwrap() + .result + .try_into() .unwrap(); - let highest_block_index = rosetta_blocks.len().saturating_sub(1) as u64; - // Call with 0 numbers of blocks - let response: QueryBlockRangeResponse = call( - &storage_client_memory, - "query_block_range", - ObjectMap::try_from(QueryBlockRangeRequest { - highest_block_index, - number_of_blocks: 0, - }) - .unwrap(), - currency.clone(), - ) - .unwrap() - .result - .try_into() - .unwrap(); - assert!(response.blocks.is_empty()); - - // Call with higher index than there are blocks in the database - let response = call( - &storage_client_memory, - "query_block_range", - ObjectMap::try_from(QueryBlockRangeRequest { - highest_block_index: (rosetta_blocks.len() * 2) as u64, - number_of_blocks: std::cmp::max( - rosetta_blocks.len() as u64, - MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST, - ), - }) - .unwrap(), - currency.clone(), - ) - .unwrap(); - let query_block_response: QueryBlockRangeResponse = - response.result.try_into().unwrap(); - // If the blocks measured from the highest block index asked for are not in the database the service should return an empty array of blocks - if rosetta_blocks.len() >= MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize { - assert_eq!(query_block_response.blocks.len(), 0); - assert!(!response.idempotent); - } - // If some of the blocks measured from the highest block index asked for are in the database the service should return the blocks that are in the database - else { - if rosetta_blocks.len() * 2 - > MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize + assert!(response.blocks.is_empty()); + + // Call with higher index than there are blocks in the database + let response = call( + &storage_client_memory, + "query_block_range", + 
ObjectMap::try_from(QueryBlockRangeRequest { + highest_block_index: (rosetta_blocks.len() * 2) as u64, + number_of_blocks: std::cmp::max( + rosetta_blocks.len() as u64, + MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST, + ), + }) + .unwrap(), + currency.clone(), + ) + .await + .unwrap(); + let query_block_response: QueryBlockRangeResponse = + response.result.try_into().unwrap(); + // If the blocks measured from the highest block index asked for are not in the database the service should return an empty array of blocks + if rosetta_blocks.len() >= MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize { - assert_eq!( - query_block_response.blocks.len(), - rosetta_blocks - .len() - .saturating_sub((rosetta_blocks.len() * 2).saturating_sub( - MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize - )) - .saturating_sub(1) - ); - } else { - assert_eq!(query_block_response.blocks.len(), rosetta_blocks.len()); + assert_eq!(query_block_response.blocks.len(), 0); + assert!(!response.idempotent); + } + // If some of the blocks measured from the highest block index asked for are in the database the service should return the blocks that are in the database + else { + if rosetta_blocks.len() * 2 + > MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize + { + assert_eq!( + query_block_response.blocks.len(), + rosetta_blocks + .len() + .saturating_sub((rosetta_blocks.len() * 2).saturating_sub( + MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize + )) + .saturating_sub(1) + ); + } else { + assert_eq!(query_block_response.blocks.len(), rosetta_blocks.len()); + } + assert!(!response.idempotent); } - assert!(!response.idempotent); - } - let number_of_blocks = (rosetta_blocks.len() / 2) as u64; - let query_blocks_request = QueryBlockRangeRequest { - highest_block_index, - number_of_blocks, - }; + let number_of_blocks = (rosetta_blocks.len() / 2) as u64; + let query_blocks_request = QueryBlockRangeRequest { + highest_block_index, + number_of_blocks, + }; - let query_blocks_response = call( - &storage_client_memory, - "query_block_range", - ObjectMap::try_from(query_blocks_request).unwrap(), - currency.clone(), - ) - .unwrap(); - - assert!(query_blocks_response.idempotent); - let response: QueryBlockRangeResponse = - query_blocks_response.result.try_into().unwrap(); - let querried_blocks = response.blocks; - assert_eq!( - querried_blocks.len(), - std::cmp::min(number_of_blocks, MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST) - as usize - ); - if !querried_blocks.is_empty() { - assert_eq!( - querried_blocks.first().unwrap().block_identifier.index, - highest_block_index - .saturating_sub(std::cmp::min( - number_of_blocks, - MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST - )) - .saturating_add(1) - ); + let query_blocks_response = call( + &storage_client_memory, + "query_block_range", + ObjectMap::try_from(query_blocks_request).unwrap(), + currency.clone(), + ) + .await + .unwrap(); + + assert!(query_blocks_response.idempotent); + let response: QueryBlockRangeResponse = + query_blocks_response.result.try_into().unwrap(); + let querried_blocks = response.blocks; assert_eq!( - querried_blocks.last().unwrap().block_identifier.index, - highest_block_index + querried_blocks.len(), + std::cmp::min( + number_of_blocks, + MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST + ) as usize ); - } + if !querried_blocks.is_empty() { + assert_eq!( + querried_blocks.first().unwrap().block_identifier.index, + highest_block_index + .saturating_sub(std::cmp::min( + number_of_blocks, + MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST + )) + .saturating_add(1) + ); + assert_eq!( + 
querried_blocks.last().unwrap().block_identifier.index, + highest_block_index + ); + } - let query_blocks_request = QueryBlockRangeRequest { - highest_block_index, - number_of_blocks: MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST + 1, - }; + let query_blocks_request = QueryBlockRangeRequest { + highest_block_index, + number_of_blocks: MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST + 1, + }; - let query_blocks_response: QueryBlockRangeResponse = call( - &storage_client_memory, - "query_block_range", - ObjectMap::try_from(query_blocks_request).unwrap(), - currency.clone(), - ) - .unwrap() - .result - .try_into() - .unwrap(); - assert_eq!( - query_blocks_response.blocks.len(), - std::cmp::min( - MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize, - rosetta_blocks.len() + let query_blocks_response: QueryBlockRangeResponse = call( + &storage_client_memory, + "query_block_range", + ObjectMap::try_from(query_blocks_request).unwrap(), + currency.clone(), ) - ); + .await + .unwrap() + .result + .try_into() + .unwrap(); + assert_eq!( + query_blocks_response.blocks.len(), + std::cmp::min( + MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST as usize, + rosetta_blocks.len() + ) + ); + }); Ok(()) }, @@ -1509,8 +1586,8 @@ mod test { .unwrap(); } - #[test] - fn test_account_balance_with_aggregate_all_subaccounts() { + #[tokio::test] + async fn test_account_balance_with_aggregate_all_subaccounts() { use crate::common::storage::types::{ IcrcBlock, IcrcOperation, IcrcTransaction, RosettaBlock, }; @@ -1519,7 +1596,7 @@ mod test { use rosetta_core::identifiers::AccountIdentifier; use serde_json::{Map, Value}; - let storage_client = StorageClient::new_in_memory().unwrap(); + let storage_client = StorageClient::new_in_memory().await.unwrap(); let metadata = Metadata::from_args("ICP".to_string(), 8); let principal = Principal::anonymous(); @@ -1549,8 +1626,8 @@ mod test { 0, )]; - storage_client.store_blocks(blocks).unwrap(); - storage_client.update_account_balances().unwrap(); + storage_client.store_blocks(blocks).await.unwrap(); + storage_client.update_account_balances().await.unwrap(); // Test 1: Aggregate flag with subaccount should fail let account_with_subaccount = Account { @@ -1571,7 +1648,8 @@ mod test { &metadata_obj, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(result.is_err()); // Now that we have blocks, we should get the validation error @@ -1588,7 +1666,7 @@ mod test { // Test 2: Create a simple scenario with aggregated balance // Use a separate storage client for the aggregation test - let storage_client2 = StorageClient::new_in_memory().unwrap(); + let storage_client2 = StorageClient::new_in_memory().await.unwrap(); let subaccount1 = [1u8; 32]; let account1 = Account { owner: principal, @@ -1637,8 +1715,8 @@ mod test { ), ]; - storage_client2.store_blocks(blocks).unwrap(); - storage_client2.update_account_balances().unwrap(); + storage_client2.store_blocks(blocks).await.unwrap(); + storage_client2.update_account_balances().await.unwrap(); // Test aggregated balance: Should be 500 + 1000 = 1500 // For aggregated balance, we need to use an account identifier that represents @@ -1656,7 +1734,8 @@ mod test { &metadata_obj, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(result.is_ok()); let balance_response = result.unwrap(); @@ -1672,7 +1751,8 @@ mod test { &None, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(result1.is_ok()); assert_eq!(result1.unwrap().balances[0].value.to_string(), "1000"); @@ -1684,7 +1764,8 @@ mod test { &None, 
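The first/last-index assertions pin down the windowing arithmetic of `query_block_range`: the response covers at most the cap's worth of blocks, ending at `highest_block_index`. A small checkable restatement of that arithmetic; the cap of 100 is assumed purely for illustration, the real constant is `MAX_BLOCKS_PER_QUERY_BLOCK_RANGE_REQUEST`:

// Illustrative cap; the crate's actual value may differ.
const MAX_BLOCKS_PER_REQUEST: u64 = 100;

/// Inclusive (start, end) index window a range query should cover,
/// assuming the store holds all of the requested blocks.
fn expected_window(highest_block_index: u64, number_of_blocks: u64) -> (u64, u64) {
    let effective = number_of_blocks.min(MAX_BLOCKS_PER_REQUEST);
    let start = highest_block_index
        .saturating_sub(effective)
        .saturating_add(1);
    (start, highest_block_index)
}

fn main() {
    // Within the cap: exactly the requested window, ending at the tip.
    assert_eq!(expected_window(9, 5), (5, 9));
    // Above the cap: clamped to the last 100 blocks.
    assert_eq!(expected_window(500, 101), (401, 500));
}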
metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(result_main.is_ok()); assert_eq!(result_main.unwrap().balances[0].value.to_string(), "500"); @@ -1696,7 +1777,8 @@ mod test { &None, metadata.decimals, metadata.symbol.clone(), - ); + ) + .await; assert!(result.is_ok()); let balance_response = result.unwrap(); @@ -1705,8 +1787,8 @@ mod test { assert_eq!(balance_response.balances[0].value.to_string(), "500"); } - #[test] - fn test_subaccount_transfers_and_balances() { + #[tokio::test] + async fn test_subaccount_transfers_and_balances() { use crate::common::storage::types::{ IcrcBlock, IcrcOperation, IcrcTransaction, RosettaBlock, }; @@ -1714,7 +1796,7 @@ mod test { use icrc_ledger_types::icrc1::account::Account; use rosetta_core::identifiers::AccountIdentifier; - let storage_client = StorageClient::new_in_memory().unwrap(); + let storage_client = StorageClient::new_in_memory().await.unwrap(); let metadata = Metadata::from_args("ICP".to_string(), 8); let principal = Principal::anonymous(); @@ -1853,10 +1935,10 @@ mod test { ]; // Store blocks - storage_client.store_blocks(blocks).unwrap(); + storage_client.store_blocks(blocks).await.unwrap(); // Update account balances - storage_client.update_account_balances().unwrap(); + storage_client.update_account_balances().await.unwrap(); // Test individual account balances let main_balance = account_balance( @@ -1866,6 +1948,7 @@ mod test { metadata.decimals, metadata.symbol.clone(), ) + .await .unwrap(); // Main account: 1000 - 300 - 10 - 200 - 10 = 480 assert_eq!(main_balance.balances[0].value.to_string(), "480"); @@ -1877,6 +1960,7 @@ mod test { metadata.decimals, metadata.symbol.clone(), ) + .await .unwrap(); // Account1: 300 - 150 - 10 = 140 assert_eq!(account1_balance.balances[0].value.to_string(), "140"); @@ -1888,6 +1972,7 @@ mod test { metadata.decimals, metadata.symbol.clone(), ) + .await .unwrap(); // Account2: 200 assert_eq!(account2_balance.balances[0].value.to_string(), "200"); @@ -1899,6 +1984,7 @@ mod test { metadata.decimals, metadata.symbol.clone(), ) + .await .unwrap(); // Other account: 150 assert_eq!(other_balance.balances[0].value.to_string(), "150"); @@ -1918,6 +2004,7 @@ mod test { metadata.decimals, metadata.symbol.clone(), ) + .await .unwrap(); // Aggregated balance: 480 (main) + 140 (account1) + 200 (account2) = 820 @@ -2083,8 +2170,8 @@ mod test { ); } - #[test] - fn test_debug_aggregated_balance_sql() { + #[tokio::test] + async fn test_debug_aggregated_balance_sql() { use crate::common::storage::types::{ IcrcBlock, IcrcOperation, IcrcTransaction, RosettaBlock, }; @@ -2092,7 +2179,7 @@ mod test { use ic_base_types::PrincipalId; use icrc_ledger_types::icrc1::account::Account; - let storage_client = StorageClient::new_in_memory().unwrap(); + let storage_client = StorageClient::new_in_memory().await.unwrap(); let _metadata = Metadata::from_args("ICP".to_string(), 8); let principal = Principal::anonymous(); @@ -2184,21 +2271,24 @@ mod test { ]; // Store blocks and update balances - storage_client.store_blocks(blocks).unwrap(); - storage_client.update_account_balances().unwrap(); + storage_client.store_blocks(blocks).await.unwrap(); + storage_client.update_account_balances().await.unwrap(); // Check individual balances (use a reasonable high block index instead of u64::MAX) let high_block_idx = 1000u64; let main_balance = storage_client .get_account_balance_at_block_idx(&main_account, high_block_idx) + .await .unwrap() .unwrap_or(Nat::from(0u64)); let explicit_zero_balance = storage_client 
.get_account_balance_at_block_idx(&explicit_zero_account, high_block_idx) + .await .unwrap() .unwrap_or(Nat::from(0u64)); let account1_balance = storage_client .get_account_balance_at_block_idx(&account1, high_block_idx) + .await .unwrap() .unwrap_or(Nat::from(0u64)); @@ -2213,6 +2303,7 @@ mod test { &PrincipalId::from(principal), high_block_idx, ) + .await .unwrap(); println!("Aggregated balance: {aggregated_balance}"); @@ -2250,8 +2341,8 @@ mod test { } } - #[test] - fn test_mint_and_burn_fees() { + #[tokio::test] + async fn test_mint_and_burn_fees() { use crate::common::storage::types::{ IcrcBlock, IcrcOperation, IcrcTransaction, RosettaBlock, }; @@ -2259,7 +2350,7 @@ mod test { use icrc_ledger_types::icrc1::account::Account; use rosetta_core::identifiers::AccountIdentifier; - let storage_client = StorageClient::new_in_memory().unwrap(); + let storage_client = StorageClient::new_in_memory().await.unwrap(); let symbol = "ICP"; let decimals = 8; @@ -2272,67 +2363,89 @@ mod test { }; let main_account_id = AccountIdentifier::from(main_account); - let add_mint_block = - |block_id: u64, amount: u64, fee: Option, effective_fee: Option| { - let blocks = vec![RosettaBlock::from_icrc_ledger_block( - IcrcBlock { - parent_hash: None, - transaction: IcrcTransaction { - operation: IcrcOperation::Mint { - to: main_account, - amount: Nat::from(amount), - fee: fee.map(Into::into), - }, - created_at_time: None, - memo: None, + // Helper to create mint block + async fn add_mint_block( + storage_client: &StorageClient, + main_account: Account, + block_id: u64, + amount: u64, + fee: Option, + effective_fee: Option, + ) { + let blocks = vec![RosettaBlock::from_icrc_ledger_block( + IcrcBlock { + parent_hash: None, + transaction: IcrcTransaction { + operation: IcrcOperation::Mint { + to: main_account, + amount: Nat::from(amount), + fee: fee.map(Into::into), }, - effective_fee: effective_fee.map(Into::into), - timestamp: 1, - fee_collector: None, - fee_collector_block_index: None, + created_at_time: None, + memo: None, }, - block_id, - )]; + effective_fee: effective_fee.map(Into::into), + timestamp: 1, + fee_collector: None, + fee_collector_block_index: None, + }, + block_id, + )]; - storage_client.store_blocks(blocks).unwrap(); - storage_client.update_account_balances().unwrap(); - }; + storage_client.store_blocks(blocks).await.unwrap(); + storage_client.update_account_balances().await.unwrap(); + } - let add_burn_block = - |block_id: u64, amount: u64, fee: Option, effective_fee: Option| { - let blocks = vec![RosettaBlock::from_icrc_ledger_block( - IcrcBlock { - parent_hash: None, - transaction: IcrcTransaction { - operation: IcrcOperation::Burn { - from: main_account, - amount: Nat::from(amount), - fee: fee.map(Into::into), - spender: None, - }, - created_at_time: None, - memo: None, + // Helper to create burn block + async fn add_burn_block( + storage_client: &StorageClient, + main_account: Account, + block_id: u64, + amount: u64, + fee: Option, + effective_fee: Option, + ) { + let blocks = vec![RosettaBlock::from_icrc_ledger_block( + IcrcBlock { + parent_hash: None, + transaction: IcrcTransaction { + operation: IcrcOperation::Burn { + from: main_account, + amount: Nat::from(amount), + fee: fee.map(Into::into), + spender: None, }, - effective_fee: effective_fee.map(Into::into), - timestamp: 1, - fee_collector: None, - fee_collector_block_index: None, + created_at_time: None, + memo: None, }, - block_id, - )]; + effective_fee: effective_fee.map(Into::into), + timestamp: 1, + fee_collector: None, + 
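The old `add_mint_block`/`add_burn_block` closures could not simply gain `.await`s: a plain closure body is not an async context, so once `store_blocks` became async the helpers had to become `async fn`s, with the previously captured state passed in explicitly. The shape of that refactor, sketched with hypothetical names:

struct Store;

impl Store {
    async fn put(&self, _value: u64) { /* async I/O would happen here */ }
}

// Before (sketch): `let put_twice = |v| { store.put(v).await; };` does not
// compile, because `.await` is only legal inside an async body.
// After: a nested async fn receiving the captured state as parameters.
async fn put_twice(store: &Store, value: u64) {
    store.put(value).await;
    store.put(value).await;
}

#[tokio::main]
async fn main() {
    let store = Store;
    put_twice(&store, 7).await;
}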
fee_collector_block_index: None, + }, + block_id, + )]; - storage_client.store_blocks(blocks).unwrap(); - storage_client.update_account_balances().unwrap(); - }; + storage_client.store_blocks(blocks).await.unwrap(); + storage_client.update_account_balances().await.unwrap(); + } - let check_account_balance = |expected_balance: &str| { + // Helper to check account balance + async fn check_account_balance( + storage_client: &StorageClient, + main_account_id: &AccountIdentifier, + decimals: u8, + symbol: &str, + expected_balance: &str, + ) { let result = account_balance( - &storage_client, - &main_account_id, + storage_client, + main_account_id, &None, decimals, symbol.to_string(), - ); + ) + .await; assert!(result.is_ok()); let balance_response = result.unwrap(); @@ -2341,30 +2454,30 @@ mod test { balance_response.balances[0].value.to_string(), expected_balance ); - }; + } // The operation fee of 100 is applied - add_mint_block(0, 1000, Some(100), None); - check_account_balance("900"); - add_burn_block(1, 100, Some(100), None); - check_account_balance("700"); + add_mint_block(&storage_client, main_account, 0, 1000, Some(100), None).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "900").await; + add_burn_block(&storage_client, main_account, 1, 100, Some(100), None).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "700").await; // The block effective_fee of 100 is applied - add_mint_block(2, 200, Some(200), Some(100)); - check_account_balance("800"); - add_burn_block(3, 200, Some(200), Some(100)); - check_account_balance("500"); + add_mint_block(&storage_client, main_account, 2, 200, Some(200), Some(100)).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "800").await; + add_burn_block(&storage_client, main_account, 3, 200, Some(200), Some(100)).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "500").await; // The block effective_fee of 100 is applied - add_mint_block(4, 200, None, Some(100)); - check_account_balance("600"); - add_burn_block(5, 200, None, Some(100)); - check_account_balance("300"); + add_mint_block(&storage_client, main_account, 4, 200, None, Some(100)).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "600").await; + add_burn_block(&storage_client, main_account, 5, 200, None, Some(100)).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "300").await; // No fee - add_mint_block(6, 200, None, None); - check_account_balance("500"); - add_burn_block(7, 200, None, None); - check_account_balance("300"); + add_mint_block(&storage_client, main_account, 6, 200, None, None).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "500").await; + add_burn_block(&storage_client, main_account, 7, 200, None, None).await; + check_account_balance(&storage_client, &main_account_id, decimals, symbol, "300").await; } } diff --git a/rs/rosetta-api/icrc1/src/ledger_blocks_synchronization/blocks_synchronizer.rs b/rs/rosetta-api/icrc1/src/ledger_blocks_synchronization/blocks_synchronizer.rs index 3637900edddd..fd215aabe160 100644 --- a/rs/rosetta-api/icrc1/src/ledger_blocks_synchronization/blocks_synchronizer.rs +++ b/rs/rosetta-api/icrc1/src/ledger_blocks_synchronization/blocks_synchronizer.rs @@ -63,7 +63,7 @@ async fn verify_and_fix_gaps( storage_client: Arc, archive_canister_ids: Arc>>, ) -> anyhow::Result<()> { - let sync_ranges = 
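The balance expectations in this test encode a fee precedence rule, as the inline comments suggest: a block-level `effective_fee` overrides the transaction-level `fee`, absent both nothing is charged, mints credit `amount - fee`, and burns debit `amount + fee`. Restated as a tiny checkable function (the rule is inferred from the test's expected balances, not quoted from the ledger code):

fn applied_fee(fee: Option<u64>, effective_fee: Option<u64>) -> u64 {
    // Block-level effective_fee wins; otherwise fall back to the
    // transaction fee; otherwise charge nothing.
    effective_fee.or(fee).unwrap_or(0)
}

fn main() {
    let mut balance: i64 = 0;
    // Mint 1000 with an operation fee of 100 credits 900 in total.
    balance += 1000 - applied_fee(Some(100), None) as i64;
    assert_eq!(balance, 900);
    // Burn 100 with an operation fee of 100 debits 200 in total.
    balance -= 100 + applied_fee(Some(100), None) as i64;
    assert_eq!(balance, 700);
    // effective_fee = 100 overrides fee = 200.
    balance += 200 - applied_fee(Some(200), Some(100)) as i64;
    assert_eq!(balance, 800);
    // No fee anywhere: the full amount moves.
    balance -= 200 + applied_fee(None, None) as i64;
    assert_eq!(balance, 600);
}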
derive_synchronization_gaps(storage_client.clone())?; + let sync_ranges = derive_synchronization_gaps(storage_client.clone()).await?; let tip = get_tip_block_hash_and_index(agent.clone()).await?; let (_tip_block_hash, tip_block_index) = match tip { Some(tip) => tip, @@ -87,15 +87,15 @@ async fn verify_and_fix_gaps( /// This function will check whether there is a gap in the database. /// Furthermore, if there exists a gap between the genesis block and the lowest stored block, this function will add this synchronization gap to the gaps returned by the storage client. /// It is guaranteed that all gaps between [0,Highest_Stored_Block] will be returned. -fn derive_synchronization_gaps( +async fn derive_synchronization_gaps( storage_client: Arc, ) -> anyhow::Result> { - if !storage_client.does_blockchain_have_gaps()? { + if !storage_client.does_blockchain_have_gaps().await? { return Ok(vec![]); } // If there is a gap, compute all the gaps. - let gap = storage_client.get_blockchain_gaps()?; + let gap = storage_client.get_blockchain_gaps().await?; // The database should have at most one gap. Otherwise the database file was edited and it can no longer be guaranteed that it contains valid blocks. if gap.len() > 1 { @@ -105,7 +105,7 @@ fn derive_synchronization_gaps( ); } else if gap.is_empty() { // The block counter is off - storage_client.reset_blocks_counter()?; + storage_client.reset_blocks_counter().await?; } let mut sync_ranges = gap @@ -123,7 +123,7 @@ fn derive_synchronization_gaps( // Gaps are only determined within stored block ranges. Blocks with indices that are below the lowest stored block and above the highest stored blocks are not considered. // Check if the lowest block that was stored is the genesis block. - let Some(lowest_block) = storage_client.get_block_with_lowest_block_idx()? else { + let Some(lowest_block) = storage_client.get_block_with_lowest_block_idx().await? else { // If the database is empty then there cannot exist any gaps. return Ok(vec![]); }; @@ -197,12 +197,13 @@ pub async fn start_synching_blocks( // Update the account balances. When queried for its status, the ledger will return the // highest block index for which the account balances have been processed. - match storage_client.update_account_balances() { + match storage_client.update_account_balances().await { Ok(_) => { // We will only end up here if there are no gaps, the blockchain is synced to the // tip, and the account balances have been updated. let highest_block_index = storage_client .get_block_with_highest_block_idx() + .await .unwrap_or(None) .map(|rosetta_block| rosetta_block.index) .unwrap_or(0u64); @@ -281,18 +282,21 @@ pub async fn sync_from_the_tip( // The starting point of the synchronization process is either 0 if the database is empty or the highest stored block index plus one. // The trailing parent hash is either `None` if the database is empty or the block hash of the block with the highest block index in storage. - let sync_range = storage_client.get_block_with_highest_block_idx()?.map_or( - SyncRange::new(0, tip_block_index, ByteBuf::from(tip_block_hash), None), - |block| { - SyncRange::new( - // If storage is up to date then the start index is the same as the tip of the ledger. - block.index + 1, - tip_block_index, - ByteBuf::from(tip_block_hash), - Some(block.clone().get_block_hash()), - ) - }, - ); + let sync_range = storage_client + .get_block_with_highest_block_idx() + .await? 
+ .map_or( + SyncRange::new(0, tip_block_index, ByteBuf::from(tip_block_hash), None), + |block| { + SyncRange::new( + // If storage is up to date then the start index is the same as the tip of the ledger. + block.index + 1, + tip_block_index, + ByteBuf::from(tip_block_hash), + Some(block.clone().get_block_hash()), + ) + }, + ); // Do not make a sync call if the storage is up to date with the replica's ledger. if !sync_range.index_range.is_empty() { @@ -437,7 +441,7 @@ async fn sync_blocks_interval( let number_of_blocks_fetched = fetched_blocks.len() as u64; // Store the fetched blocks in the database. - let result = storage_client.store_blocks(fetched_blocks.clone()); + let result = storage_client.store_blocks(fetched_blocks.clone()).await; if let Err(e) = result { error!("Error while calling storage_client.store_blocks: {}", e); return Err(e); @@ -589,15 +593,24 @@ async fn fetch_blocks_interval( start: archive_query.start.clone(), length: archive_query.length, })?; + // Check if the provided archive canister id is in the list of trusted canister ids - let mut trusted_archive_canisters = archive_canister_ids.lock().await; - if !trusted_archive_canisters.iter().any(|archive_info| { - archive_info.canister_id == archive_query.callback.canister_id - }) { - *trusted_archive_canisters = - fetch_archive_canister_infos(agent.clone()).await?; - - // Check again after updating the list of archive canister ids whether the provided archive canister id is in the list + // (without holding lock across await points) + let is_trusted = { + let trusted_archive_canisters = archive_canister_ids.lock().await; + trusted_archive_canisters.iter().any(|archive_info| { + archive_info.canister_id == archive_query.callback.canister_id + }) + }; + + if !is_trusted { + // Fetch updated archive info without holding the lock + let new_archive_infos = fetch_archive_canister_infos(agent.clone()).await?; + + // Update the list and check again + let mut trusted_archive_canisters = archive_canister_ids.lock().await; + *trusted_archive_canisters = new_archive_infos; + if !trusted_archive_canisters.iter().any(|archive_info| { archive_info.canister_id == archive_query.callback.canister_id }) { @@ -608,6 +621,7 @@ async fn fetch_blocks_interval( } } + // Query the archive without holding any lock let archive_response = agent .agent .query( diff --git a/rs/rosetta-api/icrc1/src/lib.rs b/rs/rosetta-api/icrc1/src/lib.rs index 9559649a757d..b4a136572a5d 100644 --- a/rs/rosetta-api/icrc1/src/lib.rs +++ b/rs/rosetta-api/icrc1/src/lib.rs @@ -9,10 +9,7 @@ use icrc_ledger_types::icrc::generic_metadata_value::MetadataValue; use icrc_ledger_types::icrc3::archive::ArchiveInfo; use num_traits::ToPrimitive; use rosetta_core::objects::Currency; -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; +use std::{collections::HashMap, sync::Arc}; use tokio::sync::Mutex as AsyncMutex; pub mod common; pub mod config; @@ -26,7 +23,7 @@ pub mod ledger_blocks_synchronization; pub struct AppState { pub icrc1_agent: Arc, pub ledger_id: CanisterId, - pub synched: Arc>>, + pub synched: Arc>>, pub archive_canister_ids: Arc>>, pub storage: Arc, pub metadata: Metadata, diff --git a/rs/rosetta-api/icrc1/src/main.rs b/rs/rosetta-api/icrc1/src/main.rs index cbd39e4e4713..29c8e00c7351 100644 --- a/rs/rosetta-api/icrc1/src/main.rs +++ b/rs/rosetta-api/icrc1/src/main.rs @@ -28,7 +28,7 @@ use config::{Args, ParsedConfig, Store, TokenDef}; use rosetta_core::metrics::RosettaMetrics; use rosetta_core::watchdog::WatchdogThread; use std::collections::HashMap; 
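The `fetch_blocks_interval` hunk above is the standard two-phase fix for holding an async mutex guard across a slow `.await`: answer the fast-path membership question inside a narrow scope so the guard drops, do the network call lock-free, then re-acquire, install the fresh data, and check again. Distilled to a sketch, where `refresh` stands in for `fetch_archive_canister_infos`:

use std::sync::Arc;
use tokio::sync::Mutex;

// Stand-in for the async call that fetches the authoritative list.
async fn refresh() -> Vec<u64> {
    vec![1, 2, 3]
}

async fn is_trusted(list: Arc<Mutex<Vec<u64>>>, candidate: u64) -> bool {
    // Scope the guard so it is dropped before any await point.
    let trusted = {
        let guard = list.lock().await;
        guard.contains(&candidate)
    };
    if trusted {
        return true;
    }
    // The network call happens with no lock held.
    let fresh = refresh().await;
    // Re-acquire, install the fresh list, and check once more.
    let mut guard = list.lock().await;
    *guard = fresh;
    guard.contains(&candidate)
}

#[tokio::main]
async fn main() {
    let list = Arc::new(Mutex::new(vec![]));
    assert!(is_trusted(list.clone(), 2).await); // found after refresh
    assert!(!is_trusted(list, 9).await); // still unknown after refresh
}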
-use std::sync::{Arc, Mutex}; +use std::sync::Arc; use std::{path::PathBuf, process, time::Duration}; use tokio::{net::TcpListener, sync::Mutex as AsyncMutex}; use tower_http::classify::{ServerErrorsAsFailures, SharedClassifier}; @@ -119,7 +119,7 @@ async fn load_metadata( is_offline: bool, ) -> anyhow::Result { if is_offline { - let db_metadata_entries = storage.read_metadata()?; + let db_metadata_entries = storage.read_metadata().await?; let are_metadata_set = token_def.are_metadata_args_set(); // If metadata is empty and the args are not set, bail out. if db_metadata_entries.is_empty() && !are_metadata_set { @@ -189,7 +189,7 @@ async fn load_metadata( .map(|(key, value)| MetadataEntry::from_metadata_value(key, value)) .collect::>>()?; - storage.write_metadata(ic_metadata_entries.clone())?; + storage.write_metadata(ic_metadata_entries.clone()).await?; Metadata::from_metadata_entries(&ic_metadata_entries) } @@ -239,7 +239,7 @@ async fn main() -> Result<()> { }); let mut storage = match &config.store { - Store::InMemory => StorageClient::new_in_memory()?, + Store::InMemory => StorageClient::new_in_memory().await?, Store::File { dir_path } => { let mut path = dir_path.clone(); path.push(format!("{}.db", PrincipalId::from(token_def.ledger_id))); @@ -249,6 +249,7 @@ async fn main() -> Result<()> { config.flush_cache_shrink_mem, config.balance_sync_batch_size, ) + .await .unwrap_or_else(|err| panic!("error creating persistent storage '{path:?}': {err}")) } }; @@ -296,7 +297,7 @@ async fn main() -> Result<()> { let shared_state = Arc::new(AppState { icrc1_agent: icrc1_agent.clone(), ledger_id: token_def.ledger_id, - synched: Arc::new(Mutex::new(None)), + synched: Arc::new(AsyncMutex::new(None)), storage: Arc::new(storage), archive_canister_ids: Arc::new(AsyncMutex::new(vec![])), metadata, diff --git a/rs/rosetta-api/icrc1/tests/integration_test_components/blocks_synchronizer/fetching_blocks_interval_test.rs b/rs/rosetta-api/icrc1/tests/integration_test_components/blocks_synchronizer/fetching_blocks_interval_test.rs index 1e14e1e77451..fa6073323f10 100644 --- a/rs/rosetta-api/icrc1/tests/integration_test_components/blocks_synchronizer/fetching_blocks_interval_test.rs +++ b/rs/rosetta-api/icrc1/tests/integration_test_components/blocks_synchronizer/fetching_blocks_interval_test.rs @@ -20,7 +20,7 @@ use icrc_ledger_types::icrc1::transfer::TransferArg; use lazy_static::lazy_static; use pocket_ic::PocketIcBuilder; use proptest::prelude::*; -use rusqlite::{Connection, OpenFlags}; +use rusqlite::Connection; use std::sync::Arc; use tokio::runtime::Runtime; use tokio::sync::Mutex as AsyncMutex; @@ -31,16 +31,23 @@ lazy_static! { pub static ref NUM_TEST_CASES: u32 = 2; } -fn check_storage_validity(storage_client: Arc, highest_index: u64) { +async fn check_storage_validity(storage_client: Arc, highest_index: u64) { // Get the tip of the blockchain from the storage client - let tip_block = storage_client.get_block_with_highest_block_idx().unwrap(); + let tip_block = storage_client + .get_block_with_highest_block_idx() + .await + .unwrap(); // Get the genesis block from the blockchain - let genesis_block = storage_client.get_block_with_lowest_block_idx().unwrap(); + let genesis_block = storage_client + .get_block_with_lowest_block_idx() + .await + .unwrap(); // Get the the entire blockchain let blocks_stored = storage_client .get_blocks_by_index_range(0, highest_index) + .await .unwrap(); // The index of the tip of the chain should be the number of generated blocks @@ -102,14 +109,14 @@ proptest! 
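All of these `StorageClient` call sites gained `.await` because the storage layer now runs its SQLite work off the async threads; judging by the dependency changes in this PR, that is tokio-rusqlite, which owns the rusqlite `Connection` on a worker thread and exposes an async `call`. A minimal sketch of that style of API; the exact `call` signature here is my reading of tokio-rusqlite 0.7 and should be treated as an assumption:

// Sketch only: assumes tokio-rusqlite 0.7's `Connection::open_in_memory`
// and `call`, whose closure runs on the connection's worker thread.
use tokio_rusqlite::Connection;

#[tokio::main]
async fn main() -> Result<(), tokio_rusqlite::Error> {
    let conn = Connection::open_in_memory().await?;

    // The closure gets `&mut rusqlite::Connection`; its result is sent
    // back across a channel to the awaiting caller.
    let count: i64 = conn
        .call(|conn| {
            conn.execute_batch(
                "CREATE TABLE blocks (idx INTEGER PRIMARY KEY);
                 INSERT INTO blocks (idx) VALUES (0), (1), (2);",
            )?;
            let n = conn.query_row("SELECT COUNT(*) FROM blocks", [], |row| row.get(0))?;
            Ok(n)
        })
        .await?;

    assert_eq!(count, 3);
    Ok(())
}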
{ } // Create the storage client where blocks will be stored - let storage_client = Arc::new(StorageClient::new_in_memory().unwrap()); + let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap()); // Start the synching process // Conduct a full sync from the tip of the blockchain to genesis block blocks_synchronizer::start_synching_blocks(agent.clone(), storage_client.clone(),2,Arc::new(AsyncMutex::new(vec![])), RecurrencyMode::OneShot, Box::new(|| {})).await.unwrap(); // Check that the full sync of all blocks generated by the first batch of blocks is valid - check_storage_validity(storage_client.clone(),transfer_args_batch1.len() as u64); + check_storage_validity(storage_client.clone(),transfer_args_batch1.len() as u64).await; // Create some more blocks to be fetched later for transfer_arg in transfer_args_batch2.iter() { @@ -121,13 +128,13 @@ proptest! { blocks_synchronizer::sync_from_the_tip(agent.clone(), storage_client.clone(),2,Arc::new(AsyncMutex::new(vec![]))).await.unwrap(); // Check that the sync of all blocks generated by the second batch of blocks is valid - check_storage_validity(storage_client.clone(),(transfer_args_batch1.len()+transfer_args_batch2.len()) as u64); + check_storage_validity(storage_client.clone(),(transfer_args_batch1.len()+transfer_args_batch2.len()) as u64).await; // If we do another synchronization where there are no new blocks the synchronizer should be able to handle that blocks_synchronizer::start_synching_blocks(agent.clone(), storage_client.clone(),2,Arc::new(AsyncMutex::new(vec![])), RecurrencyMode::OneShot, Box::new(|| {})).await.unwrap(); // Storage should still be valid - check_storage_validity(storage_client.clone(),(transfer_args_batch1.len()+transfer_args_batch2.len()) as u64); + check_storage_validity(storage_client.clone(),(transfer_args_batch1.len()+transfer_args_batch2.len()) as u64).await; }); } @@ -172,7 +179,7 @@ proptest! { } // Create the storage client where blocks will be stored - let storage_client = Arc::new(StorageClient::new_in_memory().unwrap()); + let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap()); // Start the synching process // Conduct a full sync from the tip of the blockchain to genesis block @@ -180,7 +187,7 @@ proptest! { blocks_synchronizer::start_synching_blocks(agent.clone(), storage_client.clone(),10,Arc::new(AsyncMutex::new(vec![])), RecurrencyMode::OneShot, Box::new(|| {})).await.unwrap(); // Check that the full sync of all blocks generated is valid - check_storage_validity(storage_client.clone(),transfer_args.len() as u64); + check_storage_validity(storage_client.clone(),transfer_args.len() as u64).await; }); } @@ -204,14 +211,14 @@ proptest! 
{ agent.transfer(transfer_arg.clone()).await.unwrap().unwrap(); } - let storage_client = Arc::new(StorageClient::new_in_memory().unwrap()); + let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap()); blocks_synchronizer::start_synching_blocks(agent.clone(), storage_client.clone(),10,Arc::new(AsyncMutex::new(vec![])), RecurrencyMode::OneShot, Box::new(|| {})).await.unwrap(); - check_storage_validity(storage_client.clone(),transfer_args.len().saturating_sub(1) as u64); + check_storage_validity(storage_client.clone(),transfer_args.len().saturating_sub(1) as u64).await; // Now we check the certificate of the ledger let (hash,tip_index) = agent.get_certified_chain_tip().await.unwrap().unwrap(); assert_eq!(tip_index,transfer_args.len().saturating_sub(1) as u64); - let tip_block = storage_client.get_block_with_highest_block_idx().unwrap().unwrap(); + let tip_block = storage_client.get_block_with_highest_block_idx().await.unwrap().unwrap(); assert_eq!(tip_block.get_block_hash(),hash); } @@ -271,9 +278,12 @@ fn test_gaps_handling() { } // Create a tokio environment to conduct async calls - const DB_NAME: &str = "test_gaps_handling"; const NUM_BLOCKS: u64 = 10; + // Use a temporary directory for the on-disk database + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("test_gaps_handling.db"); + let rt = Runtime::new().unwrap(); let mut pocket_ic = PocketIcBuilder::new() .with_nns_subnet() @@ -309,6 +319,7 @@ fn test_gaps_handling() { let port = endpoint.port().unwrap(); // Wrap async calls in a blocking Block + let db_path_clone = db_path.clone(); rt.block_on(async { // Create a testing agent let agent = Arc::new(Icrc1Agent { @@ -316,8 +327,8 @@ fn test_gaps_handling() { ledger_canister_id: icrc_ledger_canister_id, }); - // Create the storage client where blocks will be stored - let storage_client = Arc::new(StorageClient::new_named_in_memory(DB_NAME).unwrap()); + // Create the storage client where blocks will be stored (using on-disk database) + let storage_client = Arc::new(StorageClient::new_persistent(&db_path_clone).await.unwrap()); // Start the synching process // Conduct a full sync from the tip of the blockchain to genesis block @@ -333,7 +344,7 @@ fn test_gaps_handling() { .unwrap(); // Check that the full sync of all blocks generated by the first batch of blocks is valid - check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1); + check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1).await; // Sync between the tip of the chain and the stored blocks // The blocksynchronizer now sync the blocks between the current tip of the chain and the most recently stored block @@ -347,14 +358,10 @@ fn test_gaps_handling() { .unwrap(); // Check that the sync of all blocks generated by the second batch of blocks is valid - check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1); + check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1).await; - // Create a connection to the database - let connection = Connection::open_with_flags( - format!("'file:{DB_NAME}?mode=memory&cache=shared', uri=True"), - OpenFlags::default(), - ) - .unwrap(); + // Create a separate connection to the same database file for direct SQL manipulation + let connection = Connection::open(&db_path_clone).unwrap(); // The database should hold all the expected blocks. 
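Replacing the shared-cache in-memory URI with a `tempfile`-backed database is what makes the direct `Connection::open` in the gaps test work reliably: a file on disk can be opened by any number of independent connections, whereas the old `file:...?mode=memory&cache=shared` string was malformed as written and brittle once the primary connection lives on another thread. The two-connection essentials, with an illustrative schema:

use rusqlite::Connection;

fn main() {
    // The temp directory keeps the file alive for the test and
    // removes it on drop.
    let dir = tempfile::tempdir().unwrap();
    let db_path = dir.path().join("test.db");

    // First connection: stands in for the storage client's own handle.
    let writer = Connection::open(&db_path).unwrap();
    writer
        .execute_batch("CREATE TABLE blocks (idx INTEGER PRIMARY KEY);")
        .unwrap();
    writer
        .execute("INSERT INTO blocks (idx) VALUES (?1)", [42])
        .unwrap();

    // Second connection to the same file: direct SQL inspection and
    // manipulation, which is what the gap-handling test relies on.
    let inspector = Connection::open(&db_path).unwrap();
    let count: i64 = inspector
        .query_row("SELECT COUNT(*) FROM blocks", [], |row| row.get(0))
        .unwrap();
    assert_eq!(count, 1);
}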
@@ -388,7 +395,7 @@ fn test_gaps_handling() {
             .unwrap();

         // Storage should still be valid
-        check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1);
+        check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1).await;

         // Database should be updated
         assert_block_count(&connection, NUM_BLOCKS);
@@ -415,7 +422,7 @@ fn test_gaps_handling() {
             .unwrap();

         // Storage should still be valid
-        check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1);
+        check_storage_validity(storage_client.clone(), NUM_BLOCKS - 1).await;

         // The database should have been updated.
         assert_block_count(&connection, NUM_BLOCKS);
diff --git a/rs/rosetta-api/icrc1/tests/integration_test_components/storage/storing_blockchain_data_test.rs b/rs/rosetta-api/icrc1/tests/integration_test_components/storage/storing_blockchain_data_test.rs
index f7a39d6cb978..95316c740d48 100644
--- a/rs/rosetta-api/icrc1/tests/integration_test_components/storage/storing_blockchain_data_test.rs
+++ b/rs/rosetta-api/icrc1/tests/integration_test_components/storage/storing_blockchain_data_test.rs
@@ -85,10 +85,10 @@ proptest! {
         });

         // Create the storage client where blocks will be stored
-        let storage_client = Arc::new(StorageClient::new_in_memory().unwrap());
+        let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap());

         // No blocks have been synched. The update should succeed with no accounts being updated
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         // A mapping between accounts, block indices and their respective balances
         let mut account_balance_at_block_idx = HashMap::new();
@@ -141,7 +141,7 @@ proptest! {
         }

         blocks_synchronizer::start_synching_blocks(agent.clone(), storage_client.clone(), 10,Arc::new(AsyncMutex::new(vec![])), RecurrencyMode::OneShot, Box::new(|| {})).await.unwrap();
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         let mut block_indices_iter = block_indices.into_iter().collect::<Vec<_>>();
         block_indices_iter.sort();
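Note: the .await added to every storage accessor in this file follows from tokio-rusqlite's execution model: the synchronous rusqlite work runs on the one thread that owns the connection, reached through Connection::call. A hedged sketch of what such an async read can look like, assuming tokio-rusqlite 0.7's call API where the closure returns a tokio_rusqlite Result; the method name, table, and column below are illustrative and not taken from this diff:

    use tokio_rusqlite::Connection;

    // Illustrative only: fetch the highest index from a hypothetical blocks table.
    async fn highest_block_idx(
        connection: &Connection,
    ) -> Result<Option<u64>, tokio_rusqlite::Error> {
        connection
            .call(|conn| {
                // This closure runs on the background thread that owns the
                // synchronous rusqlite::Connection, so blocking here is fine.
                let idx = conn.query_row("SELECT MAX(idx) FROM blocks", [], |row| row.get(0))?;
                Ok(idx)
            })
            .await
    }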
@@ -150,14 +150,14 @@ proptest! {
         for idx in block_indices_iter.into_iter(){
             for account in accounts.clone().into_iter(){
                 account_balance_at_block_idx.contains_key(&(account,idx)).then(|| current_balances.entry(account).and_modify(|balance| *balance = account_balance_at_block_idx.get(&(account,idx)).unwrap().clone()));
-                assert_eq!(*current_balances.get(&account).unwrap(),storage_client.get_account_balance_at_block_idx(&account,idx).unwrap().unwrap_or(Nat(BigUint::zero())));
+                assert_eq!(*current_balances.get(&account).unwrap(),storage_client.get_account_balance_at_block_idx(&account,idx).await.unwrap().unwrap_or(Nat(BigUint::zero())));
             }
         }

         // Check that the current balances of the ledger and rosetta storage match up
         for account in accounts.clone().into_iter(){
             let balance_ledger = agent.balance_of(account,CallMode::Query).await.unwrap();
-            let balance_rosetta = storage_client.get_account_balance(&account).unwrap().unwrap_or(Nat(BigUint::zero()));
+            let balance_rosetta = storage_client.get_account_balance(&account).await.unwrap().unwrap_or(Nat(BigUint::zero()));
             assert_eq!(balance_ledger,balance_rosetta);
         }
     });
@@ -190,7 +190,7 @@ fn test_self_transfer() {
             agent: local_replica::get_testing_agent(port).await,
             ledger_canister_id: icrc_ledger_canister_id,
         });
-        let storage_client = Arc::new(StorageClient::new_in_memory().unwrap());
+        let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap());

         blocks_synchronizer::start_synching_blocks(
             agent.clone(),
@@ -202,13 +202,14 @@ fn test_self_transfer() {
         )
         .await
         .unwrap();
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         let balance = agent.balance_of(account, CallMode::Query).await.unwrap();
         assert_eq!(balance, Nat::from(100_000_000_u64));
         assert_eq!(
             storage_client
                 .get_account_balance(&account)
+                .await
                 .unwrap()
                 .unwrap(),
             Nat::from(100_000_000_u64)
@@ -237,13 +238,14 @@ fn test_self_transfer() {
         )
         .await
         .unwrap();
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         let balance = agent.balance_of(account, CallMode::Query).await.unwrap();
         assert_eq!(balance, Nat::from(100_000_000 - DEFAULT_TRANSFER_FEE));
         assert_eq!(
             storage_client
                 .get_account_balance(&account)
+                .await
                 .unwrap()
                 .unwrap(),
             Nat::from(100_000_000 - DEFAULT_TRANSFER_FEE)
@@ -289,7 +291,7 @@ fn test_burn_and_mint_fee() {
             agent: local_replica::get_testing_agent(port).await,
             ledger_canister_id: icrc_ledger_canister_id,
         });
-        let storage_client = Arc::new(StorageClient::new_in_memory().unwrap());
+        let storage_client = Arc::new(StorageClient::new_in_memory().await.unwrap());

         const FEE_COLLECTOR: Account = Account {
             owner: PrincipalId::new_user_test_id(2).0,
@@ -321,11 +323,12 @@ fn test_burn_and_mint_fee() {
         )
         .await
         .unwrap();
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         assert_eq!(
             storage_client
                 .get_account_balance(&TEST_ACCOUNT)
+                .await
                 .unwrap()
                 .unwrap(),
             Nat::from(850u64) // mint 1000 - mint fee 50 - burn 50 - burn fee 50
@@ -333,6 +336,7 @@ fn test_burn_and_mint_fee() {
         assert!(
             storage_client
                 .get_account_balance(&FEE_COLLECTOR)
+                .await
                 .unwrap()
                 .is_none()
         ); // no fee collector in the first 2 blocks
@@ -365,11 +369,12 @@ fn test_burn_and_mint_fee() {
         )
         .await
         .unwrap();
-        storage_client.update_account_balances().unwrap();
+        storage_client.update_account_balances().await.unwrap();

         assert_eq!(
             storage_client
                 .get_account_balance(&TEST_ACCOUNT)
+                .await
                 .unwrap()
                 .unwrap(),
             Nat::from(800u64) // 850 + mint 100 - mint fee 50 - burn 50 - burn fee 50
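Note: the expected values in these assertions are plain fee accounting, per the inline comments: each mint and each burn pays a 50-token fee debited from TEST_ACCOUNT, and once the fee collector is configured the fees accrue to FEE_COLLECTOR. A tiny standalone check of the arithmetic used above:

    fn main() {
        const FEE: u64 = 50;

        // First round: mint 1000 and burn 50, each paying the 50 fee.
        let after_round_one = 1000 - FEE - 50 - FEE;
        assert_eq!(after_round_one, 850);

        // Second round: mint 100 and burn 50, each paying the 50 fee again.
        let after_round_two = after_round_one + 100 - FEE - 50 - FEE;
        assert_eq!(after_round_two, 800);

        // Only the second round's two fees reach the fee collector.
        assert_eq!(FEE + FEE, 100);
    }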
@@ -377,6 +382,7 @@ fn test_burn_and_mint_fee() {
         assert_eq!(
             storage_client
                 .get_account_balance(&FEE_COLLECTOR)
+                .await
                 .unwrap()
                 .unwrap(),
             Nat::from(100u64) // mint fee 50 + burn fee 50