diff --git a/Cargo.lock b/Cargo.lock index 82843dc1f6..19d423f391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -692,6 +692,7 @@ dependencies = [ "serde_json", "serde_plain", "tempfile", + "tokio", "toml", ] @@ -718,10 +719,14 @@ dependencies = [ "ckb-app-config", "ckb-chain-spec", "ckb-channel", + "ckb-constant", "ckb-dao-utils", + "ckb-db", + "ckb-db-schema", "ckb-error", "ckb-jsonrpc-types", "ckb-logger", + "ckb-logger-service", "ckb-merkle-mountain-range", "ckb-metrics", "ckb-network", @@ -734,12 +739,17 @@ dependencies = [ "ckb-test-chain-utils", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", "ckb-verification-contextual", "ckb-verification-traits", + "crossbeam", + "dashmap", + "either", "faux", "is_sorted", "lazy_static", + "minstant", "tempfile", ] @@ -1009,7 +1019,6 @@ dependencies = [ "ckb-logger", "ckb-network", "ckb-network-alert", - "ckb-proposal-table", "ckb-resource", "ckb-rpc", "ckb-shared", @@ -1497,6 +1506,7 @@ name = "ckb-shared" version = "0.116.0-pre" dependencies = [ "arc-swap", + "bitflags 1.3.2", "ckb-app-config", "ckb-async-runtime", "ckb-chain-spec", @@ -1506,6 +1516,7 @@ dependencies = [ "ckb-db-schema", "ckb-error", "ckb-logger", + "ckb-metrics", "ckb-migrate", "ckb-notify", "ckb-proposal-table", @@ -1515,9 +1526,13 @@ dependencies = [ "ckb-systemtime", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", + "dashmap", "once_cell", + "sled", "tempfile", + "tokio", ] [[package]] @@ -1578,9 +1593,7 @@ dependencies = [ name = "ckb-sync" version = "0.116.0-pre" dependencies = [ - "bitflags 1.3.2", "ckb-app-config", - "ckb-async-runtime", "ckb-chain", "ckb-chain-spec", "ckb-channel", @@ -1589,6 +1602,7 @@ dependencies = [ "ckb-dao-utils", "ckb-error", "ckb-logger", + "ckb-logger-service", "ckb-metrics", "ckb-network", "ckb-proposal-table", @@ -1614,7 +1628,6 @@ dependencies = [ "once_cell", "rand 0.7.3", "sentry", - "sled", "tempfile", "tokio", ] @@ -2066,6 +2079,20 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + [[package]] name = "crossbeam-channel" version = "0.5.12" @@ -2126,6 +2153,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ctrlc" version = "3.4.4" @@ -2328,9 +2365,9 @@ checksum = "8d978bd5d343e8ab9b5c0fc8d93ff9c602fdc96616ffff9c05ac7a155419b824" [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "encode_unicode" @@ -3523,6 +3560,17 @@ dependencies = [ "adler", ] +[[package]] +name = "minstant" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dfc09c8abbe145769b6d51fd03f84fdd459906cbd6ac54e438708f016b40bd" +dependencies = [ + "ctor", + "libc", + "wasi 0.7.0", +] + [[package]] name = "mio" version = "0.8.11" @@ -6140,6 +6188,12 @@ dependencies = [ "try-lock", ] 
+[[package]] name = "wasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" diff --git a/Makefile b/Makefile index 7eedf87a76..6308fe8b48 100644 --- a/Makefile +++ b/Makefile @@ -125,13 +125,13 @@ check: setup-ckb-test ## Runs all of the compiler's checks. build: ## Build binary with release profile. cargo build ${VERBOSE} --release -.PHONY: build-for-profiling-without-debug-symbols -build-for-profiling-without-debug-symbols: ## Build binary for profiling, without debug symbols. - JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --release --features "profiling" +.PHONY: profiling +profiling: ## Build binary for profiling, without debug symbols. + JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --profile prod --features "with_sentry,with_dns_seeding,profiling" -.PHONY: build-for-profiling +.PHONY: profiling-with-debug-symbols profiling-with-debug-symbols: ## Build binary for profiling, with debug symbols. - devtools/release/make-with-debug-symbols build-for-profiling-without-debug-symbols + devtools/release/make-with-debug-symbols profiling .PHONY: prod prod: ## Build binary for production release. diff --git a/benches/benches/benchmarks/always_success.rs b/benches/benches/benchmarks/always_success.rs index 111766000f..33ed8bda8e 100644 --- a/benches/benches/benchmarks/always_success.rs +++ b/benches/benches/benchmarks/always_success.rs @@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) { (0..20).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); @@ -44,7 +44,10 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(1).for_each(|block| { chain - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_EXTENSION, + ) .expect("process block OK"); }); }, @@ -77,14 +80,14 @@ fn bench(c: &mut Criterion) { (0..5).for_each(|i| { let block = gen_always_success_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); } parent = block; }); (0..2).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); parent = block; }); @@ -110,7 +113,10 @@ fn bench(c: &mut Criterion) { .take(5) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) }, |(chain, blocks)| { blocks.into_iter().skip(6).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -152,11 +158,17 @@ fn bench(c: &mut Criterion) { let block = gen_always_success_block(&mut blocks, &parent, shared2); let arc_block = Arc::new(block.clone()); chain2 -
.internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::clone(&arc_block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block(arc_block, Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + arc_block, + Switch::DISABLE_ALL, + ) .expect("process block OK"); } parent = block; @@ -165,7 +177,7 @@ fn bench(c: &mut Criterion) { (0..4).for_each(|_| { let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -179,7 +191,10 @@ fn bench(c: &mut Criterion) { .take(7) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -187,7 +202,10 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(8).for_each(|block| { chain - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_EXTENSION, + ) .expect("process block OK"); }); }, diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index 2f966e0318..103cab0893 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -1,7 +1,7 @@ use crate::benchmarks::util::{create_2out_transaction, create_secp_tx, secp_cell}; use ckb_app_config::NetworkConfig; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::JsonBytes; @@ -133,8 +133,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start(Some("ChainService")); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); (shared, chain_controller) } @@ -219,7 +218,10 @@ fn bench(c: &mut Criterion) { .expect("header verified"); chain - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_EXTENSION, + ) .expect("process_block"); i -= 1; } diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 29ce56bc8c..37ec9d11c3 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -1,6 +1,6 @@ use crate::benchmarks::util::create_2out_transaction; use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::{ChainSpec, IssuedCell}; use ckb_jsonrpc_types::JsonBytes; use ckb_resource::Resource; @@ -96,8 +96,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) { .tx_pool_config(tx_pool_config) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start(Some("ChainService")); + let chain_controller = 
start_chain_services(pack.take_chain_services_builder()); // FIXME: global cache !!! let _ret = setup_system_cell_cache( diff --git a/benches/benches/benchmarks/secp_2in2out.rs b/benches/benches/benchmarks/secp_2in2out.rs index 69c0705f4f..03ebab1685 100644 --- a/benches/benches/benchmarks/secp_2in2out.rs +++ b/benches/benches/benchmarks/secp_2in2out.rs @@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) { (0..20).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -44,7 +44,10 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(1).for_each(|block| { chain - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_EXTENSION, + ) .expect("process block OK"); }); }, @@ -77,14 +80,14 @@ fn bench(c: &mut Criterion) { (0..5).for_each(|i| { let block = gen_secp_block(&mut blocks, &parent, shared2); chain2 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -96,7 +99,7 @@ fn bench(c: &mut Criterion) { (0..2).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -110,7 +113,10 @@ fn bench(c: &mut Criterion) { .take(5) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -118,7 +124,7 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(6).for_each(|block| { chain - .process_block(Arc::new(block)) + .blocking_process_block(Arc::new(block)) .expect("process block OK"); }); }, @@ -152,11 +158,17 @@ fn bench(c: &mut Criterion) { let block = gen_secp_block(&mut blocks, &parent, shared2); let arc_block = Arc::new(block.clone()); chain2 - .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::clone(&arc_block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); if i < 2 { chain3 - .internal_process_block(arc_block, Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + arc_block, + Switch::DISABLE_ALL, + ) .expect("process block OK"); } parent = block; @@ -165,7 +177,7 @@ fn bench(c: &mut Criterion) { (0..4).for_each(|_| { let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_ALL, ) @@ -179,7 +191,10 @@ fn bench(c: &mut Criterion) { .take(7) .for_each(|block| { chain1 - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_ALL, + ) .expect("process block OK"); }); (chain1.clone(), blocks) @@ -187,7 +202,10 @@ fn bench(c: &mut Criterion) { |(chain, blocks)| { blocks.into_iter().skip(8).for_each(|block| { chain - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch( + Arc::new(block), + Switch::DISABLE_EXTENSION, + ) .expect("process block OK"); }); }, diff --git 
a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 8c21dddc3b..3e91d27e35 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_crypto::secp::Privkey; use ckb_dao::DaoCalculator; @@ -78,9 +78,9 @@ pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains @@ -296,9 +296,9 @@ pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 1d6041af22..5fc05d661a 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -24,9 +24,19 @@ ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.116.0-pre ckb-error = { path = "../error", version = "= 0.116.0-pre" } ckb-app-config = { path = "../util/app-config", version = "= 0.116.0-pre" } ckb-channel = { path = "../util/channel", version = "= 0.116.0-pre" } +ckb-db = { path = "../db", version = "= 0.116.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.116.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" is_sorted = "0.1.1" +ckb-constant = { path = "../util/constant", version = "= 0.116.0-pre" } +ckb-util = { path = "../util", version = "= 0.116.0-pre" } +crossbeam = "0.8.2" +ckb-network = { path = "../network", version = "= 0.116.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.116.0-pre" } +minstant = "0.1.4" +dashmap = "4.0" +either = "1.11.0" [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.116.0-pre" } @@ -37,7 +47,8 @@ ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.116.0-pre" ckb-network = { path = "../network", version = "= 0.116.0-pre" } lazy_static = "1.4" tempfile.workspace = true -ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre" ,features = ["enable_faketime"]} +ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre", features = ["enable_faketime"] } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.116.0-pre" } [features] default = [] diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs new file mode 100644 index 0000000000..5660f7934e --- /dev/null +++ b/chain/src/chain_controller.rs @@ -0,0 +1,135 @@ +//! CKB chain controller. 
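+//! +//! A minimal usage sketch (illustrative only, not part of this diff; it assumes a controller obtained from `start_chain_services` and an already-assembled `BlockView`): +//! +//! ```ignore +//! use std::sync::Arc; +//! use ckb_types::core::BlockView; +//! +//! fn submit(controller: &ckb_chain::ChainController, block: BlockView) { +//! // Blocks until the verify result arrives over an internal oneshot channel. +//! match controller.blocking_process_block(Arc::new(block)) { +//! Ok(true) => println!("newly verified and stored"), +//! Ok(false) => println!("block had already been verified"), +//! Err(err) => eprintln!("block verification failed: {err}"), +//! } +//! } +//! ```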
+#![allow(missing_docs)] + +use crate::utils::orphan_block_pool::OrphanBlockPool; +use crate::{LonelyBlock, ProcessBlockRequest, RemoteBlock, TruncateRequest, VerifyResult}; +use ckb_channel::Sender; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, error}; +use ckb_store::ChainDB; +use ckb_types::{ + core::{service::Request, BlockView}, + packed::Byte32, +}; +use ckb_verification_traits::Switch; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +/// Controller to the chain service. +/// +/// The controller is internally reference-counted and can be freely cloned. +/// +/// A controller can invoke ChainService methods. +#[cfg_attr(feature = "mock", faux::create)] +#[derive(Clone)] +pub struct ChainController { + process_block_sender: Sender<ProcessBlockRequest>, + truncate_sender: Sender<TruncateRequest>, + orphan_block_broker: Arc<OrphanBlockPool>, + + is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>, +} + +#[cfg_attr(feature = "mock", faux::methods)] +impl ChainController { + pub(crate) fn new( + process_block_sender: Sender<ProcessBlockRequest>, + truncate_sender: Sender<TruncateRequest>, + orphan_block_broker: Arc<OrphanBlockPool>, + is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>, + ) -> Self { + ChainController { + process_block_sender, + truncate_sender, + orphan_block_broker, + is_verifying_unverified_blocks_on_startup, + } + } + + pub fn is_verifying_unverified_blocks_on_startup(&self) -> bool { + self.is_verifying_unverified_blocks_on_startup + .load(std::sync::atomic::Ordering::Acquire) + } + + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { + let lonely_block = LonelyBlock { + block: remote_block.block, + verify_callback: Some(remote_block.verify_callback), + switch: None, + }; + self.asynchronous_process_lonely_block(lonely_block); + } + + pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + if Request::call(&self.process_block_sender, lonely_block).is_none() { + error!("Chain service has gone") + } + } + + /// `MinerRpc::submit_block` and `ckb import` need this blocking way to process a block + pub fn blocking_process_block(&self, block: Arc<BlockView>) -> VerifyResult { + self.blocking_process_block_internal(block, None) + } + + /// `IntegrationTestRpcImpl::process_block_without_verify` needs this + pub fn blocking_process_block_with_switch( + &self, + block: Arc<BlockView>, + switch: Switch, + ) -> VerifyResult { + self.blocking_process_block_internal(block, Some(switch)) + } + + fn blocking_process_block_internal( + &self, + block: Arc<BlockView>, + switch: Option<Switch>, + ) -> VerifyResult { + let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::<VerifyResult>(); + + let verify_callback = { + move |result: VerifyResult| { + if let Err(err) = verify_result_tx.send(result) { + error!( + "blocking send verify_result failed: {}, this shouldn't happen", + err + ) + } + } + }; + + let lonely_block = LonelyBlock { + block, + switch, + verify_callback: Some(Box::new(verify_callback)), + }; + + self.asynchronous_process_lonely_block(lonely_block); + verify_result_rx.recv().unwrap_or_else(|err| { + Err(InternalErrorKind::System + .other(format!("blocking recv verify_result failed: {}", err)) + .into()) + }) + } + + /// Truncate the chain to the specified target + /// + /// Should be used for testing only + pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> { + Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| { + Err(InternalErrorKind::System + .other("Chain service has gone") + .into()) + }) + }
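+ + // A hedged sketch of the asynchronous path (illustration only, not part of this diff; it assumes ckb-sync/ckb-relayer build the `RemoteBlock`, and the ban-peer handling inside the callback is a placeholder): + // + // let remote_block = RemoteBlock { + // block: Arc::new(block), + // verify_callback: Box::new(move |result: VerifyResult| { + // if result.is_err() { + // // e.g. ban the peer that relayed this block + // } + // }), + // }; + // chain_controller.asynchronous_process_remote_block(remote_block);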
+ + /// `Relayer::reconstruct_block` needs this + pub fn get_orphan_block(&self, store: &ChainDB, hash: &Byte32) -> Option<Arc<BlockView>> { + self.orphan_block_broker.get_block(store, hash) + } + + /// The `NetRpcImpl::sync_state` RPC needs this + pub fn orphan_blocks_len(&self) -> usize { + self.orphan_block_broker.len() + } +} diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs new file mode 100644 index 0000000000..a1b3a05e2a --- /dev/null +++ b/chain/src/chain_service.rs @@ -0,0 +1,152 @@ +//! CKB chain service. +#![allow(missing_docs)] + +use crate::orphan_broker::OrphanBroker; +use crate::{LonelyBlock, ProcessBlockRequest}; +use ckb_channel::{select, Receiver}; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, debug, error, info, warn}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::shared::Shared; +use ckb_stop_handler::new_crossbeam_exit_rx; +use ckb_types::core::{service::Request, BlockView}; +use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; +use ckb_verification_traits::Verifier; + +/// Chain background service that receives `LonelyBlock`s and only does `non_contextual_verify` +pub(crate) struct ChainService { + shared: Shared, + process_block_rx: Receiver<ProcessBlockRequest>, + orphan_broker: OrphanBroker, +} +impl ChainService { + /// Create a new ChainService instance with shared. + pub(crate) fn new( + shared: Shared, + process_block_rx: Receiver<ProcessBlockRequest>, + consume_orphan: OrphanBroker, + ) -> ChainService { + ChainService { + shared, + process_block_rx, + orphan_broker: consume_orphan, + } + } + + /// Receive blocks from `process_block_rx` and do `non_contextual_verify` + pub(crate) fn start_process_block(self) { + let signal_receiver = new_crossbeam_exit_rx(); + + let clean_expired_orphan_timer = + crossbeam::channel::tick(std::time::Duration::from_secs(60)); + + loop { + select! { + recv(self.process_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: lonely_block }) => { + // asynchronous_process_block doesn't interact with tx-pool, + // no need to pause tx-pool's chunk_process here. + let _trace_now = minstant::Instant::now(); + self.asynchronous_process_block(lonely_block); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_async_process_block_duration.observe(_trace_now.elapsed().as_secs_f64()) + } + let _ = responder.send(()); + }, + _ => { + error!("process_block_receiver closed"); + break; + }, + }, + recv(clean_expired_orphan_timer) -> _ => { + self.orphan_broker.clean_expired_orphans(); + }, + recv(signal_receiver) -> _ => { + info!("ChainService received exit signal, exit now"); + break; + } + } + } + } + + fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { + let consensus = self.shared.consensus(); + BlockVerifier::new(consensus).verify(block).map_err(|e| { + debug!("[process_block] BlockVerifier error {:?}", e); + e + })?; + + NonContextualBlockTxsVerifier::new(consensus) + .verify(block) + .map_err(|e| { + debug!( + "[process_block] NonContextualBlockTxsVerifier error {:?}", + e + ); + e + }) + .map(|_| ()) + } + + // `self.non_contextual_verify` is very fast. + fn asynchronous_process_block(&self, lonely_block: LonelyBlock) { + let block_number = lonely_block.block().number(); + let block_hash = lonely_block.block().hash(); + // Skip verifying a genesis block if its hash is equal to our genesis hash; + // otherwise, return an error and ban the peer.
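+ // Callback semantics follow the VerifyResult contract in lib.rs: executing + // the callback with Ok(false) reports an already-known block (here, the + // configured genesis), while Err(..) feeds the caller's ban-peer handling.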
+ if block_number < 1 { + if self.shared.genesis_hash() != block_hash { + warn!( + "receive 0 number block: 0-{}, expect genesis hash: {}", + block_hash, + self.shared.genesis_hash() + ); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); + let error = InternalErrorKind::System + .other("Invalid genesis block received") + .into(); + lonely_block.execute_callback(Err(error)); + } else { + warn!("receive 0 number block: 0-{}", block_hash); + lonely_block.execute_callback(Ok(false)); + } + return; + } + + if lonely_block.switch().is_none() + || matches!(lonely_block.switch(), Some(switch) if !switch.disable_non_contextual()) + { + let result = self.non_contextual_verify(lonely_block.block()); + if let Err(err) = result { + error!( + "block {}-{} verify failed: {:?}", + block_number, block_hash, err + ); + self.shared + .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID); + lonely_block.execute_callback(Err(err)); + return; + } + } + + if let Err(err) = self.insert_block(&lonely_block) { + error!( + "insert block {}-{} failed: {:?}", + block_number, block_hash, err + ); + self.shared.block_status_map().remove(&block_hash); + lonely_block.execute_callback(Err(err)); + return; + } + + self.orphan_broker.process_lonely_block(lonely_block.into()); + } + + fn insert_block(&self, lonely_block: &LonelyBlock) -> Result<(), ckb_error::Error> { + let db_txn = self.shared.store().begin_transaction(); + db_txn.insert_block(lonely_block.block())?; + db_txn.commit()?; + Ok(()) + } +} diff --git a/chain/src/chain.rs b/chain/src/consume_unverified.rs similarity index 64% rename from chain/src/chain.rs rename to chain/src/consume_unverified.rs index c1915ed48e..a60a34347a 100644 --- a/chain/src/chain.rs +++ b/chain/src/consume_unverified.rs @@ -1,414 +1,261 @@ -//! CKB chain service. 
-#![allow(missing_docs)] - -use ckb_channel::{self as channel, select, Sender}; -use ckb_error::{Error, InternalErrorKind}; +use crate::{delete_unverified_block, UnverifiedBlock}; +use crate::{utils::forkchanges::ForkChanges, GlobalIndex, TruncateRequest, VerifyResult}; +use ckb_channel::{select, Receiver}; +use ckb_error::{is_internal_db_error, Error, InternalErrorKind}; +use ckb_logger::internal::{log_enabled, trace}; use ckb_logger::Level::Trace; -use ckb_logger::{ - self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn, -}; +use ckb_logger::{debug, error, info, log_enabled_target, trace_target}; use ckb_merkle_mountain_range::leaf_index_to_mmr_size; use ckb_proposal_table::ProposalTable; -use ckb_shared::shared::Shared; -use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::Shared; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; -use ckb_types::{ - core::{ - cell::{ - resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, - ResolvedTransaction, - }, - hardfork::HardForks, - service::Request, - BlockExt, BlockNumber, BlockView, Cycle, HeaderView, - }, - packed::{Byte32, ProposalShortId}, - utilities::merkle_mountain_range::ChainRootMMR, - U256, +use ckb_tx_pool::TxPoolController; +use ckb_types::core::cell::{ + resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction, }; +use ckb_types::core::{service::Request, BlockExt, BlockNumber, BlockView, Cycle, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_types::utilities::merkle_mountain_range::ChainRootMMR; +use ckb_types::H256; use ckb_verification::cache::Completed; -use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; +use ckb_verification::InvalidParentError; use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext}; -use ckb_verification_traits::{Switch, Verifier}; -#[cfg(debug_assertions)] -use is_sorted::IsSorted; -use std::collections::{HashSet, VecDeque}; +use ckb_verification_traits::Switch; +use dashmap::DashSet; +use std::cmp; +use std::collections::HashSet; use std::sync::Arc; -use std::time::Instant; -use std::{cmp, thread}; - -type ProcessBlockRequest = Request<(Arc<BlockView>, Switch), Result<bool, Error>>; -type TruncateRequest = Request<Byte32, Result<(), Error>>; - -/// Controller to the chain service. -/// -/// The controller is internally reference-counted and can be freely cloned. -/// -/// A controller can invoke [`ChainService`] methods. -#[cfg_attr(feature = "mock", faux::create)] -#[derive(Clone)] -pub struct ChainController { - process_block_sender: Sender<ProcessBlockRequest>, - truncate_sender: Sender<TruncateRequest>, // Used for testing only -} - -#[cfg_attr(feature = "mock", faux::methods)] -impl ChainController { - pub fn new( - process_block_sender: Sender<ProcessBlockRequest>, - truncate_sender: Sender<TruncateRequest>, - ) -> Self { - ChainController { - process_block_sender, - truncate_sender, - } - } - /// Inserts the block into database. - /// - /// Expects the block's header to be valid and already verified. - /// - /// If the block already exists, does nothing and false is returned.
- /// - /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will be performed - pub fn process_block(&self, block: Arc<BlockView>) -> Result<bool, Error> { - self.internal_process_block(block, Switch::NONE) - } - - /// Internal method to insert a block for tests - /// - /// switch bit flags skip particular verifications, making it easier to generate test data - pub fn internal_process_block( - &self, - block: Arc<BlockView>, - switch: Switch, - ) -> Result<bool, Error> { - Request::call(&self.process_block_sender, (block, switch)).unwrap_or_else(|| { - Err(InternalErrorKind::System - .other("Chain service has gone") - .into()) - }) - } - /// Truncate the chain to the specified target - /// - /// Should be used for testing only - pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> { - Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| { - Err(InternalErrorKind::System - .other("Chain service has gone") - .into()) - }) - } +pub(crate) struct ConsumeUnverifiedBlockProcessor { + pub(crate) shared: Shared, + pub(crate) is_pending_verify: Arc<DashSet<Byte32>>, + pub(crate) proposal_table: ProposalTable, } -/// The struct representing a fork -#[derive(Debug, Default)] -pub struct ForkChanges { - /// Blocks attached to index after forks - pub(crate) attached_blocks: VecDeque<BlockView>, - /// Blocks detached from index after forks - pub(crate) detached_blocks: VecDeque<BlockView>, - /// HashSet with proposal_id detached to index after forks - pub(crate) detached_proposal_id: HashSet<ProposalShortId>, - /// to be updated exts - pub(crate) dirty_exts: VecDeque<BlockExt>, -} +pub(crate) struct ConsumeUnverifiedBlocks { + tx_pool_controller: TxPoolController, -impl ForkChanges { - /// blocks attached to index after forks - pub fn attached_blocks(&self) -> &VecDeque<BlockView> { - &self.attached_blocks - } + unverified_block_rx: Receiver<UnverifiedBlock>, + truncate_block_rx: Receiver<TruncateRequest>, - /// blocks detached from index after forks - pub fn detached_blocks(&self) -> &VecDeque<BlockView> { - &self.detached_blocks - } - - /// proposal_id detached to index after forks - pub fn detached_proposal_id(&self) -> &HashSet<ProposalShortId> { - &self.detached_proposal_id - } - - /// whether there are any blocks to be detached - pub fn has_detached(&self) -> bool { - !self.detached_blocks.is_empty() - } - - /// cached verified attached block num - pub fn verified_len(&self) -> usize { - self.attached_blocks.len() - self.dirty_exts.len() - } - - /// assertion to make sure attached_blocks and detached_blocks are sorted - #[cfg(debug_assertions)] - pub fn is_sorted(&self) -> bool { - IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| { - blk.header().number() - }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| { - blk.header().number() - }) - } - - pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool { - let hardfork_during_detach = - self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks); - let hardfork_during_attach = - self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks); - - hardfork_during_detach || hardfork_during_attach - } - - fn check_if_hardfork_during_blocks( - &self, - hardfork: &HardForks, - blocks: &VecDeque<BlockView>, - ) -> bool { - if blocks.is_empty() { - false - } else { - // This method assumes that the input blocks are sorted and unique.
- let rfc_0049 = hardfork.ckb2023.rfc_0049(); - let epoch_first = blocks.front().unwrap().epoch().number(); - let epoch_next = blocks - .back() - .unwrap() - .epoch() - .minimum_epoch_number_after_n_blocks(1); - epoch_first < rfc_0049 && rfc_0049 <= epoch_next - } - } -} - -pub(crate) struct GlobalIndex { - pub(crate) number: BlockNumber, - pub(crate) hash: Byte32, - pub(crate) unseen: bool, + stop_rx: Receiver<()>, + processor: ConsumeUnverifiedBlockProcessor, } -impl GlobalIndex { - pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex { - GlobalIndex { - number, - hash, - unseen, +impl ConsumeUnverifiedBlocks { + pub(crate) fn new( + shared: Shared, + unverified_blocks_rx: Receiver<UnverifiedBlock>, + truncate_block_rx: Receiver<TruncateRequest>, + proposal_table: ProposalTable, + is_pending_verify: Arc<DashSet<Byte32>>, + stop_rx: Receiver<()>, + ) -> Self { + ConsumeUnverifiedBlocks { + tx_pool_controller: shared.tx_pool_controller().to_owned(), + unverified_block_rx: unverified_blocks_rx, + truncate_block_rx, + stop_rx, + processor: ConsumeUnverifiedBlockProcessor { + shared, + is_pending_verify, + proposal_table, + }, + } } - pub(crate) fn forward(&mut self, hash: Byte32) { - self.number -= 1; - self.hash = hash; - } -} - -/// Chain background service -/// -/// The ChainService provides a single-threaded background executor. -pub struct ChainService { - shared: Shared, - proposal_table: ProposalTable, -} - -impl ChainService { - /// Create a new ChainService instance with shared and initial proposal_table. - pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { - ChainService { - shared, - proposal_table, - } - } + pub(crate) fn start(mut self) { + loop { + let _trace_begin_loop = minstant::Instant::now(); + select! { + recv(self.unverified_block_rx) -> msg => match msg { + Ok(unverified_task) => { + // process this unverified block + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_consume_unverified_block_waiting_block_duration.observe(_trace_begin_loop.elapsed().as_secs_f64()) + } + let _ = self.tx_pool_controller.suspend_chunk_process(); - /// start a background single-threaded service with the specified thread_name. - pub fn start<S: ToString>(mut self, thread_name: Option<S>) -> ChainController { - let signal_receiver = new_crossbeam_exit_rx(); - let (process_block_sender, process_block_receiver) = channel::bounded(0); - let (truncate_sender, truncate_receiver) = channel::bounded(0); + let _trace_now = minstant::Instant::now(); + self.processor.consume_unverified_blocks(unverified_task); + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_consume_unverified_block_duration.observe(_trace_now.elapsed().as_secs_f64()) + } - // Mainly for test: give an empty thread_name - let mut thread_builder = thread::Builder::new(); - if let Some(name) = thread_name { - thread_builder = thread_builder.name(name.to_string()); - } - let tx_control = self.shared.tx_pool_controller().clone(); - - let chain_jh = thread_builder - .spawn(move || loop { - select!
{ - recv(process_block_receiver) -> msg => match msg { - Ok(Request { responder, arguments: (block, verify) }) => { - let instant = Instant::now(); - - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block(block, verify)); - let _ = tx_control.continue_chunk_process(); - - if let Some(metrics) = ckb_metrics::handle() { - metrics - .ckb_block_process_duration - .observe(instant.elapsed().as_secs_f64()); - } - }, - _ => { - error!("process_block_receiver closed"); - break; - }, + let _ = self.tx_pool_controller.continue_chunk_process(); }, - recv(truncate_receiver) -> msg => match msg { - Ok(Request { responder, arguments: target_tip_hash }) => { - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.truncate(&target_tip_hash)); - let _ = tx_control.continue_chunk_process(); - }, - _ => { - error!("truncate_receiver closed"); - break; - }, + Err(err) => { + error!("unverified_block_rx err: {}", err); + return; }, + }, + recv(self.truncate_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: target_tip_hash }) => { + let _ = self.tx_pool_controller.suspend_chunk_process(); + let _ = responder.send(self.processor.truncate(&target_tip_hash)); + let _ = self.tx_pool_controller.continue_chunk_process(); + }, + Err(err) => { + info!("truncate_block_tx has been closed, err: {}", err); + return; + }, + }, + recv(self.stop_rx) -> _ => { + info!("consume_unverified_blocks thread received exit signal, exit now"); + break; } - recv(signal_receiver) -> _ => { - info!("ChainService received exit signal, exit now"); - break; - } + } } - }) - .expect("Start ChainService failed"); - - register_thread("ChainService", chain_jh); - ChainController::new(process_block_sender, truncate_sender) - } - - fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { - let mut fork = ForkChanges::default(); - let store = self.shared.store(); - for bn in (target.number() + 1)..=current_tip.number() { - let hash = store.get_block_hash(bn).expect("index checked"); - let old_block = store.get_block(&hash).expect("index checked"); - fork.detached_blocks.push_back(old_block); } - is_sorted_assert(&fork); - fork } +} - // Truncate the main chain - // Use for testing only, can only truncate less than 50000 blocks each time - pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { - let snapshot = Arc::clone(&self.shared.snapshot()); - assert!(snapshot.is_main_chain(target_tip_hash)); - - let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); - let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); - let target_epoch_ext = snapshot - .get_block_epoch_index(target_tip_hash) - .and_then(|index| snapshot.get_epoch_ext(&index)) - .expect("checked"); - let origin_proposals = snapshot.proposals(); - - let block_count = snapshot - .tip_header() - .number() - .saturating_sub(target_tip_header.number()); - - if block_count > 5_0000 { - let err = format!( - "trying to truncate too many blocks: {}, exceed 50000", - block_count - ); - return Err(InternalErrorKind::Database.other(err).into()); - } - let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); - let db_txn = self.shared.store().begin_transaction(); - self.rollback(&fork, &db_txn)?; - db_txn.insert_tip_header(&target_tip_header)?; - db_txn.insert_current_epoch_ext(&target_epoch_ext)?; - // Currently, we only move the target tip header here, we don't delete the block for performance - // TODO: delete the blocks if we need in the future - db_txn.commit()?; - self.update_proposal_table(&fork); - let (detached_proposal_id, new_proposals) = self - .proposal_table - .finalize(origin_proposals, target_tip_header.number()); - fork.detached_proposal_id = detached_proposal_id; - let new_snapshot = self.shared.new_snapshot( - target_tip_header, - target_block_ext.total_difficulty, - target_epoch_ext, - new_proposals, - ); - self.shared.store_snapshot(Arc::clone(&new_snapshot)); - // NOTE: Don't update tx-pool when truncating - Ok(()) - } +impl ConsumeUnverifiedBlockProcessor { + pub(crate) fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { + let UnverifiedBlock { + block, + switch, + verify_callback, + parent_header, + } = unverified_block; + let block_hash = block.hash(); + //
process this unverified block + let verify_result = self.verify_block(&block, &parent_header, switch); + match &verify_result { + Ok(_) => { + let log_now = std::time::Instant::now(); + self.shared.remove_block_status(&block_hash); + let log_elapsed_remove_block_status = log_now.elapsed(); + self.shared.remove_header_view(&block_hash); + debug!( + "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", + block_hash, + log_elapsed_remove_block_status, + log_now.elapsed() + ); + } + Err(err) => { + error!("verify block {} failed: {}", block_hash, err); + let tip = self + .shared + .store() + .get_tip_header() + .expect("tip_header must exist"); + let tip_ext = self + .shared + .store() + .get_block_ext(&tip.hash()) + .expect("tip header's ext must exist"); + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + tip.clone().number(), + tip.clone().hash(), + tip_ext.total_difficulty, + )); + self.delete_unverified_block(&block); + if !is_internal_db_error(err) { + self.shared + .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); + } else { + error!("internal db error, remove block status: {}", block_hash); + self.shared.remove_block_status(&block_hash); + } + error!( + "set_unverified tip to {}-{}, because verify {} failed: {}", + tip.number(), + tip.hash(), + block_hash, + err + ); + } + } + self.is_pending_verify.remove(&block_hash); + if let Some(callback) = verify_callback { + callback(verify_result); + } + } + fn delete_unverified_block(&self, block: &BlockView) { + delete_unverified_block( + self.shared.store(), + block.hash(), + block.number(), + block.parent_hash(), + ) + } + fn verify_block( + &mut self, + block: &BlockView, + parent_header: &HeaderView, + switch: Option<Switch>, + ) -> VerifyResult { + let switch: Switch = switch.unwrap_or_else(|| { + let mut assume_valid_target = self.shared.assume_valid_target(); + match *assume_valid_target { + Some(ref target) => { + // if the target has been reached, delete it + if target + == &ckb_types::prelude::Unpack::<H256>::unpack(&BlockView::hash(block)) + { + assume_valid_target.take(); + Switch::NONE + } else { + Switch::DISABLE_SCRIPT + } + } + None => Switch::NONE, + } + });
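+ // How the assume_valid_target fast path plays out (descriptive, matching the + // logic above): blocks below the target verify with Switch::DISABLE_SCRIPT, + // the target block itself clears the target, and every later block falls + // back to full Switch::NONE verification.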
+ let block_hash = block.hash(); + let parent_hash = block.parent_hash(); + { + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_status.eq(&BlockStatus::BLOCK_INVALID) { + return Err(InternalErrorKind::Other + .other(format!( + "block: {}'s parent: {} previously failed verification", + block_hash, parent_hash + )) + .into()); + } } + let parent_ext = self.shared.store().get_block_ext(&parent_hash).ok_or( + InternalErrorKind::Other.other(format!( + "block: {}'s parent: {}'s block ext not found", + block_hash, parent_hash + )), + )?; + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + if let Some(verified) = ext.verified { + debug!( + "block {}-{} has been verified, previously verified result: {}", + block.number(), + block.hash(), + verified + ); + return if verified { + Ok(false) + } else { + Err(InternalErrorKind::Other + .other("block previously failed verification") + .into()) + }; + } + } + let cannon_total_difficulty = parent_ext.total_difficulty.to_owned() + block.header().difficulty(); @@ -419,16 +266,6 @@ .into()); } - db_txn.insert_block(&block)?; - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &txn_snapshot.borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = next_block_epoch.epoch(); - let ext = BlockExt { received_at: unix_time_as_millis(), total_difficulty: cannon_total_difficulty.clone(), @@ -439,46 +276,60 @@ txs_sizes: None, }; - db_txn.insert_block_epoch_index( - &block.header().hash(), - &epoch.last_block_hash_in_previous_epoch(), - )?; - if new_epoch { - db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; - } - let shared_snapshot = Arc::clone(&self.shared.snapshot()); let origin_proposals = shared_snapshot.proposals(); let current_tip_header = shared_snapshot.tip_header(); - let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); - debug!( - "Current difficulty = {:#x}, cannon = {:#x}", - current_total_difficulty, cannon_total_difficulty, - ); // is_better_than let new_best_block = cannon_total_difficulty > current_total_difficulty; + let mut fork = ForkChanges::default(); + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(parent_header,
&self.shared.store().borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_tip_hash = db_txn.get_update_for_tip_hash(&txn_snapshot); + + db_txn.insert_block_epoch_index( + &block.header().hash(), + &epoch.last_block_hash_in_previous_epoch(), + )?; + if new_epoch { + db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; + } + if new_best_block { - debug!( - "Newly found best block : {} => {:#x}, difficulty diff = {:#x}", + info!( + "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", block.header().number(), block.header().hash(), - &cannon_total_difficulty - &current_total_difficulty + &cannon_total_difficulty - &current_total_difficulty, + self.shared.get_unverified_tip().number(), ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.find_fork(&mut fork, current_tip_header.number(), block, ext); self.rollback(&fork, &db_txn)?; // update and verify chain root // MUST update index before reconcile_main_chain + let begin_reconcile_main_chain = std::time::Instant::now(); self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; + trace!( + "reconcile_main_chain cost {:?}", + begin_reconcile_main_chain.elapsed() + ); db_txn.insert_tip_header(&block.header())?; if new_epoch || fork.has_detached() { db_txn.insert_current_epoch_ext(&epoch)?; } - total_difficulty = cannon_total_difficulty.clone(); } else { db_txn.insert_block_ext(&block.header().hash(), &ext)?; } @@ -491,7 +342,7 @@ tip_header.number(), tip_header.hash(), tip_header.epoch(), - total_difficulty, + cannon_total_difficulty, block.transactions().len() ); @@ -503,7 +354,7 @@ let new_snapshot = self.shared - .new_snapshot(tip_header, total_difficulty, epoch, new_proposals); + .new_snapshot(tip_header, cannon_total_difficulty, epoch, new_proposals); self.shared.store_snapshot(Arc::clone(&new_snapshot)); @@ -515,15 +366,14 @@ fork.detached_proposal_id().clone(), new_snapshot, ) { - error!("Notify update_tx_pool_for_reorg error {}", e); + error!("[verify block] notify update_tx_pool_for_reorg error {}", e); } } - let block_ref: &BlockView = &block; self.shared .notify_controller() - .notify_new_block(block_ref.clone()); - if log_enabled!(ckb_logger::Level::Debug) { + .notify_new_block(block.to_owned()); + if log_enabled!(ckb_logger::Level::Trace) { self.print_chain(10); } if let Some(metrics) = ckb_metrics::handle() { @@ -532,7 +382,7 @@ } else { self.shared.refresh_snapshot(); info!( - "uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + "[verify block] uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", block.header().number(), block.header().hash(), block.header().epoch(), @@ -542,13 +392,11 @@ let tx_pool_controller = self.shared.tx_pool_controller(); if tx_pool_controller.service_started() { - let block_ref: &BlockView = &block; - if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { - error!("Notify new_uncle error {}", e); + if let Err(e) = tx_pool_controller.notify_new_uncle(block.as_uncle()) { + error!("[verify block] notify new_uncle error {}", e); } } } - Ok(true) } @@ -585,7 +433,7 @@ let proposal_start = cmp::max(1, (new_tip +
1).saturating_sub(proposal_window.farthest())); - debug!("Reload_proposal_table [{}, {}]", proposal_start, common); + debug!("reload_proposal_table [{}, {}]", proposal_start, common); for bn in proposal_start..=common { let blk = self .shared @@ -776,7 +624,13 @@ { if !switch.disable_all() { if found_error.is_none() { + let log_now = std::time::Instant::now(); let resolved = self.resolve_block_transactions(&txn, b, &verify_context); + debug!( + "resolve_block_transactions {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); match resolved { Ok(resolved) => { let verified = { @@ -787,7 +641,14 @@ Arc::clone(&txs_verify_cache), &mmr, ); - contextual_block_verifier.verify(&resolved, b) + let log_now = std::time::Instant::now(); + let verify_result = contextual_block_verifier.verify(&resolved, b); + debug!( + "contextual_block_verifier {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); + verify_result }; match verified { Ok((cycles, cache_entries)) => { @@ -939,13 +800,13 @@ fn print_error(&self, b: &BlockView, err: &Error) { error!( - "Block verify error. Block number: {}, hash: {}, error: {:?}", + "block verify error, block number: {}, hash: {}, error: {:?}", b.header().number(), b.header().hash(), err ); if log_enabled!(ckb_logger::Level::Trace) { - trace!("Block {}", b.data()); + trace!("block {}", b); } } @@ -968,6 +829,64 @@ debug!("}}"); } + + fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { + let mut fork = ForkChanges::default(); + let store = self.shared.store(); + for bn in (target.number() + 1)..=current_tip.number() { + let hash = store.get_block_hash(bn).expect("index checked"); + let old_block = store.get_block(&hash).expect("index checked"); + fork.detached_blocks.push_back(old_block); + } + is_sorted_assert(&fork); + fork + } + + // Truncate the main chain + // Use for testing only + pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { + let snapshot = Arc::clone(&self.shared.snapshot()); + assert!(snapshot.is_main_chain(target_tip_hash)); + + let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); + let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); + let target_epoch_ext = snapshot + .get_block_epoch_index(target_tip_hash) + .and_then(|index| snapshot.get_epoch_ext(&index)) + .expect("checked"); + let origin_proposals = snapshot.proposals(); + let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); + + let db_txn = self.shared.store().begin_transaction(); + self.rollback(&fork, &db_txn)?; + + db_txn.insert_tip_header(&target_tip_header)?; + db_txn.insert_current_epoch_ext(&target_epoch_ext)?; + + for blk in fork.attached_blocks() { + db_txn.delete_block(blk)?; + } + db_txn.commit()?; + + self.update_proposal_table(&fork); + let (detached_proposal_id, new_proposals) = self + .proposal_table + .finalize(origin_proposals, target_tip_header.number()); + fork.detached_proposal_id = detached_proposal_id; + + let new_snapshot = self.shared.new_snapshot( + target_tip_header, + target_block_ext.total_difficulty, + target_epoch_ext, + new_proposals, + ); + + self.shared.store_snapshot(Arc::clone(&new_snapshot)); + + // NOTE: Don't update tx-pool when truncating + + Ok(()) + } } #[cfg(debug_assertions)] diff --git a/chain/src/init.rs b/chain/src/init.rs new file mode 100644 index 0000000000..4dc9d2d919 --- /dev/null +++ b/chain/src/init.rs
@@ -0,0 +1,135 @@ +#![allow(missing_docs)] + +//! Bootstrap InitLoadUnverified, PreloadUnverifiedBlock, ChainService and ConsumeUnverified threads. +use crate::chain_service::ChainService; +use crate::consume_unverified::ConsumeUnverifiedBlocks; +use crate::init_load_unverified::InitLoadUnverified; +use crate::orphan_broker::OrphanBroker; +use crate::preload_unverified_blocks_channel::PreloadUnverifiedBlocksChannel; +use crate::utils::orphan_block_pool::OrphanBlockPool; +use crate::{chain_controller::ChainController, LonelyBlockHash, UnverifiedBlock}; +use ckb_channel::{self as channel, SendError}; +use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; +use ckb_logger::warn; +use ckb_shared::ChainServicesBuilder; +use ckb_stop_handler::register_thread; +use ckb_types::packed::Byte32; +use dashmap::DashSet; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; +use std::thread; + +const ORPHAN_BLOCK_SIZE: usize = BLOCK_DOWNLOAD_WINDOW as usize; + +pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController { + let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE)); + + let (truncate_block_tx, truncate_block_rx) = channel::bounded(1); + + let (preload_unverified_stop_tx, preload_unverified_stop_rx) = ckb_channel::bounded::<()>(1); + + let (preload_unverified_tx, preload_unverified_rx) = + channel::bounded::<LonelyBlockHash>(BLOCK_DOWNLOAD_WINDOW as usize * 10); + + let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1); + let (unverified_block_tx, unverified_block_rx) = channel::bounded::<UnverifiedBlock>(128usize); + + let is_pending_verify: Arc<DashSet<Byte32>> = Arc::new(DashSet::new()); + + let consumer_unverified_thread = thread::Builder::new() + .name("consume_unverified_blocks".into()) + .spawn({ + let shared = builder.shared.clone(); + let is_pending_verify = Arc::clone(&is_pending_verify); + move || { + let consume_unverified = ConsumeUnverifiedBlocks::new( + shared, + unverified_block_rx, + truncate_block_rx, + builder.proposal_table, + is_pending_verify, + unverified_queue_stop_rx, + ); + + consume_unverified.start(); + } + }) + .expect("start unverified_queue consumer thread should ok"); + + let preload_unverified_block_thread = thread::Builder::new() + .name("preload_unverified_block".into()) + .spawn({ + let shared = builder.shared.clone(); + move || { + let preload_unverified_block = PreloadUnverifiedBlocksChannel::new( + shared, + preload_unverified_rx, + unverified_block_tx, + preload_unverified_stop_rx, + ); + preload_unverified_block.start() + } + }) + .expect("start preload_unverified_block should ok"); + + let (process_block_tx, process_block_rx) = channel::bounded(0); + + let is_verifying_unverified_blocks_on_startup = Arc::new(AtomicBool::new(true)); + + let chain_controller = ChainController::new( + process_block_tx, + truncate_block_tx, + Arc::clone(&orphan_blocks_broker), + Arc::clone(&is_verifying_unverified_blocks_on_startup), + ); + + let init_load_unverified_thread = thread::Builder::new() + .name("init_load_unverified_blocks".into()) + .spawn({ + let chain_controller = chain_controller.clone(); + let shared = builder.shared.clone(); + + move || { + let init_load_unverified: InitLoadUnverified = InitLoadUnverified::new( + shared, + chain_controller, + is_verifying_unverified_blocks_on_startup, + ); + init_load_unverified.start(); + } + }) + .expect("start init_load_unverified thread should ok"); + + let consume_orphan = OrphanBroker::new( + builder.shared.clone(), + orphan_blocks_broker, + preload_unverified_tx, + is_pending_verify, + );
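+ + // Wiring summary (descriptive): ChainService (non-contextual verify) feeds + // OrphanBroker, which resolves parents and pushes LonelyBlockHash into the + // preload channel; PreloadUnverifiedBlocksChannel loads blocks back from the + // store and hands UnverifiedBlock to ConsumeUnverifiedBlocks for contextual + // verification, while InitLoadUnverified replays blocks left unverified at + // the last shutdown.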
+ let chain_service: ChainService = + ChainService::new(builder.shared, process_block_rx, consume_orphan); + let chain_service_thread = thread::Builder::new() + .name("ChainService".into()) + .spawn({ + move || { + chain_service.start_process_block(); + + let _ = init_load_unverified_thread.join(); + + if preload_unverified_stop_tx.send(()).is_err() { + warn!("trying to notify preload unverified thread to stop, but preload_unverified_stop_tx already closed"); + } + let _ = preload_unverified_block_thread.join(); + + if let Err(SendError(_)) = unverified_queue_stop_tx.send(()) { + warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed"); + } + let _ = consumer_unverified_thread.join(); + } + }) + .expect("start chain_service thread should ok"); + register_thread("ChainService", chain_service_thread); + + chain_controller +} diff --git a/chain/src/init_load_unverified.rs b/chain/src/init_load_unverified.rs new file mode 100644 index 0000000000..e2c4ebae00 --- /dev/null +++ b/chain/src/init_load_unverified.rs @@ -0,0 +1,122 @@ +use crate::utils::orphan_block_pool::EXPIRED_EPOCH; +use crate::{ChainController, LonelyBlock}; +use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW; +use ckb_db::{Direction, IteratorMode}; +use ckb_db_schema::COLUMN_NUMBER_HASH; +use ckb_logger::info; +use ckb_shared::Shared; +use ckb_stop_handler::has_received_stop_signal; +use ckb_store::ChainStore; +use ckb_types::core::{BlockNumber, BlockView}; +use ckb_types::packed; +use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Pack, Reader}; +use std::cmp; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +pub(crate) struct InitLoadUnverified { + shared: Shared, + chain_controller: ChainController, + is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>, +} + +impl InitLoadUnverified { + pub(crate) fn new( + shared: Shared, + chain_controller: ChainController, + is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>, + ) -> Self { + InitLoadUnverified { + shared, + chain_controller, + is_verifying_unverified_blocks_on_startup, + } + } + + fn find_unverified_block_hashes(&self, check_unverified_number: u64) -> Vec<packed::Byte32> { + let pack_number: packed::Uint64 = check_unverified_number.pack(); + let prefix = pack_number.as_slice(); + + // If a block has `COLUMN_NUMBER_HASH` but not `BlockExt`, + // it indicates an unverified block inserted during the last shutdown.
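+ // i.e. scan keys whose number prefix matches, decode NumberHash to recover + // the block hash, and keep only hashes with no BlockExt yet (BlockExt is + // written only once a block completes verification).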
+ let unverified_hashes: Vec<packed::Byte32> = self + .shared + .store() + .get_iter( + COLUMN_NUMBER_HASH, + IteratorMode::From(prefix, Direction::Forward), + ) + .take_while(|(key, _)| key.starts_with(prefix)) + .map(|(key_number_hash, _v)| { + let reader = + packed::NumberHashReader::from_slice_should_be_ok(key_number_hash.as_ref()); + let unverified_block_hash = reader.block_hash().to_entity(); + unverified_block_hash + }) + .filter(|hash| self.shared.store().get_block_ext(hash).is_none()) + .collect::<Vec<packed::Byte32>>(); + unverified_hashes + } + + pub(crate) fn start(&self) { + info!( + "finding unverified blocks, current tip: {}-{}", + self.shared.snapshot().tip_number(), + self.shared.snapshot().tip_hash() + ); + + self.find_and_verify_unverified_blocks(); + + self.is_verifying_unverified_blocks_on_startup + .store(false, std::sync::atomic::Ordering::Release); + info!("find unverified blocks finished"); + } + + fn find_unverified_blocks<F>(&self, f: F) + where + F: Fn(&packed::Byte32), + { + let tip_number: BlockNumber = self.shared.snapshot().tip_number(); + let start_check_number = cmp::max( + 1, + tip_number.saturating_sub(EXPIRED_EPOCH * self.shared.consensus().max_epoch_length()), + ); + let end_check_number = tip_number + BLOCK_DOWNLOAD_WINDOW * 10; + + for check_unverified_number in start_check_number..=end_check_number { + if has_received_stop_signal() { + info!("init_unverified_blocks thread received exit signal, exit now"); + return; + } + + // check whether any block at height `check_unverified_number` has a COLUMN_NUMBER_HASH entry in the db + let unverified_hashes: Vec<packed::Byte32> = + self.find_unverified_block_hashes(check_unverified_number); + + for unverified_hash in unverified_hashes { + f(&unverified_hash); + } + } + } + + fn find_and_verify_unverified_blocks(&self) { + self.find_unverified_blocks(|unverified_hash| { + let unverified_block: BlockView = self + .shared + .store() + .get_block(unverified_hash) + .expect("unverified block must be in db"); + + if has_received_stop_signal() { + return; + } + + self.chain_controller + .asynchronous_process_lonely_block(LonelyBlock { + block: Arc::new(unverified_block), + switch: None, + verify_callback: None, + }); + }); + } +} diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 5898633b83..b656243f2c 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + //! CKB chain service. //! //! [`ChainService`] background base on database, handle block importing, @@ -5,7 +7,236 @@ //! //! [`ChainService`]: chain/struct.ChainService.html //!
[`ChainController`]: chain/struct.ChainController.html +use ckb_error::Error; +use ckb_shared::types::BlockNumberAndHash; +use ckb_types::core::service::Request; +use ckb_types::core::{BlockNumber, BlockView, EpochNumber, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_verification_traits::Switch; +use std::sync::Arc; -pub mod chain; +mod chain_controller; +mod chain_service; +pub mod consume_unverified; +mod init; +mod init_load_unverified; +mod orphan_broker; +mod preload_unverified_blocks_channel; #[cfg(test)] mod tests; +mod utils; + +pub use chain_controller::ChainController; +use ckb_logger::{error, info}; +use ckb_store::{ChainDB, ChainStore}; +use ckb_types::prelude::{Pack, Unpack}; +use ckb_types::H256; +use either::Either; +pub use init::start_chain_services; + +type ProcessBlockRequest = Request<LonelyBlock, ()>; +type TruncateRequest = Request<Byte32, Result<(), Error>>; + +/// VerifyResult is the type returned by block verification +/// +/// Ok(true) : it's a newly verified block +/// Ok(false): it's a block which has been verified before +/// Err(err) : it's a block which failed to verify +pub type VerifyResult = Result<bool, Error>; + +/// VerifyCallback is the callback type invoked after block verification +pub type VerifyCallback = Box<dyn FnOnce(VerifyResult) + Send + Sync>; + +/// RemoteBlock is received from ckb-sync and ckb-relayer +pub struct RemoteBlock { + /// block + pub block: Arc<BlockView>, + + /// Relayer and Synchronizer use this callback, e.g. to ban the peer which sent an invalid block + pub verify_callback: VerifyCallback, +} + +/// LonelyBlock is a block whose parent has not yet been confirmed as stored +pub struct LonelyBlock { + /// block + pub block: Arc<BlockView>, + + /// The Switch to control the verification process + pub switch: Option<Switch>, + + /// The optional verify_callback + pub verify_callback: Option<VerifyCallback>, +} + +/// LonelyBlockHash is the hash-based counterpart of LonelyBlock; it carries the full block only when the block is large +pub struct LonelyBlockHash { + /// block + pub block_number_and_hash: Either<BlockNumberAndHash, Arc<BlockView>>, + + pub parent_hash: Byte32, + + pub epoch_number: EpochNumber, + + /// The Switch to control the verification process + pub switch: Option<Switch>, + + /// The optional verify_callback + pub verify_callback: Option<VerifyCallback>, +} + +impl From<LonelyBlock> for LonelyBlockHash { + fn from(val: LonelyBlock) -> Self { + let LonelyBlock { + block, + switch, + verify_callback, + } = val; + let block_hash_h256: H256 = block.hash().unpack(); + let block_number: BlockNumber = block.number(); + let parent_hash_h256: H256 = block.parent_hash().unpack(); + let block_hash = block_hash_h256.pack(); + let parent_hash = parent_hash_h256.pack(); + + let epoch_number: EpochNumber = block.epoch().number(); + + LonelyBlockHash { + // Blocks over 12800 serialized bytes are kept in memory; smaller blocks carry only their number and hash and are re-loaded from the store before verification. + block_number_and_hash: if block.data().serialized_size_without_uncle_proposals() > 12800 + { + Either::Right(block) + } else { + Either::Left(BlockNumberAndHash { + number: block_number, + hash: block_hash, + }) + }, + parent_hash, + epoch_number, + switch, + verify_callback, + } + } +} + +impl LonelyBlockHash { + pub fn execute_callback(self, verify_result: VerifyResult) { + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } + } + + pub fn number_hash(&self) -> BlockNumberAndHash { + match self.block_number_and_hash.as_ref() { + Either::Left(block_number_and_hash) => block_number_and_hash.to_owned(), + Either::Right(block) => BlockNumberAndHash::new(block.number(), block.hash()), + } + } + + pub fn epoch_number(&self) -> EpochNumber { + self.epoch_number + } + + pub fn hash(&self) -> Byte32 { + self.number_hash().hash() + } + + pub fn parent_hash(&self) -> Byte32 + { + 
self.parent_hash.clone() + } + + pub fn number(&self) -> BlockNumber { + self.number_hash().number() + } +} + +impl LonelyBlock { + pub(crate) fn block(&self) -> &Arc<BlockView> { + &self.block + } + + pub fn switch(&self) -> Option<Switch> { + self.switch + } + + pub fn execute_callback(self, verify_result: VerifyResult) { + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } + } +} + +pub(crate) struct GlobalIndex { + pub(crate) number: BlockNumber, + pub(crate) hash: Byte32, + pub(crate) unseen: bool, +} + +impl GlobalIndex { + pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex { + GlobalIndex { + number, + hash, + unseen, + } + } + + // Step the index back by one block, pointing it at the given (parent) hash. + pub(crate) fn forward(&mut self, hash: Byte32) { + self.number -= 1; + self.hash = hash; + } +} + +/// UnverifiedBlock will be consumed by the ConsumeUnverified thread +struct UnverifiedBlock { + // block + block: Arc<BlockView>, + // the switch to control the verification process + switch: Option<Switch>, + // verify callback + verify_callback: Option<VerifyCallback>, + // parent header + parent_header: HeaderView, +} + +pub(crate) fn delete_unverified_block( + store: &ChainDB, + block_hash: Byte32, + block_number: BlockNumber, + parent_hash: Byte32, +) { + info!( + "parent: {}, deleting block {}-{}", + parent_hash, block_number, block_hash, + ); + + let db_txn = store.begin_transaction(); + let block_op: Option<BlockView> = db_txn.get_block(&block_hash); + match block_op { + Some(block) => { + if let Err(err) = db_txn.delete_block(&block) { + error!( + "delete block {}-{} failed {:?}", + block_number, block_hash, err + ); + return; + } + if let Err(err) = db_txn.commit() { + error!( + "commit delete block {}-{} failed {:?}", + block_number, block_hash, err + ); + return; + } + + info!( + "parent: {}, deleted block {}-{}", + parent_hash, block_number, block_hash, + ); + } + None => { + error!( + "want to delete block {}-{}, but it was not found in the db", + block_number, block_hash + ); + } + } +} diff --git a/chain/src/orphan_broker.rs b/chain/src/orphan_broker.rs new file mode 100644 index 0000000000..c23d5221a6 --- /dev/null +++ b/chain/src/orphan_broker.rs @@ -0,0 +1,216 @@ +#![allow(missing_docs)] + +use crate::utils::orphan_block_pool::{OrphanBlockPool, ParentHash}; +use crate::{delete_unverified_block, LonelyBlockHash, VerifyResult}; +use ckb_channel::Sender; +use ckb_error::InternalErrorKind; +use ckb_logger::internal::trace; +use ckb_logger::{debug, error, info}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use ckb_types::{packed::Byte32, U256}; +use dashmap::DashSet; +use std::sync::Arc; + +pub(crate) struct OrphanBroker { + shared: Shared, + + orphan_blocks_broker: Arc<OrphanBlockPool>, + is_pending_verify: Arc<DashSet<Byte32>>, + preload_unverified_tx: Sender<LonelyBlockHash>, +} + +impl OrphanBroker { + pub(crate) fn new( + shared: Shared, + orphan_block_pool: Arc<OrphanBlockPool>, + preload_unverified_tx: Sender<LonelyBlockHash>, + is_pending_verify: Arc<DashSet<Byte32>>, + ) -> OrphanBroker { + OrphanBroker { + shared: shared.clone(), + orphan_blocks_broker: orphan_block_pool, + is_pending_verify, + preload_unverified_tx, + } + } + + // A leader is a parent hash that is not itself in the pool; its descendants wait in the pool until the leader is stored or verified. + fn search_orphan_leader(&self, leader_hash: ParentHash) { + let leader_status = self.shared.get_block_status(&leader_hash); + + if leader_status.eq(&BlockStatus::BLOCK_INVALID) { + let descendants: Vec<LonelyBlockHash> = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + for descendant in descendants { + self.process_invalid_block(descendant); + } + return; + } + + let leader_is_pending_verify = self.is_pending_verify.contains(&leader_hash); + if 
!leader_is_pending_verify && !leader_status.contains(BlockStatus::BLOCK_STORED) { + trace!( + "orphan leader: {} is not stored (status: {:?}) and not pending verify: {}", + leader_hash, + leader_status, + leader_is_pending_verify + ); + return; + } + + let descendants: Vec<LonelyBlockHash> = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + if descendants.is_empty() { + error!( + "leader {} does not have any descendants, this shouldn't happen", + leader_hash + ); + return; + } + self.accept_descendants(descendants); + } + + fn search_orphan_leaders(&self) { + for leader_hash in self.orphan_blocks_broker.clone_leaders() { + self.search_orphan_leader(leader_hash); + } + } + + fn delete_block(&self, lonely_block: &LonelyBlockHash) { + let block_hash = lonely_block.hash(); + let block_number = lonely_block.number(); + let parent_hash = lonely_block.parent_hash(); + + delete_unverified_block(self.shared.store(), block_hash, block_number, parent_hash); + } + + fn process_invalid_block(&self, lonely_block: LonelyBlockHash) { + let block_hash = lonely_block.hash(); + let block_number = lonely_block.number(); + let parent_hash = lonely_block.parent_hash(); + + self.delete_block(&lonely_block); + + self.shared + .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); + + let err: VerifyResult = Err(InternalErrorKind::Other + .other(format!( + "parent {} is invalid, so block {}-{} is invalid too", + parent_hash, block_number, block_hash + )) + .into()); + lonely_block.execute_callback(err); + } + + pub(crate) fn process_lonely_block(&self, lonely_block: LonelyBlockHash) { + let block_hash = lonely_block.hash(); + let block_number = lonely_block.number(); + let parent_hash = lonely_block.parent_hash(); + let parent_is_pending_verify = self.is_pending_verify.contains(&parent_hash); + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_is_pending_verify || parent_status.contains(BlockStatus::BLOCK_STORED) { + debug!( + "parent {} is stored ({:?}) or pending verify ({}), processing descendant {}-{} directly", + parent_hash, + parent_status, + parent_is_pending_verify, + block_number, + block_hash, + ); + self.process_descendant(lonely_block); + } else if parent_status.eq(&BlockStatus::BLOCK_INVALID) { + self.process_invalid_block(lonely_block); + } else { + self.orphan_blocks_broker.insert(lonely_block); + } + + self.search_orphan_leaders(); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_orphan_count + .set(self.orphan_blocks_broker.len() as i64) + } + } + + pub(crate) fn clean_expired_orphans(&self) { + debug!("clean expired orphans"); + let tip_epoch_number = self + .shared + .store() + .get_tip_header() + .expect("tip header") + .epoch() + .number(); + let expired_orphans = self + .orphan_blocks_broker + .clean_expired_blocks(tip_epoch_number); + for expired_orphan in expired_orphans { + self.delete_block(&expired_orphan); + self.shared.remove_header_view(&expired_orphan.hash()); + self.shared.remove_block_status(&expired_orphan.hash()); + info!( + "cleaned expired orphan: {}-{}", + expired_orphan.number(), + expired_orphan.hash() + ); + } + } + + fn send_unverified_block(&self, lonely_block: LonelyBlockHash) { + let block_number = lonely_block.number(); + let block_hash = lonely_block.hash(); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_preload_unverified_block_ch_len + .set(self.preload_unverified_tx.len() as i64) + } + + match self.preload_unverified_tx.send(lonely_block) { + Ok(_) => { + debug!( + 
"process desendant block success {}-{}", + block_number, block_hash + ); + } + Err(_) => { + info!("send unverified_block_tx failed, the receiver has been closed"); + return; + } + }; + if block_number > self.shared.snapshot().tip_number() { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number, + block_hash.clone(), + U256::from(0u64), + )); + + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_unverified_tip.set(block_number as i64); + } + debug!( + "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + block_number.clone(), + block_hash.clone(), + block_number.saturating_sub(self.shared.snapshot().tip_number()) + ) + } + } + + fn process_descendant(&self, lonely_block: LonelyBlockHash) { + self.is_pending_verify.insert(lonely_block.hash()); + + self.send_unverified_block(lonely_block) + } + + fn accept_descendants(&self, descendants: Vec) { + for descendant_block in descendants { + self.process_descendant(descendant_block); + } + } +} diff --git a/chain/src/preload_unverified_blocks_channel.rs b/chain/src/preload_unverified_blocks_channel.rs new file mode 100644 index 0000000000..b75753b50d --- /dev/null +++ b/chain/src/preload_unverified_blocks_channel.rs @@ -0,0 +1,132 @@ +use crate::{LonelyBlockHash, UnverifiedBlock}; +use ckb_channel::{Receiver, Sender}; +use ckb_logger::{debug, error, info}; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use ckb_types::core::HeaderView; +use crossbeam::select; +use either::Either; +use std::cell::Cell; +use std::sync::Arc; + +pub(crate) struct PreloadUnverifiedBlocksChannel { + shared: Shared, + preload_unverified_rx: Receiver, + + unverified_block_tx: Sender, + + stop_rx: Receiver<()>, + + // after we load a block from store, we put block.parent_header into this cell + prev_header: Cell, +} + +impl PreloadUnverifiedBlocksChannel { + pub(crate) fn new( + shared: Shared, + preload_unverified_rx: Receiver, + unverified_block_tx: Sender, + stop_rx: Receiver<()>, + ) -> Self { + let tip_hash = shared.snapshot().tip_hash(); + + let tip_header = shared + .store() + .get_block_header(&tip_hash) + .expect("must get tip header"); + + PreloadUnverifiedBlocksChannel { + shared, + preload_unverified_rx, + unverified_block_tx, + stop_rx, + prev_header: Cell::new(tip_header), + } + } + + pub(crate) fn start(&self) { + loop { + select! 
{ + recv(self.preload_unverified_rx) -> msg => match msg { + Ok(preload_unverified_block_task) => { + self.preload_unverified_channel(preload_unverified_block_task); + }, + Err(err) => { + error!("recv preload_unverified_rx failed, err: {:?}", err); + break; + } + }, + recv(self.stop_rx) -> _ => { + info!("preload_unverified_blocks thread received exit signal, exit now"); + break; + } + } + } + } + + fn preload_unverified_channel(&self, task: LonelyBlockHash) { + let block_number = task.number(); + let block_hash = task.hash(); + let unverified_block: UnverifiedBlock = self.load_full_unverified_block_by_hash(task); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_unverified_block_ch_len + .set(self.unverified_block_tx.len() as i64) + }; + + if self.unverified_block_tx.send(unverified_block).is_err() { + info!( + "send unverified_block to unverified_block_tx failed, the receiver has been closed" + ); + } else { + debug!("preload unverified block {}-{}", block_number, block_hash,); + } + } + + fn load_full_unverified_block_by_hash(&self, task: LonelyBlockHash) -> UnverifiedBlock { + let _trace_timecost = ckb_metrics::handle() + .map(|metrics| metrics.ckb_chain_load_full_unverified_block.start_timer()); + + let LonelyBlockHash { + block_number_and_hash, + parent_hash, + epoch_number: _epoch_number, + switch, + verify_callback, + } = task; + + let block = { + match block_number_and_hash { + Either::Left(number_and_hash) => { + let block_view = self + .shared + .store() + .get_block(&number_and_hash.hash()) + .expect("block stored"); + Arc::new(block_view) + } + Either::Right(block) => block, + } + }; + + let parent_header = { + let prev_header = self.prev_header.replace(block.header()); + if prev_header.hash() == parent_hash { + prev_header + } else { + self.shared + .store() + .get_block_header(&parent_hash) + .expect("parent header stored") + } + }; + + UnverifiedBlock { + block, + switch, + verify_callback, + parent_header, + } + } +} diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index e8ad1bf182..b1d2947a82 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -1,5 +1,5 @@ -use crate::chain::ChainController; use crate::tests::util::start_chain; +use crate::ChainController; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_error::assert_error_eq; @@ -34,7 +34,7 @@ fn repeat_process_block() { let block = Arc::new(chain.blocks().last().unwrap().clone()); assert!(chain_controller - .internal_process_block(Arc::clone(&block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_EXTENSION) .expect("process block ok")); assert_eq!( shared @@ -46,7 +46,7 @@ ); assert!(!chain_controller - .internal_process_block(Arc::clone(&block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_EXTENSION) .expect("process block ok")); assert_eq!( shared @@ -58,6 +58,59 @@ ); } +#[test] +fn process_genesis_block() { + let tx = TransactionBuilder::default() + .witness(Script::default().into_witness()) + .input(CellInput::new(OutPoint::null(), 0)) + .outputs(vec![ + CellOutputBuilder::default() + .capacity(capacity_bytes!(100_000_000).pack()) + .build(); + 100 + ]) + .outputs_data(vec![Bytes::new(); 100].pack()) + .build(); + let always_success_tx = create_always_success_tx(); + + let dao = genesis_dao_data(vec![&tx, &always_success_tx]).unwrap(); + + 
let genesis_block = BlockBuilder::default() + .transaction(tx.clone()) + .transaction(always_success_tx.clone()) + .compact_target(difficulty_to_compact(U256::from(1000u64)).pack()) + .dao(dao.clone()) + .build(); + + let consensus = ConsensusBuilder::default() + .genesis_block(genesis_block) + .build(); + let (chain_controller, shared, _parent) = start_chain(Some(consensus)); + + let block = Arc::new(shared.consensus().genesis_block().clone()); + + let result = chain_controller.blocking_process_block(Arc::clone(&block)); + assert!(!result.expect("process block ok")); + assert_eq!( + shared + .store() + .get_block_ext(&block.header().hash()) + .unwrap() + .verified, + Some(true) + ); + + let different_genesis_block = BlockBuilder::default() + .transaction(tx) + .transaction(always_success_tx) + // Difficulty is changed here + .compact_target(difficulty_to_compact(U256::from(999u64)).pack()) + .dao(dao) + .build(); + let result = chain_controller.blocking_process_block(Arc::new(different_genesis_block)); + assert!(result.is_err()); +} + #[test] fn test_genesis_transaction_spend() { // let data: Vec = ; @@ -108,7 +161,7 @@ fn test_genesis_transaction_spend() { for block in &chain.blocks()[0..10] { assert!(chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .is_ok()); } @@ -165,7 +218,7 @@ fn test_transaction_spend_in_same_block() { for block in chain.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -208,7 +261,7 @@ fn test_transaction_spend_in_same_block() { parent_number4, epoch.number_with_fraction(parent_number4), parent_hash4, - 2 + 2, )), mem_cell_data: None, mem_cell_data_hash: None, @@ -239,13 +292,13 @@ fn test_transaction_conflict_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[3].clone()), Switch::DISABLE_EXTENSION ) @@ -279,13 +332,13 @@ fn test_transaction_conflict_in_different_blocks() { for block in chain.blocks().iter().take(4) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[4].clone()), Switch::DISABLE_EXTENSION ) @@ -316,13 +369,13 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[3].clone()), Switch::DISABLE_EXTENSION ) @@ -354,14 +407,14 @@ fn 
test_invalid_out_point_index_in_different_blocks() { for block in chain.blocks().iter().take(4) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[4].clone()), Switch::DISABLE_EXTENSION ) @@ -426,13 +479,13 @@ fn test_chain_fork_by_total_difficulty() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } for block in chain2.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } assert_eq!( @@ -469,7 +522,7 @@ fn test_chain_fork_by_first_received() { for chain in vec![chain1.clone(), chain2.clone(), chain3.clone()] { for block in chain.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } } @@ -530,7 +583,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain1.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); @@ -570,7 +623,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain2.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index d5f34c3188..3e0638d2e3 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -1,10 +1,10 @@ -use crate::chain::{ChainController, ChainService}; use crate::tests::util::dummy_network; +use crate::{start_chain_services, ChainController}; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; -use ckb_shared::{Shared, SharedBuilder, Snapshot}; +use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder, Snapshot}; use ckb_store::ChainStore; use ckb_tx_pool::{block_assembler::CandidateUncles, PlugTarget, TxEntry}; use ckb_types::{ @@ -47,8 +47,9 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_services_builder: ChainServicesBuilder = pack.take_chain_services_builder(); + let chain_controller: ChainController = start_chain_services(chain_services_builder); + (chain_controller, shared) } @@ -142,7 +143,7 @@ fn test_block_template_timestamp() { let block = gen_block(&genesis, 0, &epoch); chain_controller - 
.internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -209,13 +210,13 @@ fn test_prepare_uncles() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -239,7 +240,7 @@ fn test_prepare_uncles() { let block2_1 = gen_block(&block1_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -263,7 +264,7 @@ fn test_prepare_uncles() { let block3_1 = gen_block(&block2_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_1), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -282,6 +283,8 @@ fn test_prepare_uncles() { #[test] fn test_candidate_uncles_retain() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); + let mut consensus = Consensus::default(); consensus.genesis_epoch_ext.set_length(5); let epoch = consensus.genesis_epoch_ext().clone(); @@ -299,13 +302,13 @@ fn test_candidate_uncles_retain() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); candidate_uncles.insert(block0_0.as_uncle()); @@ -326,7 +329,7 @@ fn test_candidate_uncles_retain() { let block2_0 = gen_block(&block1_0.header(), 13, &epoch); for block in vec![block1_0, block2_0.clone()] { chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -346,7 +349,7 @@ fn test_candidate_uncles_retain() { let block3_0 = gen_block(&block2_0.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) .unwrap(); { @@ -413,7 +416,7 @@ fn test_package_basic() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); 
blocks.push(block); @@ -520,7 +523,7 @@ fn test_package_multi_best_scores() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); @@ -621,6 +624,8 @@ fn test_package_multi_best_scores() { #[test] fn test_package_low_fee_descendants() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); + let mut consensus = Consensus::default(); consensus.genesis_epoch_ext.set_length(5); let epoch = consensus.genesis_epoch_ext().clone(); @@ -636,7 +641,7 @@ fn test_package_low_fee_descendants() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); diff --git a/chain/src/tests/delay_verify.rs b/chain/src/tests/delay_verify.rs index 77ed3780b7..bd36fa558f 100644 --- a/chain/src/tests/delay_verify.rs +++ b/chain/src/tests/delay_verify.rs @@ -46,7 +46,7 @@ fn test_dead_cell_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -55,7 +55,7 @@ fn test_dead_cell_in_same_block() { for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -65,7 +65,7 @@ fn test_dead_cell_in_same_block() { assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -107,7 +107,7 @@ fn test_dead_cell_in_different_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -116,7 +116,7 @@ fn test_dead_cell_in_different_block() { for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -126,7 +126,7 @@ fn test_dead_cell_in_different_block() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -169,7 +169,7 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -178,7 +178,7 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), 
Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -188,7 +188,7 @@ fn test_invalid_out_point_index_in_same_block() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -232,7 +232,7 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -241,7 +241,7 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -251,7 +251,7 @@ fn test_invalid_out_point_index_in_different_blocks() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -295,7 +295,7 @@ fn test_full_dead_transaction() { .build(); chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -373,7 +373,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -456,7 +456,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -528,7 +528,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index cac812d6ae..64e3fbe7d4 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -152,7 +152,7 @@ fn test_package_txs_with_deps() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -168,7 +168,7 @@ fn test_package_txs_with_deps() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -298,7 +298,7 @@ fn test_package_txs_with_deps_unstable_sort() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -314,7 +314,7 @@ fn test_package_txs_with_deps_unstable_sort() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -437,7 
+437,7 @@ fn test_package_txs_with_deps2() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } // skip gap @@ -452,7 +452,7 @@ fn test_package_txs_with_deps2() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -562,7 +562,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -578,7 +578,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index f25c04de5e..e5b8ad39ce 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,5 +1,8 @@ -use crate::chain::{ChainService, ForkChanges}; +use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; +use crate::utils::forkchanges::ForkChanges; +use crate::{start_chain_services, UnverifiedBlock}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; +use ckb_proposal_table::ProposalTable; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -11,9 +14,31 @@ use ckb_types::{ U256, }; use ckb_verification_traits::Switch; +use dashmap::DashSet; use std::collections::HashSet; use std::sync::Arc; +fn process_block( + consume_unverified_block_processor: &mut ConsumeUnverifiedBlockProcessor, + blk: &BlockView, + switch: Switch, +) { + let store = consume_unverified_block_processor.shared.store(); + let db_txn = store.begin_transaction(); + db_txn.insert_block(blk).unwrap(); + db_txn.commit().unwrap(); + + let parent_header = store.get_block_header(&blk.parent_hash()).unwrap(); + let unverified_block = UnverifiedBlock { + block: Arc::new(blk.to_owned()), + switch: Some(switch), + verify_callback: None, + parent_header, + }; + + consume_unverified_block_processor.consume_unverified_blocks(unverified_block); +} + // 0--1--2--3--4 // \ // \ @@ -21,8 +46,10 @@ use std::sync::Arc; #[test] fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (shared, mut _pack) = builder.consensus(consensus).build().unwrap(); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -40,18 +67,32 @@ fn test_find_fork_case1() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let is_pending_verify = Arc::new(DashSet::new()); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify, + proposal_table, + }; + // fork1 total_difficulty 
400 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + println!("process fork1 block: {}-{}", blk.number(), blk.hash()); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 270 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + println!("process fork2 block: {}-{}", blk.number(), blk.hash()); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -72,7 +113,7 @@ let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -93,8 +134,8 @@ #[test] fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); @@ -111,19 +152,29 @@ for _ in 0..2 { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 280 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -144,7 +195,7 @@ let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks()[1..].iter().cloned().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -165,8 +216,8 @@ #[test] fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); @@ -184,19 +235,28 @@ for _ in 0..5 { fork2.gen_empty_block_with_diff(40u64, &mock_store) } - + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + 
shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 240 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 200 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -216,7 +276,7 @@ }; let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -237,8 +297,8 @@ #[test] fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); @@ -256,19 +316,29 @@ for _ in 0..2 { fork2.gen_empty_block_with_diff(80u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 200 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 160 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -289,7 +359,7 @@ let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -306,8 +376,9 @@ // this case was created for the issue from https://github.com/nervosnetwork/ckb/pull/1470 #[test] fn repeatedly_switch_fork() { - let (shared, _) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) + let consensus = Consensus::default(); + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(consensus.clone()) .build() .unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); - let (shared, mut pack) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) - .build() - .unwrap(); - let mut chain_service = 
ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); for _ in 0..2 { fork1.gen_empty_block_with_nonce(1u128, &mock_store); @@ -331,17 +398,27 @@ fn repeatedly_switch_fork() { for _ in 0..2 { fork2.gen_empty_block_with_nonce(2u128, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } //switch fork1 @@ -360,8 +437,8 @@ fn repeatedly_switch_fork() { .nonce(1u128.pack()) .uncle(uncle) .build(); - chain_service - .process_block(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) .unwrap(); //switch fork2 @@ -379,8 +456,8 @@ fn repeatedly_switch_fork() { .nonce(2u128.pack()) .build(); parent = new_block2.clone(); - chain_service - .process_block(Arc::new(new_block2), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block2), Switch::DISABLE_ALL) .unwrap(); let epoch = shared .consensus() @@ -394,8 +471,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(2u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block3), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block3), Switch::DISABLE_ALL) .unwrap(); //switch fork1 @@ -412,8 +489,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block4; @@ -429,8 +506,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block5), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block5), Switch::DISABLE_ALL) .unwrap(); } @@ -448,7 +525,7 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() @@ -466,8 +543,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -483,8 +560,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks().iter().skip(3) { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + 
.blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -495,7 +572,7 @@ assert_eq!( &vec![ packed::ProposalShortId::new([0u8, 0, 0, 0, 0, 0, 0, 0, 0, 3]), - packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]) + packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]), ] .into_iter() .collect::<HashSet<_>>(), diff --git a/chain/src/tests/mod.rs b/chain/src/tests/mod.rs index cafc0d6a57..ea5909c044 100644 --- a/chain/src/tests/mod.rs +++ b/chain/src/tests/mod.rs @@ -8,6 +8,7 @@ mod load_code_with_snapshot; mod load_input_cell_data; mod load_input_data_hash_cell; mod non_contextual_block_txs_verify; +mod orphan_block_pool; mod reward; mod truncate; mod uncle; diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index b8317363a3..68178658d8 100644 --- a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -156,7 +156,7 @@ fn non_contextual_block_txs_verify() { let block = gen_block(&parent, vec![tx0, tx1], &shared, &mock_store); - let ret = chain_controller.process_block(Arc::new(block)); + let ret = chain_controller.blocking_process_block(Arc::new(block)); assert!(ret.is_err()); assert_eq!( format!("{}", ret.err().unwrap()), diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs new file mode 100644 index 0000000000..bc0ba1ceb9 --- /dev/null +++ b/chain/src/tests/orphan_block_pool.rs @@ -0,0 +1,260 @@ +#![allow(dead_code)] +use crate::tests::util::start_chain; +use crate::{LonelyBlock, LonelyBlockHash}; +use ckb_chain_spec::consensus::ConsensusBuilder; +use ckb_systemtime::unix_time_as_millis; +use ckb_types::core::{BlockBuilder, EpochNumberWithFraction, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_types::prelude::*; +use std::collections::HashSet; +use std::sync::Arc; +use std::thread; + +use crate::utils::orphan_block_pool::OrphanBlockPool; + +fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { + let number = parent_header.number() + 1; + let block = BlockBuilder::default() + .parent_hash(parent_header.hash()) + .timestamp(unix_time_as_millis().pack()) + .number(number.pack()) + .epoch(EpochNumberWithFraction::new(number / 1000, number % 1000, 1000).pack()) + .nonce((parent_header.nonce() + 1).pack()) + .build(); + LonelyBlock { + block: Arc::new(block), + switch: None, + verify_callback: None, + } +} + +fn assert_leaders_have_children(pool: &OrphanBlockPool) { + for leader in pool.clone_leaders() { + let children = pool.remove_blocks_by_parent(&leader); + assert!(!children.is_empty()); + // `remove_blocks_by_parent` will remove all children from the pool, + // so we need to put them back here. + for child in children { + pool.insert(child); + } + } +} + +fn assert_blocks_are_sorted(blocks: &[LonelyBlockHash]) { + let mut parent_hash = blocks[0].parent_hash(); + let mut windows = blocks.windows(2); + // Orphans are sorted in a breadth-first search manner. We iterate through them and + // check that this is the case. + // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, + // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. + while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { + // `parent_or_sibling` is a child of the block with current `parent_hash`. + // Make `parent_or_sibling`'s parent the current `parent_hash`.
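+        // In the expected BFS order, each consecutive pair is either two siblings (same parent) or a step down to the next depth level; the two checks below distinguish these cases.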
+ if parent_or_sibling.parent_hash() != parent_hash { + parent_hash = parent_or_sibling.parent_hash(); + } + + // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of + // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. + if child_or_sibling.parent_hash() != parent_hash { + assert_eq!(child_or_sibling.parent_hash(), parent_or_sibling.hash()); + // Move `parent_hash` forward. + parent_hash = child_or_sibling.parent_hash(); + } + } +} + +#[test] +fn test_remove_blocks_by_parent() { + let consensus = ConsensusBuilder::default().build(); + let block_number = 200; + let mut blocks = Vec::new(); + let mut parent = consensus.genesis_block().header(); + let pool = OrphanBlockPool::with_capacity(200); + for _ in 1..block_number { + let lonely_block = gen_lonely_block(&parent); + let new_block_clone = Arc::clone(lonely_block.block()); + let new_block = LonelyBlock { + block: Arc::clone(&new_block_clone), + switch: None, + verify_callback: None, + }; + blocks.push(new_block_clone); + + parent = new_block.block().header(); + pool.insert(new_block.into()); + } + + let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); + + assert_eq!(orphan[0].parent_hash(), consensus.genesis_block().hash()); + assert_blocks_are_sorted(orphan.as_slice()); + + let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.hash()).collect(); + let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.hash()).collect(); + assert_eq!(orphan_set, blocks_set) +} + +#[test] +fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { + let consensus = ConsensusBuilder::default().build(); + + let (_chain_controller, shared, _parent) = start_chain(Some(consensus.clone())); + + let pool = OrphanBlockPool::with_capacity(1024); + let mut header = consensus.genesis_block().header(); + let mut hashes = Vec::new(); + for _ in 1..1024 { + let lonely_block = gen_lonely_block(&header); + let new_block = lonely_block.block(); + let new_block_clone = LonelyBlock { + block: Arc::clone(new_block), + switch: None, + verify_callback: None, + }; + pool.insert(new_block_clone.into()); + header = new_block.header(); + hashes.push(header.hash()); + } + + let pool_arc1 = Arc::new(pool); + let pool_arc2 = Arc::clone(&pool_arc1); + + let thread1 = thread::spawn(move || { + pool_arc1.remove_blocks_by_parent(&consensus.genesis_block().hash()); + }); + + for hash in hashes.iter().rev() { + pool_arc2.get_block(shared.store(), hash); + } + + thread1.join().unwrap(); +} + +#[test] +fn test_leaders() { + let consensus = ConsensusBuilder::default().build(); + let block_number = 20; + let mut blocks = Vec::new(); + let mut parent = consensus.genesis_block().header(); + let pool = OrphanBlockPool::with_capacity(20); + for i in 0..block_number - 1 { + let lonely_block = gen_lonely_block(&parent); + let new_block = LonelyBlock { + block: Arc::clone(lonely_block.block()), + switch: None, + verify_callback: None, + }; + blocks.push(lonely_block); + parent = new_block.block().header(); + if i % 5 != 0 { + pool.insert(new_block.into()); + } + } + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 15); + assert_eq!(pool.leaders_len(), 4); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[5].block()), + switch: None, + verify_callback: None, + } + .into(), + ); + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 16); + assert_eq!(pool.leaders_len(), 3); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[10].block()), + switch: None, + 
verify_callback: None,
+        }
+        .into(),
+    );
+    assert_leaders_have_children(&pool);
+    assert_eq!(pool.len(), 17);
+    assert_eq!(pool.leaders_len(), 2);
+
+    // the block at index 0 isn't in the orphan pool, so this removes nothing
+    let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash());
+    assert!(orphan.is_empty());
+    assert_eq!(pool.len(), 17);
+    assert_eq!(pool.leaders_len(), 2);
+
+    pool.insert(
+        LonelyBlock {
+            block: Arc::clone(blocks[0].block()),
+            switch: None,
+            verify_callback: None,
+        }
+        .into(),
+    );
+    assert_leaders_have_children(&pool);
+    assert_eq!(pool.len(), 18);
+    assert_eq!(pool.leaders_len(), 2);
+
+    let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash());
+    assert_eq!(pool.len(), 3);
+    assert_eq!(pool.leaders_len(), 1);
+
+    pool.insert(
+        LonelyBlock {
+            block: Arc::clone(blocks[15].block()),
+            switch: None,
+            verify_callback: None,
+        }
+        .into(),
+    );
+    assert_leaders_have_children(&pool);
+    assert_eq!(pool.len(), 4);
+    assert_eq!(pool.leaders_len(), 1);
+
+    let orphan_1 = pool.remove_blocks_by_parent(&blocks[14].block.hash());
+
+    let orphan_set: HashSet<Byte32> = orphan
+        .into_iter()
+        .map(|b| b.hash())
+        .chain(orphan_1.into_iter().map(|b| b.hash()))
+        .collect();
+    let blocks_set: HashSet<Byte32> = blocks.into_iter().map(|b| b.block().hash()).collect();
+    assert_eq!(orphan_set, blocks_set);
+    assert_eq!(pool.len(), 0);
+    assert_eq!(pool.leaders_len(), 0);
+}
+
+#[test]
+fn test_remove_expired_blocks() {
+    let consensus = ConsensusBuilder::default().build();
+    let block_number = 20;
+    let mut parent = consensus.genesis_block().header();
+    let pool = OrphanBlockPool::with_capacity(block_number);
+
+    let deprecated = EpochNumberWithFraction::new(10, 0, 10);
+
+    for _ in 1..block_number {
+        let new_block = BlockBuilder::default()
+            .parent_hash(parent.hash())
+            .timestamp(unix_time_as_millis().pack())
+            .number((parent.number() + 1).pack())
+            .epoch(deprecated.clone().pack())
+            .nonce((parent.nonce() + 1).pack())
+            .build();
+
+        parent = new_block.header();
+        let lonely_block = LonelyBlock {
+            block: Arc::new(new_block),
+            switch: None,
+            verify_callback: None,
+        };
+        pool.insert(lonely_block.into());
+    }
+    assert_eq!(pool.leaders_len(), 1);
+
+    let v = pool.clean_expired_blocks(20_u64);
+    assert_eq!(v.len(), 19);
+    assert_eq!(pool.leaders_len(), 0);
+}
diff --git a/chain/src/tests/reward.rs b/chain/src/tests/reward.rs
index 73de141c86..876a1495bf 100644
--- a/chain/src/tests/reward.rs
+++ b/chain/src/tests/reward.rs
@@ -229,7 +229,7 @@ fn finalize_reward() {
         parent = block.header().clone();
 
         chain_controller
-            .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
+            .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
             .expect("process block ok");
         blocks.push(block);
     }
@@ -266,7 +266,7 @@ fn finalize_reward() {
         parent = block.header();
 
         chain_controller
-            .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
+            .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
             .expect("process block ok");
 
         let (target, reward) = RewardCalculator::new(shared.consensus(), shared.snapshot().as_ref())
@@ -300,6 +300,6 @@ fn finalize_reward() {
     );
 
     chain_controller
-        .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+        .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_EXTENSION)
         .expect("process block ok");
 }
diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs
index a9c892c7ee..57fec63256 100644
---
a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -1,4 +1,4 @@ -use crate::chain::ChainService; +use crate::start_chain_services; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -11,7 +11,7 @@ fn test_truncate() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() @@ -26,8 +26,8 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -38,12 +38,12 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } - chain_service.truncate(&target.hash()).unwrap(); + chain_controller.truncate(target.hash()).unwrap(); assert_eq!(shared.snapshot().tip_header(), &target); } diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 3d8d4da0a0..fe23f5cf34 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -1,4 +1,4 @@ -use crate::chain::ChainService; +use crate::start_chain_services; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -10,7 +10,8 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -26,15 +27,15 @@ fn test_get_block_body_after_inserting() { } for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let len = shared.snapshot().get_block_body(&blk.hash()).len(); assert_eq!(len, 1, "[fork1] snapshot.get_block_body({})", blk.hash(),); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let snapshot = shared.snapshot(); assert!(snapshot.get_block_header(&blk.hash()).is_some()); diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 0d42b0def6..f29cd97ad7 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -1,4 +1,4 @@ -use crate::chain::{ChainController, ChainService}; +use crate::{start_chain_services, ChainController}; use ckb_app_config::TxPoolConfig; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; @@ -85,8 +85,7 @@ pub(crate) fn start_chain_with_tx_pool_config( let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = 
chain_service.start::<&str>(None);
+    let chain_controller = start_chain_services(pack.take_chain_services_builder());
 
     let parent = {
         let snapshot = shared.snapshot();
         snapshot
diff --git a/chain/src/utils/forkchanges.rs b/chain/src/utils/forkchanges.rs
new file mode 100644
index 0000000000..561ae94545
--- /dev/null
+++ b/chain/src/utils/forkchanges.rs
@@ -0,0 +1,85 @@
+use ckb_types::core::hardfork::HardForks;
+use ckb_types::core::{BlockExt, BlockView};
+use ckb_types::packed::ProposalShortId;
+#[cfg(debug_assertions)]
+use is_sorted::IsSorted;
+use std::collections::{HashSet, VecDeque};
+
+/// The struct that represents a fork
+#[derive(Debug, Default)]
+pub struct ForkChanges {
+    /// Blocks attached to index after forks
+    pub(crate) attached_blocks: VecDeque<BlockView>,
+    /// Blocks detached from index after forks
+    pub(crate) detached_blocks: VecDeque<BlockView>,
+    /// HashSet with proposal_id detached to index after forks
+    pub(crate) detached_proposal_id: HashSet<ProposalShortId>,
+    /// Block extensions to be updated
+    pub(crate) dirty_exts: VecDeque<BlockExt>,
+}
+
+impl ForkChanges {
+    /// blocks attached to index after forks
+    pub fn attached_blocks(&self) -> &VecDeque<BlockView> {
+        &self.attached_blocks
+    }
+
+    /// blocks detached from index after forks
+    pub fn detached_blocks(&self) -> &VecDeque<BlockView> {
+        &self.detached_blocks
+    }
+
+    /// proposal_id detached to index after forks
+    pub fn detached_proposal_id(&self) -> &HashSet<ProposalShortId> {
+        &self.detached_proposal_id
+    }
+
+    /// whether there are any blocks that should be detached
+    pub fn has_detached(&self) -> bool {
+        !self.detached_blocks.is_empty()
+    }
+
+    /// count of cached verified attached blocks
+    pub fn verified_len(&self) -> usize {
+        self.attached_blocks.len() - self.dirty_exts.len()
+    }
+
+    /// assertion to make sure attached_blocks and detached_blocks are sorted
+    #[cfg(debug_assertions)]
+    pub fn is_sorted(&self) -> bool {
+        IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| {
+            blk.header().number()
+        }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| {
+            blk.header().number()
+        })
+    }
+
+    pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool {
+        let hardfork_during_detach =
+            self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks);
+        let hardfork_during_attach =
+            self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks);
+
+        hardfork_during_detach || hardfork_during_attach
+    }
+
+    fn check_if_hardfork_during_blocks(
+        &self,
+        hardfork: &HardForks,
+        blocks: &VecDeque<BlockView>,
+    ) -> bool {
+        if blocks.is_empty() {
+            false
+        } else {
+            // This method assumes that the input blocks are sorted and unique.
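+            // Editorial note on the check below: with sorted input, `epoch_first`
+            // is the epoch of the first block and `epoch_next` is the minimum
+            // epoch right after the last block, so the batch crosses the RFC 0049
+            // activation exactly when epoch_first < rfc_0049 <= epoch_next.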
+            let rfc_0049 = hardfork.ckb2023.rfc_0049();
+            let epoch_first = blocks.front().unwrap().epoch().number();
+            let epoch_next = blocks
+                .back()
+                .unwrap()
+                .epoch()
+                .minimum_epoch_number_after_n_blocks(1);
+            epoch_first < rfc_0049 && rfc_0049 <= epoch_next
+        }
+    }
+}
diff --git a/chain/src/utils/mod.rs b/chain/src/utils/mod.rs
new file mode 100644
index 0000000000..efdc1e092a
--- /dev/null
+++ b/chain/src/utils/mod.rs
@@ -0,0 +1,2 @@
+pub mod forkchanges;
+pub mod orphan_block_pool;
diff --git a/sync/src/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs
similarity index 70%
rename from sync/src/orphan_block_pool.rs
rename to chain/src/utils/orphan_block_pool.rs
index 20d6eda26d..602cd6adba 100644
--- a/sync/src/orphan_block_pool.rs
+++ b/chain/src/utils/orphan_block_pool.rs
@@ -1,27 +1,27 @@
-use ckb_logger::{debug, error};
-use ckb_types::core::EpochNumber;
-use ckb_types::{core, packed};
+use crate::LonelyBlockHash;
+use ckb_logger::debug;
+use ckb_store::{ChainDB, ChainStore};
+use ckb_types::core::{BlockView, EpochNumber};
+use ckb_types::packed;
 use ckb_util::{parking_lot::RwLock, shrink_to_fit};
 use std::collections::{HashMap, HashSet, VecDeque};
+use std::sync::Arc;
 
 pub type ParentHash = packed::Byte32;
-const SHRINK_THRESHOLD: usize = 100;
-// Orphan pool will remove expired blocks whose epoch is less than tip_epoch - EXPIRED_EPOCH,
-const EXPIRED_EPOCH: u64 = 6;
+const SHRINK_THRESHOLD: usize = 100;
+pub const EXPIRED_EPOCH: u64 = 6;
 
 #[derive(Default)]
 struct InnerPool {
     // Group by blocks in the pool by the parent hash.
-    blocks: HashMap<ParentHash, HashMap<packed::Byte32, core::BlockView>>,
+    blocks: HashMap<ParentHash, HashMap<packed::Byte32, LonelyBlockHash>>,
     // The map tells the parent hash when given the hash of a block in the pool.
     //
     // The block is in the orphan pool if and only if the block hash exists as a key in this map.
     parents: HashMap<packed::Byte32, ParentHash>,
     // Leaders are blocks not in the orphan pool but having at least a child in the pool.
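     // For example (editorial note): if the pool holds B2 and B3, where B3's
     // parent is B2 but B2's own parent B1 has not arrived, then B1's hash is a
     // leader, and a later `remove_blocks_by_parent(B1)` drains both blocks.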
    leaders: HashSet<ParentHash>,
-    // block size of pool
-    block_size: usize,
 }
 
 impl InnerPool {
@@ -30,26 +30,16 @@ impl InnerPool {
             blocks: HashMap::with_capacity(capacity),
             parents: HashMap::new(),
             leaders: HashSet::new(),
-            block_size: 0,
         }
     }
 
-    fn insert(&mut self, block: core::BlockView) {
-        let hash = block.header().hash();
-        let parent_hash = block.data().header().raw().parent_hash();
-
-        self.block_size = self
-            .block_size
-            .checked_add(block.data().total_size())
-            .unwrap_or_else(|| {
-                error!("orphan pool block size add overflow");
-                usize::MAX
-            });
+    fn insert(&mut self, lonely_block: LonelyBlockHash) {
+        let hash = lonely_block.hash();
+        let parent_hash = lonely_block.parent_hash();
         self.blocks
             .entry(parent_hash.clone())
             .or_default()
-            .insert(hash.clone(), block);
-
+            .insert(hash.clone(), lonely_block);
         // Out-of-order insertion needs to be deduplicated
         self.leaders.remove(&hash);
         // It is a possible optimization to make the judgment in advance,
@@ -63,7 +53,7 @@ impl InnerPool {
         self.parents.insert(hash, parent_hash);
     }
 
-    pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<core::BlockView> {
+    pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<LonelyBlockHash> {
         // try remove leaders first
         if !self.leaders.remove(parent_hash) {
             return Vec::new();
@@ -72,7 +62,7 @@ impl InnerPool {
         let mut queue: VecDeque<packed::Byte32> = VecDeque::new();
         queue.push_back(parent_hash.to_owned());
 
-        let mut removed: Vec<core::BlockView> = Vec::new();
+        let mut removed: Vec<LonelyBlockHash> = Vec::new();
         while let Some(parent_hash) = queue.pop_front() {
             if let Some(orphaned) = self.blocks.remove(&parent_hash) {
                 let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip();
@@ -84,13 +74,6 @@ impl InnerPool {
             }
         }
 
-        self.block_size = self
-            .block_size
-            .checked_sub(removed.iter().map(|b| b.data().total_size()).sum::<usize>())
-            .unwrap_or_else(|| {
-                error!("orphan pool block size sub overflow");
-                0
-            });
         debug!("orphan pool pop chain len: {}", removed.len());
         debug_assert_ne!(
             removed.len(),
@@ -104,23 +87,23 @@ impl InnerPool {
         removed
     }
 
-    pub fn get_block(&self, hash: &packed::Byte32) -> Option<core::BlockView> {
+    pub fn get_block(&self, hash: &packed::Byte32) -> Option<&LonelyBlockHash> {
         self.parents.get(hash).and_then(|parent_hash| {
             self.blocks
                 .get(parent_hash)
-                .and_then(|blocks| blocks.get(hash).cloned())
+                .and_then(|blocks| blocks.get(hash))
         })
     }
 
     /// cleanup expired blocks(epoch + EXPIRED_EPOCH < tip_epoch)
-    pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec<packed::Byte32> {
+    pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec<LonelyBlockHash> {
         let mut result = vec![];
 
         for hash in self.leaders.clone().iter() {
             if self.need_clean(hash, tip_epoch) {
                 // remove items in orphan pool and return hash to callee(clean header map)
                 let descendants = self.remove_blocks_by_parent(hash);
-                result.extend(descendants.iter().map(|block| block.hash()));
+                result.extend(descendants);
             }
         }
         result
@@ -131,9 +114,9 @@ impl InnerPool {
         self.blocks
             .get(parent_hash)
             .and_then(|map| {
-                map.iter()
-                    .next()
-                    .map(|(_, block)| block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch)
+                map.iter().next().map(|(_, lonely_block)| {
+                    lonely_block.epoch_number() + EXPIRED_EPOCH < tip_epoch
+                })
             })
             .unwrap_or_default()
     }
@@ -155,19 +138,21 @@ impl OrphanBlockPool {
     }
 
     /// Insert orphaned block, for which we have already requested its parent block
-    pub fn insert(&self, block: core::BlockView) {
-        self.inner.write().insert(block);
+    pub fn insert(&self, lonely_block: LonelyBlockHash) {
+        self.inner.write().insert(lonely_block);
     }
 
-    pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<core::BlockView> {
+    pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<LonelyBlockHash> {
         self.inner.write().remove_blocks_by_parent(parent_hash)
     }
 
-    pub fn get_block(&self, hash: &packed::Byte32) -> Option<core::BlockView> {
-        self.inner.read().get_block(hash)
+    pub fn get_block(&self, store: &ChainDB, hash: &packed::Byte32) -> Option<Arc<BlockView>> {
+        let inner = self.inner.read();
+        let lonely_block_hash: &LonelyBlockHash = inner.get_block(hash)?;
+        store.get_block(&lonely_block_hash.hash()).map(Arc::new)
     }
 
-    pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec<packed::Byte32> {
+    pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec<LonelyBlockHash> {
         self.inner.write().clean_expired_blocks(epoch)
     }
 
@@ -175,14 +160,6 @@ impl OrphanBlockPool {
         self.inner.read().parents.len()
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    pub fn total_size(&self) -> usize {
-        self.inner.read().block_size
-    }
-
     pub fn clone_leaders(&self) -> Vec<ParentHash> {
         self.inner.read().leaders.iter().cloned().collect()
     }
diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml
index 4ac68e71a9..1c61622459 100644
--- a/ckb-bin/Cargo.toml
+++ b/ckb-bin/Cargo.toml
@@ -25,10 +25,10 @@ ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.116.0-pre" }
 ckb-chain = { path = "../chain", version = "= 0.116.0-pre" }
 ckb-shared = { path = "../shared", version = "= 0.116.0-pre" }
 ckb-store = { path = "../store", version = "= 0.116.0-pre" }
-ckb-chain-spec = {path = "../spec", version = "= 0.116.0-pre"}
+ckb-chain-spec = { path = "../spec", version = "= 0.116.0-pre" }
 ckb-miner = { path = "../miner", version = "= 0.116.0-pre" }
-ckb-network = { path = "../network", version = "= 0.116.0-pre"}
-ckb-resource = { path = "../resource", version = "= 0.116.0-pre"}
+ckb-network = { path = "../network", version = "= 0.116.0-pre" }
+ckb-resource = { path = "../resource", version = "= 0.116.0-pre" }
 ctrlc = { version = "3.1", features = ["termination"] }
 ckb-instrument = { path = "../util/instrument", version = "= 0.116.0-pre", features = ["progress_bar"] }
 ckb-build-info = { path = "../util/build-info", version = "= 0.116.0-pre" }
@@ -45,6 +45,7 @@ sentry = { version = "0.26.0", optional = true }
 is-terminal = "0.4.7"
 fdlimit = "0.2.1"
 ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.116.0-pre" }
+tokio = { version = "1", features = ["sync"] }
 
 [target.'cfg(not(target_os="windows"))'.dependencies]
 daemonize = { version = "0.5.0" }
@@ -53,7 +54,7 @@ colored = "2.0"
 
 [features]
 deadlock_detection = ["ckb-util/deadlock_detection"]
-profiling = ["ckb-memory-tracker/profiling"]
+profiling = ["ckb-memory-tracker/profiling", "ckb-shared/stats"]
 with_sentry = ["sentry", "ckb-launcher/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry", "ckb-logger-service/with_sentry"]
 with_dns_seeding = ["ckb-network/with_dns_seeding"]
 portable = ["ckb-launcher/portable"]
diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs
index d6fba348c3..0b3eabc175 100644
--- a/ckb-bin/src/subcommand/import.rs
+++ b/ckb-bin/src/subcommand/import.rs
@@ -1,6 +1,5 @@
 use ckb_app_config::{ExitCode, ImportArgs};
 use ckb_async_runtime::Handle;
-use ckb_chain::chain::ChainService;
 use ckb_instrument::Import;
 use ckb_shared::SharedBuilder;
 
@@ -13,10 +12,9 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> {
         async_handle,
         args.consensus,
     )?;
-    let (shared, mut pack) = builder.build()?;
+    let (_shared, mut pack) = builder.build()?;
 
-    let chain_service = ChainService::new(shared, pack.take_proposal_table());
-    let chain_controller = chain_service.start::<&str>(Some("ImportChainService"));
+    let chain_controller = ckb_chain::start_chain_services(pack.take_chain_services_builder());
 
     // manual drop tx_pool_builder and relay_tx_receiver
     pack.take_tx_pool_builder();
diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs
index ac7da08fb2..fcadb6a23a 100644
--- a/ckb-bin/src/subcommand/replay.rs
+++ b/ckb-bin/src/subcommand/replay.rs
@@ -1,9 +1,9 @@
 use ckb_app_config::{ExitCode, ReplayArgs};
 use ckb_async_runtime::Handle;
-use ckb_chain::chain::ChainService;
+use ckb_chain::ChainController;
 use ckb_chain_iter::ChainIterator;
 use ckb_instrument::{ProgressBar, ProgressStyle};
-use ckb_shared::{Shared, SharedBuilder};
+use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder};
 use ckb_store::ChainStore;
 use ckb_verification_traits::Switch;
 use std::sync::Arc;
@@ -46,13 +46,14 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> {
             async_handle,
             args.consensus,
         )?;
-        let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?;
-        let chain = ChainService::new(tmp_shared, pack.take_proposal_table());
+        let (_tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?;
+        let chain_service_builder: ChainServicesBuilder = pack.take_chain_services_builder();
+        let chain_controller = ckb_chain::start_chain_services(chain_service_builder);
 
         if let Some((from, to)) = args.profile {
-            profile(shared, chain, from, to);
+            profile(shared, chain_controller, from, to);
         } else if args.sanity_check {
-            sanity_check(shared, chain, args.full_verification);
+            sanity_check(shared, chain_controller, args.full_verification);
         }
     }
     tmp_db_dir.close().map_err(|err| {
@@ -63,16 +64,16 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> {
     Ok(())
 }
 
-fn profile(shared: Shared, mut chain: ChainService, from: Option<u64>, to: Option<u64>) {
+fn profile(shared: Shared, chain_controller: ChainController, from: Option<u64>, to: Option<u64>) {
     let tip_number = shared.snapshot().tip_number();
     let from = from.map(|v| std::cmp::max(1, v)).unwrap_or(1);
     let to = to
         .map(|v| std::cmp::min(v, tip_number))
         .unwrap_or(tip_number);
-    process_range_block(&shared, &mut chain, 1..from);
-    println!("Start profiling; re-process blocks {from}..{to}:");
+    process_range_block(&shared, chain_controller.clone(), 1..from);
+    println!("Start profiling, re-process blocks {from}..{to}:");
     let now = std::time::Instant::now();
-    let tx_count = process_range_block(&shared, &mut chain, from..=to);
+    let tx_count = process_range_block(&shared, chain_controller, from..=to);
     let duration = std::time::Instant::now().saturating_duration_since(now);
     if duration.as_secs() >= MIN_PROFILING_TIME {
         println!(
@@ -97,7 +98,7 @@ fn profile(shared: Shared, mut chain: ChainService, from: Option<u64>, to: Optio
 
 fn process_range_block(
     shared: &Shared,
-    chain: &mut ChainService,
+    chain_controller: ChainController,
     range: impl Iterator<Item = BlockNumber>,
 ) -> usize {
     let mut tx_count = 0;
@@ -108,12 +109,14 @@ fn process_range_block(
             .and_then(|hash| snapshot.get_block(&hash))
             .expect("read block from store");
         tx_count += block.transactions().len().saturating_sub(1);
-        chain.process_block(Arc::new(block), Switch::NONE).unwrap();
+        chain_controller
+            .blocking_process_block_with_switch(Arc::new(block), Switch::NONE)
+            .unwrap();
     }
     tx_count
 }
 
-fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool) {
+fn sanity_check(shared: Shared, chain_controller: ChainController, full_verification: bool) {
     let tip_header = shared.snapshot().tip_header().clone();
     let chain_iter = ChainIterator::new(shared.store());
     let pb = ProgressBar::new(chain_iter.len());
@@ -132,7 +135,8 @@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool
     let mut cursor = shared.consensus().genesis_block().header();
     for block in chain_iter {
         let header = block.header();
-        if let Err(e) = chain.process_block(Arc::new(block), switch) {
+        if let Err(e) = chain_controller.blocking_process_block_with_switch(Arc::new(block), switch)
+        {
             eprintln!(
                 "Replay sanity-check error: {:?} at block({}-{})",
                 e,
diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs
index 094b29bbb3..c043ce7995 100644
--- a/ckb-bin/src/subcommand/run.rs
+++ b/ckb-bin/src/subcommand/run.rs
@@ -4,6 +4,7 @@
 use ckb_async_runtime::Handle;
 use ckb_build_info::Version;
 use ckb_launcher::Launcher;
 use ckb_logger::info;
+
 use ckb_stop_handler::{broadcast_exit_signals, wait_all_ckb_services_exit};
 use ckb_types::core::cell::setup_system_cell_cache;
 
@@ -42,7 +43,8 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(),
 
     launcher.check_assume_valid_target(&shared);
 
-    let chain_controller = launcher.start_chain_service(&shared, pack.take_proposal_table());
+    let chain_controller =
+        launcher.start_chain_service(&shared, pack.take_chain_services_builder());
 
     launcher.start_block_filter(&shared);
 
diff --git a/devtools/block_sync/draw_sync_chart.py b/devtools/block_sync/draw_sync_chart.py
new file mode 100755
index 0000000000..e9b164a440
--- /dev/null
+++ b/devtools/block_sync/draw_sync_chart.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+import matplotlib.pyplot as plt
+import re
+import datetime
+import tqdm
+import argparse
+
+from matplotlib.ticker import MultipleLocator
+
+def parse_sync_statics(log_file):
+    """
+    parse sync statistics from the log file
+    sample:
+    2023-09-01 06:54:45.096 +00:00 verify_blocks INFO ckb_chain::chain block: 811224, hash: 0x00f54aaadd1a36339e69a10624dec3250658100ffd5773a7e9f228bb9a96187e, epoch: 514(841/1800), total_diff: 0x59a4a071ba9f0de59d, txs: 1
+    """
+    duration = []
+    height = []
+    base_timestamp = 0
+
+    print("reading file: ", log_file)
+    total_lines = len(open(log_file, 'r').readlines())
+    print("total lines: ", total_lines)
+
+    with open(log_file, 'r') as f:
+        # pbar = tqdm.tqdm(total=total_lines)
+        for line_idx, line in enumerate(f):
+            # pbar.update(1)
+            if line_idx == 0:
+                timestamp_str = re.search(r'^(\S+ \S+)', line).group(1)  # Extract the timestamp string
+                timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp()
+                base_timestamp = timestamp
+
+            if line.find('INFO ckb_chain::chain block: ') != -1:
+
+                block_number = int(re.search(r'block: (\d+)', line).group(1))  # Extract the block number using regex
+
+                if line_idx == 0 or block_number % 10_000 == 0:
+                    timestamp_str = re.search(r'^(\S+ \S+)', line).group(1)  # Extract the timestamp string
+                    timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp()
+                    timestamp = int(timestamp - base_timestamp)
+                    duration.append(timestamp / 60 / 60)
+                    height.append(block_number)
+
+    # pbar.close()
+
+    return duration, height
+
+
+parser = argparse.ArgumentParser(
+    description='Draw CKB Sync progress Chart. Usage: ./draw_sync_chart.py --ckb_log ./run1.log ./run2.log --label branch_develop branch_async --result_path /tmp/compare_result.png')
+parser.add_argument('--ckb_log', metavar='ckb_log_file', type=str,
+                    action='store', nargs='+', required=True,
+                    help='the ckb node log file path')
+parser.add_argument('--label', metavar='label', type=str,
+                    action='store', nargs='+', required=True,
+                    help='what label should be put on the chart')
+parser.add_argument('--result_path', type=str, nargs=1, action='store',
+                    help='where to save the result chart')
+
+args = parser.parse_args()
+assert len(args.ckb_log) == len(args.label)
+
+tasks = zip(args.ckb_log, args.label)
+
+result_path = args.result_path[0]
+fig, ax = plt.subplots(1, 1, figsize=(10, 8))
+
+lgs = []
+
+def process_task(task):
+    ckb_log_file, label = task
+    print("ckb_log_file: ", ckb_log_file)
+    print("label: ", label)
+    duration, height = parse_sync_statics(ckb_log_file)
+    return (duration, height, label)
+
+
+tasks = [(ckb_log_file, label) for ckb_log_file, label in tasks]
+
+
+import multiprocessing
+with multiprocessing.Pool() as pool:
+    results = pool.map(process_task, tasks)
+
+alabels = []
+
+import matplotlib.ticker as ticker
+
+vlabels = []
+
+for duration, height, label in results:
+# for ckb_log_file, label in tasks:
+#     print("ckb_log_file: ", ckb_log_file)
+#     print("label: ", label)
+#     duration, height = parse_sync_statics(ckb_log_file)
+
+    lg = ax.scatter(duration, height, s=1, label=label)
+    ax.plot(duration, height, label=label)
+
+    lgs.append(lg)
+
+    ax.hlines([11_500_000], 0, max(duration), colors="gray", linestyles="dashed")
+
+    for i, h in enumerate(height):
+        if i == len(height) - 1:
+            alabels.append(((duration[i], h), label))
+
+        if h == 11_500_000:
+            vlabels.append((duration[i], h))
+
+    ax.get_yaxis().get_major_formatter().set_scientific(False)
+    ax.get_yaxis().get_major_formatter().set_useOffset(False)
+
+    ax.margins(0)
+
+    ax.set_axisbelow(True)
+
+    ax.xaxis.grid(color='gray', linestyle='solid', which='major')
+    ax.yaxis.grid(color='gray', linestyle='solid', which='major')
+
+    ax.xaxis.grid(color='gray', linestyle='dashed', which='minor')
+    ax.yaxis.grid(color='gray', linestyle='dashed', which='minor')
+
+    xminorLocator = MultipleLocator(1.0)
+    ax.xaxis.set_major_locator(xminorLocator)
+
+    yminorLocator = MultipleLocator(500_000)
+    ax.yaxis.set_major_locator(yminorLocator)
+
+    # plt.xticks(ax.get_xticks(), ax.get_xticklabels(which='both'))
+    # plt.setp(ax.get_xticklabels(which='both'), rotation=30, horizontalalignment='right')
+
+# sort annotation labels by their x coordinate (time)
+alabels.sort(key=lambda x: x[0][0])
+vlabels.sort(key=lambda x: x[0])
+
+lheight = 40
+loffset = -40
+count = len(alabels)
+for (duration, h), label in alabels:
+    ax.annotate(label,
+                fontsize=8,
+                xy=(duration, h), xycoords='data',
+                xytext=(loffset, lheight), textcoords='offset points',
+                bbox=dict(boxstyle="round", fc="0.9"),
+                arrowprops=dict(arrowstyle="->"),
+                horizontalalignment='center', verticalalignment='bottom')
+    loffset += round(80 / count, 0)
+    if loffset < 0:
+        lheight += 20
+    elif loffset > 0:
+        lheight -= 20
+
+for index, (duration, h) in enumerate(vlabels):
+    ax.vlines([duration], 0, h, colors="black", linestyles="dashed")
+    voff = -60
+    if index % 2 == 0:
+        voff = -75
+    ax.annotate(round(duration, 1),
+                fontsize=8,
+                xy=(duration, 0), xycoords='data',
+                xytext=(0, voff), textcoords='offset points',
+                bbox=dict(boxstyle="round", fc="0.9"),
+                arrowprops=dict(arrowstyle="-"),
+                horizontalalignment='center', verticalalignment='bottom')
+
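+# Editorial note: the axhline below marks a fixed reference height (11.5M
+# blocks) so that different sync runs can be compared at the same chain height.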
+plt.axhline(y=11_500_000, color='blue', linestyle='--')
+
+# plt.legend(tuple(lgs), tuple(args.label), loc='upper left', shadow=True)
+plt.title('CKB Block Sync Progress Chart')
+plt.xlabel('Time cost (hours)')
+plt.ylabel('Block Height')
+plt.savefig(result_path, bbox_inches='tight', dpi=300)
diff --git a/docs/ckb_async_block_sync.mermaid b/docs/ckb_async_block_sync.mermaid
new file mode 100644
index 0000000000..eb28cd0eb0
--- /dev/null
+++ b/docs/ckb_async_block_sync.mermaid
@@ -0,0 +1,81 @@
+sequenceDiagram
+    autonumber
+    participant Sr as Synchronizer::received
+    participant BP as BlockProcess
+    participant Sp as Synchronizer::poll
+    participant C as main thread
+    participant PU as PreloadUnverified thread
+    participant CV as ConsumeUnverifiedBlocks thread
+
+    box crate:ckb-sync
+    participant Sr
+    participant Sp
+    participant BP
+    end
+
+    box crate:ckb-chain
+    participant C
+    participant PU
+    participant CV
+    end
+
+    Note left of Sr: synchronizer received<br/>Block(122) from remote peer
+    Note over Sr: try_process SyncMessageUnionReader::SendBlock
+    Sr ->>+ BP: BlockProcess::execute(Block(122))
+    BP ->>+ C: asynchronous_process_block(Block(122))
+    Note over C: non_contextual_verify(Block(122))
+    Note over C: insert_block(Block(122))
+    Note over C: OrphanBroker.process_lonly_block(Block(122))
+
+    alt parent is BLOCK_STORED or parent is_pending_verifying
+        Note over C: OrphanBroker.process_lonly_block(Block(122))
+        Note over C: increase unverified_tip to Block(122)
+        C ->>+ PU: send Block(122) to PreloadUnverified via channel
+    else parent not found
+        Note over C: OrphanBroker.process_lonly_block(Block(122))
+        Note over C: insert Block(122) to OrphanBroker
+    end
+    C ->>- BP: return
+    BP ->>- Sr: return
+    Note left of Sr: synchronizer received<br/>Block(123) from remote peer
+    Note over Sr: try_process SyncMessageUnionReader::SendBlock
+    Sr ->>+ BP: BlockProcess::execute(Block(123))
+    BP ->>+ C: asynchronous_process_block(Block(123))
+    Note over C: non_contextual_verify(Block(123))
+    Note over C: insert_block(Block(123))
+    Note over C: OrphanBroker.process_lonly_block(Block(123))
+    alt parent is BLOCK_STORED or parent is_pending_verifying
+        Note over C: OrphanBroker.process_lonly_block(Block(123))
+        Note over C: increase unverified_tip to Block(123)
+        C ->>+ PU: send Block(123) to PreloadUnverified via channel
+    else parent not found
+        Note over C: OrphanBroker.process_lonly_block(Block(123))
+        Note over C: insert Block(123) to OrphanBroker
+    end
+    C ->>- BP: return
+    BP ->>- Sr: return
+
+    loop load unverified
+        Note over PU: receive LonelyBlockHash
+        Note over PU: load UnverifiedBlock from db
+        PU ->>+ CV: send UnverifiedBlock to ConsumeUnverifiedBlocks
+    end
+
+    loop Consume Unverified Blocks
+        Note over CV: start verifying UnverifiedBlock if the channel is not empty
+        Note over CV: Verify Block in CKB VM
+
+        alt Block is Valid
+            Note over CV: remove the block's entry from block_status_map and HeaderMap
+        else Block is Invalid
+            Note over CV: mark block as BLOCK_INVALID in block_status_map
+            Note over CV: Decrease Unverified TIP
+        end
+
+        opt Execute Callback
+            Note over CV: execute callback to punish the malicious peer if the block is invalid
+            Note over CV: callback: Box<dyn FnOnce(Result<VerifiedBlockStatus, Error>) + Send + Sync>
+
+        end
+    end
diff --git a/docs/ckb_sync.mermaid b/docs/ckb_sync.mermaid
new file mode 100644
index 0000000000..c24a7f0640
--- /dev/null
+++ b/docs/ckb_sync.mermaid
@@ -0,0 +1,50 @@
+sequenceDiagram
+    autonumber
+
+    participant S as Synchronizer
+    participant BP as BlockProcess
+    participant C as ChainService
+
+
+    box crate:ckb_sync
+    participant S
+    participant BP
+    end
+
+
+    box crate:ckb_chain
+    participant C
+    end
+
+    Note left of S: synchronizer received
Block(122) from remote peer + + Note over S: try_process SyncMessageUnionReader::SendBlock + + + S->>+BP: BlockProcess::execute(Block(122)) + BP->>+C: process_block(Block(122)) + Note over BP: waiting ChainService to return
the result of process_block(Block(122))
+    Note over C: insert_block(Block(122))
+    C->>-BP: return result of process_block(Block(122))
+    BP->>-S: return result of BlockProcess::execute(Block(122))
+
+    alt block is Valid
+        Note over S: continue
+    else block is Invalid
+        Note over S: punish the malicious peer
+    end
+
+    Note left of S: synchronizer received
Block(123) from remote peer + Note over S: try_process SyncMessageUnionReader::SendBlock + S->>+BP: BlockProcess::execute(Block(123)) + BP->>+C: process_block(Block(123)) + Note over BP: waiting ChainService to return
the result of process_block(Block(123))
+    Note over C: insert_block(Block(123))
+    C->>-BP: return result of process_block(Block(123))
+    BP->>-S: return result of BlockProcess::execute(Block(123))
+
+    alt block is Valid
+        Note over S: continue
+    else block is Invalid
+        Note over S: punish the malicious peer
+    end
diff --git a/error/src/lib.rs b/error/src/lib.rs
index 20db9982dc..2c2dfa575e 100644
--- a/error/src/lib.rs
+++ b/error/src/lib.rs
@@ -92,3 +92,24 @@ impl fmt::Debug for AnyError {
         self.0.fmt(f)
     }
 }
+/// Return whether the error's kind is `InternalErrorKind::Database` or
+/// `InternalErrorKind::System`
+///
+/// ### Panic
+///
+/// Panics if the error kind is `InternalErrorKind::DataCorrupted`.
+/// If the database is corrupted, panicking is better than handling it silently.
+pub fn is_internal_db_error(error: &Error) -> bool {
+    if error.kind() == ErrorKind::Internal {
+        let error_kind = error
+            .downcast_ref::<InternalError>()
+            .expect("error kind checked")
+            .kind();
+        if error_kind == InternalErrorKind::DataCorrupted {
+            panic!("{}", error)
+        } else {
+            return error_kind == InternalErrorKind::Database
+                || error_kind == InternalErrorKind::System;
+        }
+    }
+    false
+}
diff --git a/rpc/README.md b/rpc/README.md
index 016b32b8b8..79c5821b5f 100644
--- a/rpc/README.md
+++ b/rpc/README.md
@@ -4144,7 +4144,10 @@ Response
         "low_time": "0x5dc",
         "normal_time": "0x4e2",
         "orphan_blocks_count": "0x0",
-        "orphan_blocks_size": "0x0"
+        "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+        "tip_number": "0x400",
+        "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+        "unverified_tip_number": "0x400"
     }
 }
```
@@ -6859,7 +6862,13 @@ The overall chain synchronization state of this local node.
 
   If this number is too high, it indicates that block download has stuck at some block.
-* `orphan_blocks_size`: [`Uint64`](#type-uint64) - The size of all download orphan blocks
+* `tip_hash`: [`H256`](#type-h256) - The block hash of current tip block
+
+* `tip_number`: [`Uint64`](#type-uint64) - The block number of current tip block
+
+* `unverified_tip_hash`: [`H256`](#type-h256) - The block hash of current unverified tip block
+
+* `unverified_tip_number`: [`Uint64`](#type-uint64) - The block number of current unverified tip block
 
 ### Type `Timestamp`
diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs
index 814f12f91d..5641d84d13 100644
--- a/rpc/src/module/miner.rs
+++ b/rpc/src/module/miner.rs
@@ -1,6 +1,6 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version};
 use ckb_logger::{debug, error, info, warn};
 use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession};
@@ -278,7 +278,7 @@ impl MinerRpc for MinerRpcImpl {
         // Verify and insert block
         let is_new = self
             .chain
-            .process_block(Arc::clone(&block))
+            .blocking_process_block(Arc::clone(&block))
             .map_err(|err| handle_submit_error(&work_id, &err))?;
         info!(
             "end to submit block, work_id = {}, is_new = {}, block = #{}({})",
diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs
index 658011f80e..28022e304c 100644
--- a/rpc/src/module/net.rs
+++ b/rpc/src/module/net.rs
@@ -1,5 +1,6 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
+use ckb_chain::ChainController;
 use ckb_jsonrpc_types::{
     BannedAddr, LocalNode, LocalNodeProtocol, NodeAddress, PeerSyncState, RemoteNode,
     RemoteNodeProtocol, SyncState, Timestamp,
@@ -7,6 +8,7 @@ use ckb_jsonrpc_types::{
 use ckb_network::{extract_peer_id, multiaddr::Multiaddr, NetworkController};
 use ckb_sync::SyncShared;
 use ckb_systemtime::unix_time_as_millis;
+use ckb_types::prelude::Unpack;
 use jsonrpc_core::Result;
 use jsonrpc_utils::rpc;
 use std::sync::Arc;
@@ -369,7 +371,10 @@ pub trait NetRpc {
     ///     "low_time": "0x5dc",
     ///     "normal_time": "0x4e2",
     ///     "orphan_blocks_count": "0x0",
-    ///     "orphan_blocks_size": "0x0"
+    ///     "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+    ///     "tip_number": "0x400",
+    ///     "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+    ///     "unverified_tip_number": "0x400"
     ///   }
     /// }
     /// ```
@@ -538,6 +543,7 @@ pub trait NetRpc {
 pub(crate) struct NetRpcImpl {
     pub network_controller: NetworkController,
     pub sync_shared: Arc<SyncShared>,
+    pub chain_controller: Arc<ChainController>,
 }
 
 #[async_trait]
@@ -716,17 +722,22 @@ impl NetRpc for NetRpcImpl {
     fn sync_state(&self) -> Result<SyncState> {
         let chain = self.sync_shared.active_chain();
-        let state = chain.shared().state();
+        let shared = chain.shared();
+        let state = chain.state();
         let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point();
         let best_known = state.shared_best_header();
+        let unverified_tip = shared.get_unverified_tip();
         let sync_state = SyncState {
             ibd: chain.is_initial_block_download(),
             best_known_block_number: best_known.number().into(),
             best_known_block_timestamp: best_known.timestamp().into(),
-            orphan_blocks_count: (state.orphan_pool().len() as u64).into(),
-            orphan_blocks_size: (state.orphan_pool().total_size() as u64).into(),
+            orphan_blocks_count: (self.chain_controller.orphan_blocks_len() as u64).into(),
             inflight_blocks_count: (state.read_inflight_blocks().total_inflight_count() as u64)
                 .into(),
+            unverified_tip_number: unverified_tip.number().into(),
+            unverified_tip_hash: unverified_tip.hash().unpack(),
+            tip_number: chain.tip_number().into(),
+            tip_hash: chain.tip_hash().unpack(),
             fast_time: fast_time.into(),
             normal_time: normal_time.into(),
             low_time: low_time.into(),
diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs
index b5db29e0d7..72f2afbbc2 100644
--- a/rpc/src/module/test.rs
+++ b/rpc/src/module/test.rs
@@ -1,6 +1,6 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_dao::DaoCalculator;
 use ckb_jsonrpc_types::{Block, BlockTemplate, Byte32, EpochNumberWithFraction, Transaction};
 use ckb_logger::error;
@@ -514,8 +514,7 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl {
         let block: Arc<BlockView> = Arc::new(block.into_view());
         let ret = self
             .chain
-            .internal_process_block(Arc::clone(&block), Switch::DISABLE_ALL);
-
+            .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_ALL);
         if broadcast {
             let content = packed::CompactBlock::build_from_block(&block, &HashSet::new());
             let message = packed::RelayMessage::new_builder().set(content).build();
@@ -677,7 +676,7 @@ impl IntegrationTestRpcImpl {
         // insert block to chain
         self.chain
-            .process_block(Arc::clone(&block_view))
+            .blocking_process_block(Arc::clone(&block_view))
             .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?;
 
         // announce new block
diff --git a/rpc/src/service_builder.rs b/rpc/src/service_builder.rs
index 2daccf4c9b..20681d5484 100644
--- a/rpc/src/service_builder.rs
+++ b/rpc/src/service_builder.rs
@@ -9,7 +9,7 @@ use crate::module::{
 };
 use crate::{IoHandler, RPCError};
 use ckb_app_config::{DBConfig, IndexerConfig, RpcConfig};
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_indexer::IndexerService;
 use ckb_indexer_sync::{new_secondary_db, PoolService};
 use ckb_network::NetworkController;
@@ -103,10 +103,12 @@ impl<'a> ServiceBuilder<'a> {
         mut self,
         network_controller: NetworkController,
         sync_shared: Arc<SyncShared>,
+        chain_controller: Arc<ChainController>,
     ) -> Self {
         let methods = NetRpcImpl {
             network_controller,
             sync_shared,
+            chain_controller,
         };
         set_rpc_module_methods!(self, "Net", net_enable, add_net_rpc_methods, methods)
     }
diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs
index b59897bd22..5b3017d5d5 100644
--- a/rpc/src/tests/mod.rs
+++ b/rpc/src/tests/mod.rs
@@ -1,4 +1,4 @@
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_chain_spec::consensus::Consensus;
 use ckb_dao::DaoCalculator;
 use ckb_reward_calculator::RewardCalculator;
diff --git a/rpc/src/tests/module/miner.rs b/rpc/src/tests/module/miner.rs
index 14d1513be0..42f9bbb325 100644
--- a/rpc/src/tests/module/miner.rs
+++ b/rpc/src/tests/module/miner.rs
@@ -37,7 +37,7 @@ fn test_get_block_template_cache() {
         .build();
     suite
         .chain_controller
-        .process_block(Arc::new(fork_block))
+        .blocking_process_block(Arc::new(fork_block))
         .expect("processing new block should be ok");
 
     assert_eq!(response_old.result["uncles"].to_string(), "[]");
diff --git a/rpc/src/tests/setup.rs b/rpc/src/tests/setup.rs
index f47b4433e1..c587a8c98f 100644
--- a/rpc/src/tests/setup.rs
+++ b/rpc/src/tests/setup.rs
@@ -5,7 +5,7 @@ use crate::{
 use ckb_app_config::{
     BlockAssemblerConfig, NetworkAlertConfig, NetworkConfig, RpcConfig, RpcModule,
 };
-use ckb_chain::chain::ChainService;
+use ckb_chain::start_chain_services;
 use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder};
 use ckb_chain_spec::versionbits::{ActiveMode, Deployment, DeploymentPos};
 use ckb_dao_utils::genesis_dao_data;
@@ -88,8 +88,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option<Consensus>) ->
         }))
         .build()
         .unwrap();
-    let chain_controller =
-        ChainService::new(shared.clone(), pack.take_proposal_table()).start::<&str>(None);
+    let chain_controller = start_chain_services(pack.take_chain_services_builder());
 
     // Start network services
     let temp_dir = tempfile::tempdir().expect("create tmp_dir failed");
@@ -132,7 +131,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option<Consensus>) ->
     for _ in 0..height {
         let block = next_block(&shared, &parent.header());
         chain_controller
-            .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
+            .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION)
             .expect("processing new block should be ok");
         parent = block;
     }
@@ -207,7 +206,11 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option<Consensus>) ->
             chain_controller.clone(),
             true,
         )
-        .enable_net(network_controller.clone(), sync_shared)
+        .enable_net(
+            network_controller.clone(),
+            sync_shared,
+            Arc::new(chain_controller.clone()),
+        )
         .enable_stats(shared.clone(), Arc::clone(&alert_notifier))
         .enable_experiment(shared.clone())
         .enable_integration_test(
@@ -257,7 +260,7 @@ pub(crate) fn setup_rpc_test_suite(height: u64, consensus: Option<Consensus>) ->
         )
         .build();
     chain_controller
-        .internal_process_block(Arc::new(fork_block), Switch::DISABLE_EXTENSION)
+        .blocking_process_block_with_switch(Arc::new(fork_block), Switch::DISABLE_EXTENSION)
         .expect("processing new block should be ok");
 }
diff --git a/shared/Cargo.toml b/shared/Cargo.toml
index 9cb16e9729..465ed2e2a2 100644
--- a/shared/Cargo.toml
+++ b/shared/Cargo.toml
@@ -27,11 +27,16 @@ ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.116.0-pre" }
 ckb-constant = { path = "../util/constant", version = "= 0.116.0-pre" }
 ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre" }
 ckb-channel = { path = "../util/channel", version = "= 0.116.0-pre" }
-ckb-app-config = {path = "../util/app-config", version = "= 0.116.0-pre"}
+ckb-app-config = { path = "../util/app-config", version = "= 0.116.0-pre" }
 ckb-migrate = { path = "../util/migrate", version = "= 0.116.0-pre" }
 once_cell = "1.8.0"
+ckb-util = { path = "../util", version = "= 0.116.0-pre" }
+ckb-metrics = { path = "../util/metrics", version = "= 0.116.0-pre" }
+bitflags = "1.0"
+tokio = { version = "1", features = ["sync"] }
 tempfile.workspace = true
-
+sled = "0.34.7"
+dashmap = "4.0"
 
 [dev-dependencies]
 ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre", features = ["enable_faketime"] }
@@ -39,3 +44,4 @@ ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre", features = ["enable_faketime"] }
 [features]
 portable = ["ckb-db/portable", "ckb-store/portable", "ckb-tx-pool/portable", "ckb-migrate/portable"]
 march-native = ["ckb-db/march-native", "ckb-store/march-native", "ckb-tx-pool/march-native", "ckb-migrate/march-native"]
+stats = []
diff --git a/shared/src/block_status.rs b/shared/src/block_status.rs
new file mode 100644
index 0000000000..a7092a45c3
--- /dev/null
+++ b/shared/src/block_status.rs
@@ -0,0 +1,17 @@
+//! Provide BlockStatus
+#![allow(missing_docs)]
+#![allow(clippy::bad_bit_mask)]
+
+use bitflags::bitflags;
+bitflags! {
+    pub struct BlockStatus: u32 {
+        const UNKNOWN = 0;
+
+        const HEADER_VALID = 1;
+        const BLOCK_RECEIVED = 1 | Self::HEADER_VALID.bits << 1;
+        const BLOCK_STORED = 1 | Self::BLOCK_RECEIVED.bits << 1;
+        const BLOCK_VALID = 1 | Self::BLOCK_STORED.bits << 1;
+
+        const BLOCK_INVALID = 1 << 12;
+    }
+}
diff --git a/shared/src/chain_services_builder.rs b/shared/src/chain_services_builder.rs
new file mode 100644
index 0000000000..3260971157
--- /dev/null
+++ b/shared/src/chain_services_builder.rs
@@ -0,0 +1,18 @@
+//! chain_services_builder provides ChainServicesBuilder to build chain services
+#![allow(missing_docs)]
+use crate::Shared;
+use ckb_proposal_table::ProposalTable;
+
+pub struct ChainServicesBuilder {
+    pub shared: Shared,
+    pub proposal_table: ProposalTable,
+}
+
+impl ChainServicesBuilder {
+    pub fn new(shared: Shared, proposal_table: ProposalTable) -> Self {
+        ChainServicesBuilder {
+            shared,
+            proposal_table,
+        }
+    }
+}
diff --git a/shared/src/lib.rs b/shared/src/lib.rs
index 63bfa56a35..8c3c27b843 100644
--- a/shared/src/lib.rs
+++ b/shared/src/lib.rs
@@ -1,9 +1,16 @@
 //! TODO(doc): @quake
 // num_cpus is used in proc_macro
+pub mod chain_services_builder;
 pub mod shared;
 pub mod shared_builder;
 
+pub use chain_services_builder::ChainServicesBuilder;
 pub use ckb_snapshot::{Snapshot, SnapshotMgr};
 pub use shared::Shared;
 pub use shared_builder::{SharedBuilder, SharedPackage};
+
+pub mod block_status;
+pub mod types;
+
+pub use types::header_map::HeaderMap;
+pub use types::{HeaderIndex, HeaderIndexView};
diff --git a/shared/src/shared.rs b/shared/src/shared.rs
index fc3e9fea04..94eb6065c9 100644
--- a/shared/src/shared.rs
+++ b/shared/src/shared.rs
@@ -1,6 +1,8 @@
-//! TODO(doc): @quake
-use crate::{Snapshot, SnapshotMgr};
-use arc_swap::Guard;
+//! Provide Shared
+#![allow(missing_docs)]
+use crate::block_status::BlockStatus;
+use crate::{HeaderMap, Snapshot, SnapshotMgr};
+use arc_swap::{ArcSwap, Guard};
 use ckb_async_runtime::Handle;
 use ckb_chain_spec::consensus::Consensus;
 use ckb_constant::store::TX_INDEX_UPPER_BOUND;
@@ -8,6 +10,7 @@ use ckb_constant::sync::MAX_TIP_AGE;
 use ckb_db::{Direction, IteratorMode};
 use ckb_db_schema::{COLUMN_BLOCK_BODY, COLUMN_NUMBER_HASH};
 use ckb_error::{AnyError, Error};
+use ckb_logger::debug;
 use ckb_notify::NotifyController;
 use ckb_proposal_table::ProposalView;
 use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread};
@@ -18,9 +21,11 @@ use ckb_types::{
     core::{BlockNumber, EpochExt, EpochNumber, HeaderView, Version},
     packed::{self, Byte32},
     prelude::*,
-    U256,
+    H256, U256,
 };
+use ckb_util::{shrink_to_fit, Mutex, MutexGuard};
 use ckb_verification::cache::TxVerificationCache;
+use dashmap::DashMap;
 use std::cmp;
 use std::collections::BTreeMap;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -32,6 +37,8 @@ const FREEZER_INTERVAL: Duration = Duration::from_secs(60);
 const THRESHOLD_EPOCH: EpochNumber = 2;
 const MAX_FREEZE_LIMIT: BlockNumber = 30_000;
 
+pub const SHRINK_THRESHOLD: usize = 300;
+
 /// An owned permission to close on a freezer thread
 pub struct FreezerClose {
     stopped: Arc<AtomicBool>,
@@ -54,6 +61,12 @@ pub struct Shared {
     pub(crate) snapshot_mgr: Arc<SnapshotMgr>,
     pub(crate) async_handle: Handle,
     pub(crate) ibd_finished: Arc<AtomicBool>,
+
+    pub assume_valid_target: Arc<Mutex<Option<H256>>>,
+
+    pub header_map: Arc<HeaderMap>,
+    pub(crate) block_status_map: Arc<DashMap<Byte32, BlockStatus>>,
+    pub(crate) unverified_tip: Arc<ArcSwap<crate::HeaderIndex>>,
 }
 
 impl Shared {
@@ -68,7 +81,20 @@ impl Shared {
         snapshot_mgr: Arc<SnapshotMgr>,
         async_handle: Handle,
         ibd_finished: Arc<AtomicBool>,
+
+        assume_valid_target: Arc<Mutex<Option<H256>>>,
+        header_map: Arc<HeaderMap>,
+        block_status_map: Arc<DashMap<Byte32, BlockStatus>>,
     ) -> Shared {
+        let header = store
+            .get_tip_header()
+            .unwrap_or(consensus.genesis_block().header());
+        let unverified_tip = Arc::new(ArcSwap::new(Arc::new(crate::HeaderIndex::new(
+            header.number(),
+            header.hash(),
+            header.difficulty(),
+        ))));
+
         Shared {
             store,
             tx_pool_controller,
@@ -78,6 +104,10 @@ impl Shared {
             snapshot_mgr,
             async_handle,
             ibd_finished,
+            assume_valid_target,
+            header_map,
+            block_status_map,
+            unverified_tip,
         }
     }
 
     /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer.
@@ -370,4 +400,71 @@ impl Shared {
             max_version.map(Into::into),
         )
     }
+
+    pub fn set_unverified_tip(&self, header: crate::HeaderIndex) {
+        self.unverified_tip.store(Arc::new(header));
+    }
+    pub fn get_unverified_tip(&self) -> crate::HeaderIndex {
+        self.unverified_tip.load().as_ref().clone()
+    }
+
+    pub fn header_map(&self) -> &HeaderMap {
+        &self.header_map
+    }
+    pub fn remove_header_view(&self, hash: &Byte32) {
+        self.header_map.remove(hash);
+    }
+
+    pub fn block_status_map(&self) -> &DashMap<Byte32, BlockStatus> {
+        &self.block_status_map
+    }
+
+    pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus {
+        match self.block_status_map().get(block_hash) {
+            Some(status_ref) => *status_ref.value(),
+            None => {
+                if self.header_map().contains_key(block_hash) {
+                    BlockStatus::HEADER_VALID
+                } else {
+                    let verified = self
+                        .snapshot()
+                        .get_block_ext(block_hash)
+                        .map(|block_ext| block_ext.verified);
+                    match verified {
+                        None => BlockStatus::UNKNOWN,
+                        Some(None) => BlockStatus::BLOCK_STORED,
+                        Some(Some(true)) => BlockStatus::BLOCK_VALID,
+                        Some(Some(false)) => BlockStatus::BLOCK_INVALID,
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn contains_block_status(
+        &self,
+        block_hash: &Byte32,
+        status: BlockStatus,
+    ) -> bool {
+        self.get_block_status(block_hash).contains(status)
+    }
+
+    pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) {
+        self.block_status_map.insert(block_hash, status);
+    }
+
+    pub fn remove_block_status(&self, block_hash: &Byte32) {
+        let log_now = std::time::Instant::now();
+        self.block_status_map.remove(block_hash);
+        debug!("remove_block_status cost {:?}", log_now.elapsed());
+        shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD);
+        debug!(
+            "remove_block_status shrink_to_fit cost {:?}",
+            log_now.elapsed()
+        );
+    }
+
+    pub fn assume_valid_target(&self) -> MutexGuard<Option<H256>> {
+        self.assume_valid_target.lock()
+    }
 }
diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs
index 985add3ba0..0780aae79e 100644
--- a/shared/src/shared_builder.rs
+++ b/shared/src/shared_builder.rs
@@ -1,34 +1,35 @@
 //! shared_builder provides SharedBuilder and SharedPackage
-use ckb_channel::Receiver;
-use ckb_proposal_table::ProposalTable;
-use ckb_tx_pool::service::TxVerificationResult;
-use ckb_tx_pool::{TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder};
-use std::cmp::Ordering;
-
-use ckb_chain_spec::consensus::Consensus;
-use ckb_chain_spec::SpecError;
-
-use crate::Shared;
-use ckb_proposal_table::ProposalView;
-use ckb_snapshot::{Snapshot, SnapshotMgr};
-
+use crate::ChainServicesBuilder;
+use crate::{HeaderMap, Shared};
 use ckb_app_config::{
-    BlockAssemblerConfig, DBConfig, ExitCode, NotifyConfig, StoreConfig, TxPoolConfig,
+    BlockAssemblerConfig, DBConfig, ExitCode, HeaderMapConfig, NotifyConfig, StoreConfig,
+    SyncConfig, TxPoolConfig,
 };
 use ckb_async_runtime::{new_background_runtime, Handle};
+use ckb_chain_spec::consensus::Consensus;
+use ckb_chain_spec::SpecError;
+use ckb_channel::Receiver;
 use ckb_db::RocksDB;
 use ckb_db_schema::COLUMNS;
 use ckb_error::{Error, InternalErrorKind};
 use ckb_logger::{error, info};
 use ckb_migrate::migrate::Migrate;
 use ckb_notify::{NotifyController, NotifyService};
+use ckb_proposal_table::ProposalTable;
+use ckb_proposal_table::ProposalView;
+use ckb_snapshot::{Snapshot, SnapshotMgr};
 use ckb_store::{ChainDB, ChainStore, Freezer};
+use ckb_tx_pool::{
+    service::TxVerificationResult, TokioRwLock, TxEntry, TxPool, TxPoolServiceBuilder,
+};
 use ckb_types::core::hardfork::HardForks;
-use ckb_types::core::service::PoolTransactionEntry;
-use ckb_types::core::tx_pool::Reject;
-use ckb_types::core::EpochExt;
-use ckb_types::core::HeaderView;
+use ckb_types::{
+    core::service::PoolTransactionEntry, core::tx_pool::Reject, core::EpochExt, core::HeaderView,
+};
+use ckb_util::Mutex;
 use ckb_verification::cache::init_cache;
+use dashmap::DashMap;
+use std::cmp::Ordering;
 use std::collections::HashSet;
 use std::path::{Path, PathBuf};
 use std::sync::atomic::AtomicBool;
@@ -42,9 +43,13 @@ pub struct SharedBuilder {
     consensus: Consensus,
     tx_pool_config: Option<TxPoolConfig>,
     store_config: Option<StoreConfig>,
+    sync_config: Option<SyncConfig>,
     block_assembler_config: Option<BlockAssemblerConfig>,
     notify_config: Option<NotifyConfig>,
     async_handle: Handle,
+
+    header_map_memory_limit: Option<usize>,
+    header_map_tmp_dir: Option<PathBuf>,
 }
 
 /// Open or create a rocksdb
@@ -146,8 +151,11 @@ impl SharedBuilder {
             tx_pool_config: None,
             notify_config: None,
             store_config: None,
+            sync_config: None,
             block_assembler_config: None,
             async_handle,
+            header_map_memory_limit: None,
+            header_map_tmp_dir: None,
         })
     }
 
@@ -191,8 +199,12 @@ impl SharedBuilder {
             tx_pool_config: None,
             notify_config: None,
             store_config: None,
+            sync_config: None,
             block_assembler_config: None,
             async_handle: runtime.get_or_init(new_background_runtime).clone(),
+
+            header_map_memory_limit: None,
+            header_map_tmp_dir: None,
         })
     }
 }
@@ -222,6 +234,18 @@ impl SharedBuilder {
         self
     }
 
+    /// TODO(doc): @eval-exec
+    pub fn sync_config(mut self, config: SyncConfig) -> Self {
+        self.sync_config = Some(config);
+        self
+    }
+
+    /// TODO(doc): @eval-exec
+    pub fn header_map_tmp_dir(mut self, header_map_tmp_dir: Option<PathBuf>) -> Self {
+        self.header_map_tmp_dir = header_map_tmp_dir;
+        self
+    }
+
     /// TODO(doc): @quake
     pub fn block_assembler_config(mut self, config: Option<BlockAssemblerConfig>) -> Self {
         self.block_assembler_config = config;
@@ -325,14 +349,30 @@
             consensus,
             tx_pool_config,
             store_config,
+            sync_config,
             block_assembler_config,
             notify_config,
             async_handle,
+            header_map_memory_limit,
+            header_map_tmp_dir,
         } = self;
 
+        let header_map_memory_limit = header_map_memory_limit
+            .unwrap_or(HeaderMapConfig::default().memory_limit.as_u64() as usize);
+
+        let ibd_finished = Arc::new(AtomicBool::new(false));
+
+        let header_map = Arc::new(HeaderMap::new(
+            header_map_tmp_dir,
+            header_map_memory_limit,
+            &async_handle.clone(),
+            Arc::clone(&ibd_finished),
+        ));
+
         let tx_pool_config = tx_pool_config.unwrap_or_default();
         let notify_config = notify_config.unwrap_or_default();
         let store_config = store_config.unwrap_or_default();
+        let sync_config = sync_config.unwrap_or_default();
         let consensus = Arc::new(consensus);
 
         let notify_controller = start_notify_service(notify_config, async_handle.clone());
@@ -365,7 +405,9 @@ impl SharedBuilder {
 
         register_tx_pool_callback(&mut tx_pool_builder, notify_controller.clone());
 
-        let ibd_finished = Arc::new(AtomicBool::new(false));
+        let block_status_map = Arc::new(DashMap::new());
+
+        let assume_valid_target = Arc::new(Mutex::new(sync_config.assume_valid_target));
         let shared = Shared::new(
             store,
             tx_pool_controller,
@@ -375,10 +417,15 @@ impl SharedBuilder {
             snapshot_mgr,
             async_handle,
             ibd_finished,
+            assume_valid_target,
+            header_map,
+            block_status_map,
         );
 
+        let chain_services_builder = ChainServicesBuilder::new(shared.clone(), table);
+
         let pack = SharedPackage {
-            table: Some(table),
+            chain_services_builder: Some(chain_services_builder),
            tx_pool_builder: Some(tx_pool_builder),
             relay_tx_receiver: Some(receiver),
         };
@@ -387,6 +434,53 @@ impl SharedBuilder {
     }
 }
 
+/// SharedBuilder build returning the shared/package halves
+/// The package structs used for init other component
+pub struct SharedPackage {
+    chain_services_builder: Option<ChainServicesBuilder>,
+    tx_pool_builder: Option<TxPoolServiceBuilder>,
+    relay_tx_receiver: Option<Receiver<TxVerificationResult>>,
+}
+
+impl SharedPackage {
+    /// Takes the chain_services_builder out of the package, leaving a None in its place.
+    pub fn take_chain_services_builder(&mut self) -> ChainServicesBuilder {
+        self.chain_services_builder
+            .take()
+            .expect("take chain_services_builder")
+    }
+
+    /// Takes the tx_pool_builder out of the package, leaving a None in its place.
+    pub fn take_tx_pool_builder(&mut self) -> TxPoolServiceBuilder {
+        self.tx_pool_builder.take().expect("take tx_pool_builder")
+    }
+
+    /// Takes the relay_tx_receiver out of the package, leaving a None in its place.
+ pub fn take_relay_tx_receiver(&mut self) -> Receiver { + self.relay_tx_receiver + .take() + .expect("take relay_tx_receiver") + } +} + +fn start_notify_service(notify_config: NotifyConfig, handle: Handle) -> NotifyController { + NotifyService::new(notify_config, handle).start() +} + +fn build_store( + db: RocksDB, + store_config: StoreConfig, + ancient_path: Option, +) -> Result { + let store = if store_config.freezer_enable && ancient_path.is_some() { + let freezer = Freezer::open(ancient_path.expect("exist checked"))?; + ChainDB::new_with_freezer(db, freezer, store_config) + } else { + ChainDB::new(db, store_config) + }; + Ok(store) +} + fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: NotifyController) { let notify_pending = notify.clone(); @@ -439,48 +533,3 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: }, )); } - -fn start_notify_service(notify_config: NotifyConfig, handle: Handle) -> NotifyController { - NotifyService::new(notify_config, handle).start() -} - -fn build_store( - db: RocksDB, - store_config: StoreConfig, - ancient_path: Option, -) -> Result { - let store = if store_config.freezer_enable && ancient_path.is_some() { - let freezer = Freezer::open(ancient_path.expect("exist checked"))?; - ChainDB::new_with_freezer(db, freezer, store_config) - } else { - ChainDB::new(db, store_config) - }; - Ok(store) -} - -/// SharedBuilder build returning the shared/package halves -/// The package structs used for init other component -pub struct SharedPackage { - table: Option, - tx_pool_builder: Option, - relay_tx_receiver: Option>, -} - -impl SharedPackage { - /// Takes the proposal_table out of the package, leaving a None in its place. - pub fn take_proposal_table(&mut self) -> ProposalTable { - self.table.take().expect("take proposal_table") - } - - /// Takes the tx_pool_builder out of the package, leaving a None in its place. - pub fn take_tx_pool_builder(&mut self) -> TxPoolServiceBuilder { - self.tx_pool_builder.take().expect("take tx_pool_builder") - } - - /// Takes the relay_tx_receiver out of the package, leaving a None in its place. 
- pub fn take_relay_tx_receiver(&mut self) -> Receiver { - self.relay_tx_receiver - .take() - .expect("take relay_tx_receiver") - } -} diff --git a/sync/src/types/header_map/backend.rs b/shared/src/types/header_map/backend.rs similarity index 100% rename from sync/src/types/header_map/backend.rs rename to shared/src/types/header_map/backend.rs diff --git a/sync/src/types/header_map/backend_sled.rs b/shared/src/types/header_map/backend_sled.rs similarity index 100% rename from sync/src/types/header_map/backend_sled.rs rename to shared/src/types/header_map/backend_sled.rs diff --git a/sync/src/types/header_map/kernel_lru.rs b/shared/src/types/header_map/kernel_lru.rs similarity index 79% rename from sync/src/types/header_map/kernel_lru.rs rename to shared/src/types/header_map/kernel_lru.rs index f9d5eba2c7..46dba8eb35 100644 --- a/sync/src/types/header_map/kernel_lru.rs +++ b/shared/src/types/header_map/kernel_lru.rs @@ -1,7 +1,10 @@ use std::path; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; #[cfg(feature = "stats")] -use ckb_logger::trace; +use ckb_logger::info; +use ckb_metrics::HistogramTimer; #[cfg(feature = "stats")] use ckb_util::{Mutex, MutexGuard}; @@ -18,6 +21,8 @@ where pub(crate) backend: Backend, // Configuration memory_limit: usize, + // if ckb is in IBD mode, don't shrink memory map + ibd_finished: Arc, // Statistics #[cfg(feature = "stats")] stats: Mutex, @@ -43,7 +48,11 @@ impl HeaderMapKernel where Backend: KeyValueBackend, { - pub(crate) fn new
<P>(tmpdir: Option<P>, memory_limit: usize) -> Self
+    pub(crate) fn new<P>(
+        tmpdir: Option<P>
, + memory_limit: usize, + ibd_finished: Arc, + ) -> Self where P: AsRef, { @@ -56,6 +65,7 @@ where memory, backend, memory_limit, + ibd_finished, } } @@ -65,6 +75,7 @@ where memory, backend, memory_limit, + ibd_finished, stats: Mutex::new(HeaderMapKernelStats::new(50_000)), } } @@ -76,8 +87,15 @@ where self.stats().tick_primary_contain(); } if self.memory.contains_key(hash) { + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.hit.inc() + } return true; } + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.miss.inc(); + } + if self.backend.is_empty() { return false; } @@ -94,8 +112,16 @@ where self.stats().tick_primary_select(); } if let Some(view) = self.memory.get_refresh(hash) { + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.hit.inc(); + } return Some(view); } + + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_hit_miss_count.miss.inc(); + } + if self.backend.is_empty() { return None; } @@ -130,7 +156,9 @@ where self.trace(); self.stats().tick_primary_delete(); } - self.memory.remove(hash); + // If IBD is not finished, don't shrink memory map + let allow_shrink_to_fit = self.ibd_finished.load(Ordering::Relaxed); + self.memory.remove(hash, allow_shrink_to_fit); if self.backend.is_empty() { return; } @@ -138,12 +166,18 @@ where } pub(crate) fn limit_memory(&self) { + let _trace_timer: Option = ckb_metrics::handle() + .map(|handle| handle.ckb_header_map_limit_memory_duration.start_timer()); + if let Some(values) = self.memory.front_n(self.memory_limit) { tokio::task::block_in_place(|| { self.backend.insert_batch(&values); }); + + // If IBD is not finished, don't shrink memory map + let allow_shrink_to_fit = self.ibd_finished.load(Ordering::Relaxed); self.memory - .remove_batch(values.iter().map(|value| value.hash())); + .remove_batch(values.iter().map(|value| value.hash()), allow_shrink_to_fit); } } @@ -153,7 +187,7 @@ where let progress = stats.trace_progress(); let frequency = stats.frequency(); if progress % frequency == 0 { - trace!( + info!( "Header Map Statistics\ \n>\t| storage | length | limit | contain | select | insert | delete |\ \n>\t|---------+---------+---------+---------+------------+---------+---------|\ diff --git a/sync/src/types/header_map/memory.rs b/shared/src/types/header_map/memory.rs similarity index 73% rename from sync/src/types/header_map/memory.rs rename to shared/src/types/header_map/memory.rs index 0411e8c671..7a01b83891 100644 --- a/sync/src/types/header_map/memory.rs +++ b/shared/src/types/header_map/memory.rs @@ -1,4 +1,4 @@ -use crate::types::{HeaderIndexView, SHRINK_THRESHOLD}; +use crate::types::HeaderIndexView; use ckb_types::{ core::{BlockNumber, EpochNumberWithFraction}, packed::Byte32, @@ -7,6 +7,8 @@ use ckb_types::{ use ckb_util::{shrink_to_fit, LinkedHashMap, RwLock}; use std::default; +const SHRINK_THRESHOLD: usize = 300; + #[derive(Clone, Debug, PartialEq, Eq)] struct HeaderIndexViewInner { number: BlockNumber, @@ -93,14 +95,29 @@ impl MemoryMap { pub(crate) fn insert(&self, header: HeaderIndexView) -> Option<()> { let mut guard = self.0.write(); let (key, value) = header.into(); - guard.insert(key, value).map(|_| ()) + let ret = guard.insert(key, value); + if ret.is_none() { + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.inc(); + } + } + ret.map(|_| ()) } - pub(crate) fn remove(&self, key: &Byte32) -> Option { + pub(crate) fn remove(&self, 
key: &Byte32, shrink_to_fit: bool) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); - shrink_to_fit!(guard, SHRINK_THRESHOLD); - ret.map(|inner| (key.clone(), inner).into()) + + if shrink_to_fit { + shrink_to_fit!(guard, SHRINK_THRESHOLD); + } + ret.map(|inner| { + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.dec(); + } + + (key.clone(), inner).into() + }) } pub(crate) fn front_n(&self, size_limit: usize) -> Option> { @@ -120,11 +137,21 @@ impl MemoryMap { } } - pub(crate) fn remove_batch(&self, keys: impl Iterator) { + pub(crate) fn remove_batch(&self, keys: impl Iterator, shrink_to_fit: bool) { let mut guard = self.0.write(); + let mut keys_count = 0; for key in keys { - guard.remove(&key); + if let Some(_old_value) = guard.remove(&key) { + keys_count += 1; + } + } + + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_header_map_memory_count.sub(keys_count) + } + + if shrink_to_fit { + shrink_to_fit!(guard, SHRINK_THRESHOLD); } - shrink_to_fit!(guard, SHRINK_THRESHOLD); } } diff --git a/sync/src/types/header_map/mod.rs b/shared/src/types/header_map/mod.rs similarity index 57% rename from sync/src/types/header_map/mod.rs rename to shared/src/types/header_map/mod.rs index 78939164b6..e7536e5cf2 100644 --- a/sync/src/types/header_map/mod.rs +++ b/shared/src/types/header_map/mod.rs @@ -2,10 +2,12 @@ use ckb_async_runtime::Handle; use ckb_logger::info; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; +use std::sync::atomic::AtomicBool; use std::sync::Arc; use std::time::Duration; use std::{mem::size_of, path}; +use ckb_metrics::HistogramTimer; use tokio::time::MissedTickBehavior; mod backend; @@ -24,12 +26,17 @@ pub struct HeaderMap { inner: Arc>, } -const INTERVAL: Duration = Duration::from_millis(500); +const INTERVAL: Duration = Duration::from_millis(5000); const ITEM_BYTES_SIZE: usize = size_of::(); const WARN_THRESHOLD: usize = ITEM_BYTES_SIZE * 100_000; impl HeaderMap { - pub(crate) fn new
<P>(tmpdir: Option<P>, memory_limit: usize, async_handle: &Handle) -> Self
+    pub fn new<P>(
+        tmpdir: Option<P>
, + memory_limit: usize, + async_handle: &Handle, + ibd_finished: Arc, + ) -> Self where P: AsRef, { @@ -43,7 +50,7 @@ impl HeaderMap { ); } let size_limit = memory_limit / ITEM_BYTES_SIZE; - let inner = Arc::new(HeaderMapKernel::new(tmpdir, size_limit)); + let inner = Arc::new(HeaderMapKernel::new(tmpdir, size_limit, ibd_finished)); let map = Arc::clone(&inner); let stop_rx: CancellationToken = new_tokio_exit_rx(); @@ -66,19 +73,46 @@ impl HeaderMap { Self { inner } } - pub(crate) fn contains_key(&self, hash: &Byte32) -> bool { + pub fn contains_key(&self, hash: &Byte32) -> bool { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["contains_key"]) + .start_timer() + }); + self.inner.contains_key(hash) } - pub(crate) fn get(&self, hash: &Byte32) -> Option { + pub fn get(&self, hash: &Byte32) -> Option { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["get"]) + .start_timer() + }); self.inner.get(hash) } - pub(crate) fn insert(&self, view: HeaderIndexView) -> Option<()> { + pub fn insert(&self, view: HeaderIndexView) -> Option<()> { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["insert"]) + .start_timer() + }); + self.inner.insert(view) } - pub(crate) fn remove(&self, hash: &Byte32) { + pub fn remove(&self, hash: &Byte32) { + let _trace_timer: Option = ckb_metrics::handle().map(|metric| { + metric + .ckb_header_map_ops_duration + .with_label_values(&["remove"]) + .start_timer() + }); + self.inner.remove(hash) } } diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs new file mode 100644 index 0000000000..22653eff68 --- /dev/null +++ b/shared/src/types/mod.rs @@ -0,0 +1,307 @@ +#![allow(missing_docs)] +use ckb_types::core::{BlockNumber, EpochNumberWithFraction}; +use ckb_types::packed::Byte32; +use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Reader}; +use ckb_types::{packed, U256}; + +pub mod header_map; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderIndexView { + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + skip_hash: Option, +} + +impl HeaderIndexView { + pub fn new( + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + ) -> Self { + HeaderIndexView { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash: None, + } + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn epoch(&self) -> EpochNumberWithFraction { + self.epoch + } + + pub fn timestamp(&self) -> u64 { + self.timestamp + } + + pub fn total_difficulty(&self) -> &U256 { + &self.total_difficulty + } + + pub fn parent_hash(&self) -> Byte32 { + self.parent_hash.clone() + } + + pub fn skip_hash(&self) -> Option<&Byte32> { + self.skip_hash.as_ref() + } + + // deserialize from bytes + fn from_slice_should_be_ok(hash: &[u8], slice: &[u8]) -> Self { + let hash = packed::Byte32Reader::from_slice_should_be_ok(hash).to_entity(); + let number = BlockNumber::from_le_bytes(slice[0..8].try_into().expect("stored slice")); + let epoch = EpochNumberWithFraction::from_full_value(u64::from_le_bytes( + slice[8..16].try_into().expect("stored slice"), + )); + let timestamp = 
u64::from_le_bytes(slice[16..24].try_into().expect("stored slice")); + let parent_hash = packed::Byte32Reader::from_slice_should_be_ok(&slice[24..56]).to_entity(); + let total_difficulty = U256::from_little_endian(&slice[56..88]).expect("stored slice"); + let skip_hash = if slice.len() == 120 { + Some(packed::Byte32Reader::from_slice_should_be_ok(&slice[88..120]).to_entity()) + } else { + None + }; + Self { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } + } + + // serialize all fields except `hash` to bytes + fn to_vec(&self) -> Vec { + let mut v = Vec::new(); + v.extend_from_slice(self.number.to_le_bytes().as_slice()); + v.extend_from_slice(self.epoch.full_value().to_le_bytes().as_slice()); + v.extend_from_slice(self.timestamp.to_le_bytes().as_slice()); + v.extend_from_slice(self.parent_hash.as_slice()); + v.extend_from_slice(self.total_difficulty.to_le_bytes().as_slice()); + if let Some(ref skip_hash) = self.skip_hash { + v.extend_from_slice(skip_hash.as_slice()); + } + v + } + + pub fn build_skip(&mut self, tip_number: BlockNumber, get_header_view: F, fast_scanner: G) + where + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + { + if self.number == 0 { + return; + } + self.skip_hash = self + .get_ancestor( + tip_number, + get_skip_height(self.number()), + get_header_view, + fast_scanner, + ) + .map(|header| header.hash()); + } + + pub fn get_ancestor( + &self, + tip_number: BlockNumber, + number: BlockNumber, + get_header_view: F, + fast_scanner: G, + ) -> Option + where + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + { + if number > self.number() { + return None; + } + + let mut current = self.clone(); + let mut number_walk = current.number(); + while number_walk > number { + let number_skip = get_skip_height(number_walk); + let number_skip_prev = get_skip_height(number_walk - 1); + let store_first = current.number() <= tip_number; + match current.skip_hash { + Some(ref hash) + if number_skip == number + || (number_skip > number + && !(number_skip_prev + 2 < number_skip + && number_skip_prev >= number)) => + { + // Only follow skip if parent->skip isn't better than skip->parent + current = get_header_view(hash, store_first)?; + number_walk = number_skip; + } + _ => { + current = get_header_view(¤t.parent_hash(), store_first)?; + number_walk -= 1; + } + } + if let Some(target) = fast_scanner(number, (current.number(), current.hash()).into()) { + current = target; + break; + } + } + Some(current) + } + + pub fn as_header_index(&self) -> HeaderIndex { + HeaderIndex::new(self.number(), self.hash(), self.total_difficulty().clone()) + } + + pub fn number_and_hash(&self) -> BlockNumberAndHash { + (self.number(), self.hash()).into() + } + + pub fn is_better_than(&self, total_difficulty: &U256) -> bool { + self.total_difficulty() > total_difficulty + } +} + +impl From<(ckb_types::core::HeaderView, U256)> for HeaderIndexView { + fn from((header, total_difficulty): (ckb_types::core::HeaderView, U256)) -> Self { + HeaderIndexView { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + total_difficulty, + skip_hash: None, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderIndex { + number: BlockNumber, + hash: Byte32, + total_difficulty: U256, +} + +impl HeaderIndex { + pub fn new(number: BlockNumber, hash: Byte32, total_difficulty: U256) -> Self { + HeaderIndex { + 
number, + hash, + total_difficulty, + } + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } + + pub fn total_difficulty(&self) -> &U256 { + &self.total_difficulty + } + + pub fn number_and_hash(&self) -> BlockNumberAndHash { + (self.number(), self.hash()).into() + } + + pub fn is_better_chain(&self, other: &Self) -> bool { + self.is_better_than(other.total_difficulty()) + } + + pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { + self.total_difficulty() > other_total_difficulty + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct BlockNumberAndHash { + pub number: BlockNumber, + pub hash: Byte32, +} + +impl BlockNumberAndHash { + pub fn new(number: BlockNumber, hash: Byte32) -> Self { + Self { number, hash } + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } +} + +impl From<(BlockNumber, Byte32)> for BlockNumberAndHash { + fn from(inner: (BlockNumber, Byte32)) -> Self { + Self { + number: inner.0, + hash: inner.1, + } + } +} + +impl From<&ckb_types::core::HeaderView> for BlockNumberAndHash { + fn from(header: &ckb_types::core::HeaderView) -> Self { + Self { + number: header.number(), + hash: header.hash(), + } + } +} + +impl From for BlockNumberAndHash { + fn from(header: ckb_types::core::HeaderView) -> Self { + Self { + number: header.number(), + hash: header.hash(), + } + } +} + +// Compute what height to jump back to with the skip pointer. +fn get_skip_height(height: BlockNumber) -> BlockNumber { + // Turn the lowest '1' bit in the binary representation of a number into a '0'. + fn invert_lowest_one(n: i64) -> i64 { + n & (n - 1) + } + + if height < 2 { + return 0; + } + + // Determine which height to jump back to. Any number strictly lower than height is acceptable, + // but the following expression seems to perform well in simulations (max 110 steps to go back + // up to 2**18 blocks). 
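Editor's note: the expression added in the next few lines is compact enough to misread, so here is a self-contained sketch of the same skip-height arithmetic with a few spot checks (the `main` function and the asserted values are illustration only, not part of the patch):

```rust
// Turn the lowest set '1' bit into '0': 0b1100 -> 0b1000.
fn invert_lowest_one(n: i64) -> i64 {
    n & (n - 1)
}

// Same shape as the added get_skip_height: even heights clear one low bit;
// odd heights clear two low bits of (height - 1) and add one back.
fn get_skip_height(height: u64) -> u64 {
    if height < 2 {
        return 0;
    }
    if (height & 1) > 0 {
        invert_lowest_one(invert_lowest_one(height as i64 - 1)) as u64 + 1
    } else {
        invert_lowest_one(height as i64) as u64
    }
}

fn main() {
    assert_eq!(get_skip_height(10), 8); // 0b1010 -> 0b1000
    assert_eq!(get_skip_height(14), 12); // 0b1110 -> 0b1100
    assert_eq!(get_skip_height(15), 9); // f(f(14)) + 1 = f(12) + 1 = 9
    // The result is always strictly lower than the input, so walking the
    // skip pointers terminates.
    for h in 2..=(1u64 << 18) {
        assert!(get_skip_height(h) < h);
    }
}
```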
+ if (height & 1) > 0 { + invert_lowest_one(invert_lowest_one(height as i64 - 1)) as u64 + 1 + } else { + invert_lowest_one(height as i64) as u64 + } +} + +pub const SHRINK_THRESHOLD: usize = 300; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index fc72cadf83..f881bc4521 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -27,7 +27,6 @@ ckb-error = { path = "../error", version = "= 0.116.0-pre" } ckb-tx-pool = { path = "../tx-pool", version = "= 0.116.0-pre" } sentry = { version = "0.26.0", optional = true } ckb-constant = { path = "../util/constant", version = "= 0.116.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.116.0-pre" } ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.116.0-pre" } tokio = { version = "1", features = ["sync"] } lru = "0.7.1" @@ -35,10 +34,8 @@ futures = "0.3" governor = "0.3.1" tempfile.workspace = true ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre" } -bitflags = "1.0" dashmap = "4.0" keyed_priority_queue = "0.3" -sled = "0.34.7" itertools.workspace = true [dev-dependencies] @@ -52,6 +49,7 @@ faux = "^0.1" once_cell = "1.8.0" ckb-systemtime = { path = "../util/systemtime", version = "= 0.116.0-pre", features = ["enable_faketime"] } ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.116.0-pre" } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.116.0-pre" } [features] default = [] diff --git a/sync/src/block_status.rs b/sync/src/block_status.rs deleted file mode 100644 index b417fc79ad..0000000000 --- a/sync/src/block_status.rs +++ /dev/null @@ -1,16 +0,0 @@ -#![allow(clippy::bad_bit_mask)] - -use bitflags::bitflags; - -bitflags! { - pub struct BlockStatus: u32 { - const UNKNOWN = 0; - - const HEADER_VALID = 1; - const BLOCK_RECEIVED = Self::HEADER_VALID.bits | 1 << 1; - const BLOCK_STORED = Self::HEADER_VALID.bits | Self::BLOCK_RECEIVED.bits | 1 << 3; - const BLOCK_VALID = Self::HEADER_VALID.bits | Self::BLOCK_RECEIVED.bits | Self::BLOCK_STORED.bits | 1 << 4; - - const BLOCK_INVALID = 1 << 12; - } -} diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 427880ed0f..e78d3c01a0 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -3,10 +3,8 @@ //! Sync module implement ckb sync protocol as specified here: //! 
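Editor's note: the sync/src/block_status.rs file deleted above is relocated, not dropped — later hunks in this patch import it as `ckb_shared::block_status::BlockStatus`. Since the relayer and synchronizer changes below lean on `status.contains(...)` checks, here is a sketch reproducing the flag lattice from the deleted file and the containment relations it guarantees (bitflags 1.x syntax, matching the original; `main` is illustration only):

```rust
use bitflags::bitflags; // bitflags = "1.3", as in the workspace

bitflags! {
    pub struct BlockStatus: u32 {
        const UNKNOWN        = 0;
        const HEADER_VALID   = 1;
        const BLOCK_RECEIVED = Self::HEADER_VALID.bits | 1 << 1;
        const BLOCK_STORED   = Self::HEADER_VALID.bits | Self::BLOCK_RECEIVED.bits | 1 << 3;
        const BLOCK_VALID    = Self::HEADER_VALID.bits
            | Self::BLOCK_RECEIVED.bits
            | Self::BLOCK_STORED.bits
            | 1 << 4;
        const BLOCK_INVALID  = 1 << 12;
    }
}

fn main() {
    // Each pipeline stage implies the earlier ones, which is why code such as
    // `contains_block_status(&hash, BlockStatus::BLOCK_STORED)` can stand in
    // for "the header was valid and the block was received and stored".
    assert!(BlockStatus::BLOCK_STORED.contains(BlockStatus::BLOCK_RECEIVED));
    assert!(BlockStatus::BLOCK_VALID.contains(BlockStatus::BLOCK_STORED));
    // BLOCK_INVALID is a separate bit, disjoint from the pipeline stages.
    assert!(!BlockStatus::BLOCK_INVALID.contains(BlockStatus::HEADER_VALID));
}
```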
-mod block_status; mod filter; pub(crate) mod net_time_checker; -pub(crate) mod orphan_block_pool; mod relayer; mod status; mod synchronizer; diff --git a/sync/src/relayer/block_transactions_process.rs b/sync/src/relayer/block_transactions_process.rs index 6b1161b36e..fa5522e349 100644 --- a/sync/src/relayer/block_transactions_process.rs +++ b/sync/src/relayer/block_transactions_process.rs @@ -23,7 +23,7 @@ use std::sync::Arc; pub struct BlockTransactionsProcess<'a> { message: packed::BlockTransactionsReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, } @@ -31,7 +31,7 @@ impl<'a> BlockTransactionsProcess<'a> { pub fn new( message: packed::BlockTransactionsReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, ) -> Self { BlockTransactionsProcess { @@ -116,10 +116,9 @@ impl<'a> BlockTransactionsProcess<'a> { match ret { ReconstructionResult::Block(block) => { pending.remove(); - let status = self - .relayer - .accept_block(self.nc.as_ref(), self.peer, block); - return status; + self.relayer + .accept_block(self.nc, self.peer, block, "BlockTransactions"); + return Status::ok(); } ReconstructionResult::Missing(transactions, uncles) => { // We need to get all transactions and uncles that do not exist locally diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 3bd1d5043c..b46dcca1ef 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -1,13 +1,14 @@ -use crate::block_status::BlockStatus; use crate::relayer::compact_block_verifier::CompactBlockVerifier; use crate::relayer::{ReconstructionResult, Relayer}; -use crate::types::{ActiveChain, HeaderIndex, PendingCompactBlockMap}; +use crate::types::{ActiveChain, PendingCompactBlockMap}; use crate::utils::send_message_to; use crate::SyncShared; use crate::{attempt, Status, StatusCode}; use ckb_chain_spec::consensus::Consensus; use ckb_logger::{self, debug_target}; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::HeaderIndex; use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_types::{ @@ -34,7 +35,7 @@ use std::time::Instant; pub struct CompactBlockProcess<'a> { message: packed::CompactBlockReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, } @@ -42,7 +43,7 @@ impl<'a> CompactBlockProcess<'a> { pub fn new( message: packed::CompactBlockReader<'a>, relayer: &'a Relayer, - nc: Arc, + nc: Arc, peer: PeerIndex, ) -> Self { CompactBlockProcess { @@ -116,16 +117,15 @@ impl<'a> CompactBlockProcess<'a> { >= block.epoch().number() }); shrink_to_fit!(pending_compact_blocks, 20); - let status = self - .relayer - .accept_block(self.nc.as_ref(), self.peer, block); + self.relayer + .accept_block(Arc::clone(&self.nc), self.peer, block, "CompactBlock"); if let Some(metrics) = ckb_metrics::handle() { metrics .ckb_relay_cb_verify_duration .observe(instant.elapsed().as_secs_f64()); } - status + Status::ok() } ReconstructionResult::Missing(transactions, uncles) => { let missing_transactions: Vec = @@ -231,7 +231,7 @@ fn contextual_check( compact_block_header: &HeaderView, shared: &Arc, active_chain: &ActiveChain, - nc: &Arc, + nc: &Arc, peer: PeerIndex, ) -> Status { let block_hash = compact_block_header.hash(); @@ -331,7 +331,7 @@ fn contextual_check( return Status::ignored(); } else { shared - .state() + .shared() .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); 
return StatusCode::CompactBlockHasInvalidHeader .with_context(format!("{block_hash} {err}")); diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 80c5695290..634a795050 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -20,19 +20,23 @@ use self::get_block_transactions_process::GetBlockTransactionsProcess; use self::get_transactions_process::GetTransactionsProcess; use self::transaction_hashes_process::TransactionHashesProcess; use self::transactions_process::TransactionsProcess; -use crate::block_status::BlockStatus; -use crate::types::{ActiveChain, BlockNumberAndHash, SyncShared}; -use crate::utils::{ - is_internal_db_error, metric_ckb_message_bytes, send_message_to, MetricDirection, -}; +use crate::types::{post_sync_process, ActiveChain, SyncShared}; +use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; -use ckb_chain::chain::ChainController; +use ckb_chain::VerifyResult; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_constant::sync::BAD_MESSAGE_BAN_TIME; -use ckb_logger::{debug_target, error_target, info_target, trace_target, warn_target}; +use ckb_error::is_internal_db_error; +use ckb_logger::{ + debug, debug_target, error, error_target, info_target, trace_target, warn_target, +}; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, SupportProtocols, TargetSession, }; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::BlockNumberAndHash; +use ckb_shared::Shared; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ @@ -49,7 +53,6 @@ use std::time::{Duration, Instant}; pub const TX_PROPOSAL_TOKEN: u64 = 0; pub const ASK_FOR_TXS_TOKEN: u64 = 1; pub const TX_HASHES_TOKEN: u64 = 2; -pub const SEARCH_ORPHAN_POOL_TOKEN: u64 = 3; pub const MAX_RELAY_PEERS: usize = 128; pub const MAX_RELAY_TXS_NUM_PER_BATCH: usize = 32767; @@ -70,7 +73,6 @@ pub enum ReconstructionResult { } /// Relayer protocol handle -#[derive(Clone)] pub struct Relayer { chain: ChainController, pub(crate) shared: Arc, @@ -87,6 +89,7 @@ impl Relayer { // current max rps is 10 (ASK_FOR_TXS_TOKEN / TX_PROPOSAL_TOKEN), 30 is a flexible hard cap with buffer let quota = governor::Quota::per_second(std::num::NonZeroU32::new(30).unwrap()); let rate_limiter = Arc::new(Mutex::new(RateLimiter::keyed(quota))); + Relayer { chain, shared, @@ -283,125 +286,73 @@ impl Relayer { #[allow(clippy::needless_collect)] pub fn accept_block( &self, - nc: &dyn CKBProtocolContext, - peer: PeerIndex, + nc: Arc, + peer_id: PeerIndex, block: core::BlockView, - ) -> Status { + msg_name: &str, + ) { if self .shared() .active_chain() .contains_block_status(&block.hash(), BlockStatus::BLOCK_STORED) { - return Status::ok(); - } - - let boxed: Arc = Arc::new(block); - match self - .shared() - .insert_new_block(&self.chain, Arc::clone(&boxed)) - { - Ok(true) => self.broadcast_compact_block(nc, peer, &boxed), - Ok(false) => debug_target!( - crate::LOG_TARGET_RELAY, - "Relayer accept_block received an uncle block, don't broadcast compact block" - ), - Err(err) => { - if !is_internal_db_error(&err) { - return StatusCode::BlockIsInvalid.with_context(format!( - "{}, error: {}", - boxed.hash(), - err, - )); - } - } + return; } - Status::ok() - } - fn broadcast_compact_block( - &self, - nc: &dyn CKBProtocolContext, - peer: PeerIndex, - boxed: &Arc, - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "[block_relay] relayer accept_block {} {}", - 
boxed.header().hash(), - unix_time_as_millis() - ); - let block_hash = boxed.hash(); - self.shared().state().remove_header_view(&block_hash); - let cb = packed::CompactBlock::build_from_block(boxed, &HashSet::new()); - let message = packed::RelayMessage::new_builder().set(cb).build(); - - let selected_peers: Vec = nc - .connected_peers() - .into_iter() - .filter(|target_peer| peer != *target_peer) - .take(MAX_RELAY_PEERS) - .collect(); - if let Err(err) = nc.quick_filter_broadcast( - TargetSession::Multi(Box::new(selected_peers.into_iter())), - message.as_bytes(), - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "relayer send block when accept block error: {:?}", - err, - ); - } + let block = Arc::new(block); + + let verify_callback = { + let nc: Arc = Arc::clone(&nc); + let block = Arc::clone(&block); + let shared = Arc::clone(self.shared()); + let msg_name = msg_name.to_owned(); + Box::new(move |result: VerifyResult| match result { + Ok(verified) => { + if !verified { + debug!( + "block {}-{} has verified already, won't build compact block and broadcast it", + block.number(), + block.hash() + ); + return; + } - if let Some(p2p_control) = nc.p2p_control() { - let snapshot = self.shared.shared().snapshot(); - let parent_chain_root = { - let mmr = snapshot.chain_root_mmr(boxed.header().number() - 1); - match mmr.get_root() { - Ok(root) => root, - Err(err) => { - error_target!( - crate::LOG_TARGET_RELAY, - "Generate last state to light client failed: {:?}", + build_and_broadcast_compact_block(nc.as_ref(), shared.shared(), peer_id, block); + } + Err(err) => { + error!( + "verify block {}-{} failed: {:?}, won't build compact block and broadcast it", + block.number(), + block.hash(), err - ); + ); + + let is_internal_db_error = is_internal_db_error(&err); + if is_internal_db_error { return; } + + // punish the malicious peer + post_sync_process( + nc.as_ref(), + peer_id, + &msg_name, + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + block.hash(), + err + )), + ); } - }; + }) + }; - let tip_header = packed::VerifiableHeader::new_builder() - .header(boxed.header().data()) - .uncles_hash(boxed.calc_uncles_hash()) - .extension(Pack::pack(&boxed.extension())) - .parent_chain_root(parent_chain_root) - .build(); - let light_client_message = { - let content = packed::SendLastState::new_builder() - .last_header(tip_header) - .build(); - packed::LightClientMessage::new_builder() - .set(content) - .build() - }; - let light_client_peers: HashSet = nc - .connected_peers() - .into_iter() - .filter_map(|index| nc.get_peer(index).map(|peer| (index, peer))) - .filter(|(_id, peer)| peer.if_lightclient_subscribed) - .map(|(id, _)| id) - .collect(); - if let Err(err) = p2p_control.filter_broadcast( - TargetSession::Filter(Box::new(move |id| light_client_peers.contains(id))), - SupportProtocols::LightClient.protocol_id(), - light_client_message.as_bytes(), - ) { - debug_target!( - crate::LOG_TARGET_RELAY, - "relayer send last state to light client when accept block, error: {:?}", - err, - ); - } - } + let remote_block = RemoteBlock { + block, + verify_callback, + }; + + self.shared.accept_remote_block(&self.chain, remote_block); } /// Reorganize the full block according to the compact block/txs/uncles @@ -512,7 +463,10 @@ impl Relayer { } } BlockStatus::BLOCK_RECEIVED => { - if let Some(uncle) = self.shared.state().get_orphan_block(&uncle_hash) { + if let Some(uncle) = self + .chain + .get_orphan_block(self.shared().store(), &uncle_hash) + { 
uncles.push(uncle.as_uncle().data()); } else { debug_target!( @@ -771,6 +725,92 @@ impl Relayer { } } +fn build_and_broadcast_compact_block( + nc: &dyn CKBProtocolContext, + shared: &Shared, + peer: PeerIndex, + block: Arc, +) { + debug_target!( + crate::LOG_TARGET_RELAY, + "[block_relay] relayer accept_block {} {}", + block.header().hash(), + unix_time_as_millis() + ); + let block_hash = block.hash(); + shared.remove_header_view(&block_hash); + let cb = packed::CompactBlock::build_from_block(&block, &HashSet::new()); + let message = packed::RelayMessage::new_builder().set(cb).build(); + + let selected_peers: Vec = nc + .connected_peers() + .into_iter() + .filter(|target_peer| peer != *target_peer) + .take(MAX_RELAY_PEERS) + .collect(); + if let Err(err) = nc.quick_filter_broadcast( + TargetSession::Multi(Box::new(selected_peers.into_iter())), + message.as_bytes(), + ) { + debug_target!( + crate::LOG_TARGET_RELAY, + "relayer send block when accept block error: {:?}", + err, + ); + } + + if let Some(p2p_control) = nc.p2p_control() { + let snapshot = shared.snapshot(); + let parent_chain_root = { + let mmr = snapshot.chain_root_mmr(block.header().number() - 1); + match mmr.get_root() { + Ok(root) => root, + Err(err) => { + error_target!( + crate::LOG_TARGET_RELAY, + "Generate last state to light client failed: {:?}", + err + ); + return; + } + } + }; + + let tip_header = packed::VerifiableHeader::new_builder() + .header(block.header().data()) + .uncles_hash(block.calc_uncles_hash()) + .extension(Pack::pack(&block.extension())) + .parent_chain_root(parent_chain_root) + .build(); + let light_client_message = { + let content = packed::SendLastState::new_builder() + .last_header(tip_header) + .build(); + packed::LightClientMessage::new_builder() + .set(content) + .build() + }; + let light_client_peers: HashSet = nc + .connected_peers() + .into_iter() + .filter_map(|index| nc.get_peer(index).map(|peer| (index, peer))) + .filter(|(_id, peer)| peer.if_lightclient_subscribed) + .map(|(id, _)| id) + .collect(); + if let Err(err) = p2p_control.filter_broadcast( + TargetSession::Filter(Box::new(move |id| light_client_peers.contains(id))), + SupportProtocols::LightClient.protocol_id(), + light_client_message.as_bytes(), + ) { + debug_target!( + crate::LOG_TARGET_RELAY, + "relayer send last state to light client when accept block, error: {:?}", + err, + ); + } + } +} + #[async_trait] impl CKBProtocolHandler for Relayer { async fn init(&mut self, nc: Arc) { @@ -783,10 +823,6 @@ impl CKBProtocolHandler for Relayer { nc.set_notify(Duration::from_millis(300), TX_HASHES_TOKEN) .await .expect("set_notify at init is ok"); - // todo: remove when the asynchronous verification is completed - nc.set_notify(Duration::from_secs(5), SEARCH_ORPHAN_POOL_TOKEN) - .await - .expect("set_notify at init is ok"); } async fn received( @@ -935,9 +971,6 @@ impl CKBProtocolHandler for Relayer { if nc.remove_notify(TX_HASHES_TOKEN).await.is_err() { trace_target!(crate::LOG_TARGET_RELAY, "remove v2 relay notify fail"); } - if nc.remove_notify(SEARCH_ORPHAN_POOL_TOKEN).await.is_err() { - trace_target!(crate::LOG_TARGET_RELAY, "remove v2 relay notify fail"); - } for kv_pair in self.shared().state().peers().state.iter() { let (peer, state) = kv_pair.pair(); if !state.peer_flags.is_2023edition { @@ -957,14 +990,6 @@ impl CKBProtocolHandler for Relayer { } ASK_FOR_TXS_TOKEN => self.ask_for_txs(nc.as_ref()), TX_HASHES_TOKEN => self.send_bulk_of_tx_hashes(nc.as_ref()), - SEARCH_ORPHAN_POOL_TOKEN => { - if 
!self.shared.state().orphan_pool().is_empty() { - tokio::task::block_in_place(|| { - self.shared.try_search_orphan_pool(&self.chain); - self.shared.periodic_clean_orphan_pool(); - }) - } - } _ => unreachable!(), } trace_target!( diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 3088aae90c..df4e7491d4 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -1,11 +1,12 @@ -use crate::block_status::BlockStatus; use crate::relayer::compact_block_process::CompactBlockProcess; use crate::relayer::tests::helper::{ build_chain, gen_block, new_header_builder, MockProtocolContext, }; use crate::{Status, StatusCode}; -use ckb_chain::chain::ChainService; +use ckb_chain::start_chain_services; use ckb_network::{PeerIndex, SupportProtocols}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::ChainServicesBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{PlugTarget, TxEntry}; @@ -56,7 +57,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); } @@ -76,7 +77,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_STORED); } @@ -96,7 +97,7 @@ fn test_in_block_status_map() { { relayer .shared - .state() + .shared() .insert_block_status(block.header().hash(), BlockStatus::BLOCK_RECEIVED); } @@ -333,6 +334,8 @@ fn test_send_missing_indexes() { #[test] fn test_accept_block() { + let _log_guard = ckb_logger_service::init_for_test("info,ckb-chain=debug").expect("init log"); + let (relayer, _) = build_chain(5); let parent = { let active_chain = relayer.shared.active_chain(); @@ -379,16 +382,18 @@ fn test_accept_block() { } { - let chain_controller = { - let proposal_window = ckb_proposal_table::ProposalTable::new( - relayer.shared().shared().consensus().tx_proposal_window(), - ); - let chain_service = - ChainService::new(relayer.shared().shared().to_owned(), proposal_window); - chain_service.start::<&str>(None) + let proposal_table = ckb_proposal_table::ProposalTable::new( + relayer.shared().shared().consensus().tx_proposal_window(), + ); + let chain_service_builder = ChainServicesBuilder { + shared: relayer.shared().shared().to_owned(), + proposal_table, }; + + let chain_controller = start_chain_services(chain_service_builder); + chain_controller - .internal_process_block(Arc::new(uncle), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(uncle), Switch::DISABLE_EXTENSION) .unwrap(); } diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index d81da762a4..f77bcd3f3f 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -1,6 +1,6 @@ use crate::{Relayer, SyncShared}; use ckb_app_config::NetworkConfig; -use ckb_chain::chain::ChainService; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; @@ -171,10 +171,7 @@ pub(crate) fn build_chain(tip: BlockNumber) -> (Relayer, OutPoint) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = 
start_chain_services(pack.take_chain_services_builder()); // Build 1 ~ (tip-1) heights for i in 0..tip { @@ -212,7 +209,7 @@ pub(crate) fn build_chain(tip: BlockNumber) -> (Relayer, OutPoint) { .transaction(cellbase) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .expect("processing block should be ok"); } diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 5ba4fcee8e..d573d7ed38 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -1,12 +1,13 @@ -use crate::block_status::BlockStatus; -use crate::types::{ActiveChain, BlockNumberAndHash, HeaderIndex, HeaderIndexView, IBDState}; +use crate::types::{ActiveChain, IBDState}; use crate::SyncShared; use ckb_constant::sync::{ BLOCK_DOWNLOAD_WINDOW, CHECK_POINT_WINDOW, INIT_BLOCKS_IN_TRANSIT_PER_PEER, - MAX_ORPHAN_POOL_SIZE, }; use ckb_logger::{debug, trace}; +use ckb_metrics::HistogramTimer; use ckb_network::PeerIndex; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView}; use ckb_systemtime::unix_time_as_millis; use ckb_types::packed; use std::cmp::min; @@ -66,9 +67,20 @@ impl BlockFetcher { // If the peer reorganized, our previous last_common_header may not be an ancestor // of its current tip anymore. Go back enough to fix that. - last_common = self - .active_chain - .last_common_ancestor(&last_common, best_known)?; + last_common = { + let now = std::time::Instant::now(); + let last_common_ancestor = self + .active_chain + .last_common_ancestor(&last_common, best_known)?; + debug!( + "last_common_ancestor({:?}, {:?})->{:?} cost {:?}", + last_common, + best_known, + last_common_ancestor, + now.elapsed() + ); + last_common_ancestor + }; self.sync_shared .state() @@ -79,6 +91,21 @@ impl BlockFetcher { } pub fn fetch(self) -> Option>> { + let _trace_timecost: Option = { + ckb_metrics::handle().map(|handle| handle.ckb_sync_block_fetch_duration.start_timer()) + }; + + if self.sync_shared.shared().get_unverified_tip().number() + >= self.sync_shared.active_chain().tip_number() + BLOCK_DOWNLOAD_WINDOW * 9 + { + trace!( + "unverified_tip - tip > BLOCK_DOWNLOAD_WINDOW * 9, skip fetch, unverified_tip: {}, tip: {}", + self.sync_shared.shared().get_unverified_tip().number(), + self.sync_shared.active_chain().tip_number() + ); + return None; + } + if self.reached_inflight_limit() { trace!( "[block_fetcher] inflight count has reached the limit, preventing further downloads from peer {}", @@ -139,55 +166,58 @@ impl BlockFetcher { return None; } - let mut block_download_window = BLOCK_DOWNLOAD_WINDOW; + if matches!(self.ibd, IBDState::In) + && best_known.number() <= self.active_chain.unverified_tip_number() + { + debug!("In IBD mode, Peer {}'s best_known: {} is less or equal than unverified_tip : {}, won't request block from this peer", + self.peer, + best_known.number(), + self.active_chain.unverified_tip_number() + ); + return None; + }; + let state = self.sync_shared.state(); - let mut inflight = state.write_inflight_blocks(); - - // During IBD, if the total block size of the orphan block pool is greater than MAX_ORPHAN_POOL_SIZE, - // we will enter a special download mode. In this mode, the node will only allow downloading - // the tip+1 block to reduce memory usage as quickly as possible. 
- // - // If there are more than CHECK_POINT_WINDOW blocks(ckb block maximum is 570kb) in - // the orphan block pool, immediately trace the tip + 1 block being downloaded, and - // re-select the target for downloading after timeout. - // - // Also try to send a chunk download request for tip + 1 - if state.orphan_pool().total_size() >= MAX_ORPHAN_POOL_SIZE { - let tip = self.active_chain.tip_number(); - // set download window to 2 - block_download_window = 2; - debug!( - "[Enter special download mode], orphan pool total size = {}, \ - orphan len = {}, inflight_len = {}, tip = {}", - state.orphan_pool().total_size(), - state.orphan_pool().len(), - inflight.total_inflight_count(), - tip - ); - // will remove it's task if timeout - if state.orphan_pool().len() > CHECK_POINT_WINDOW as usize { - inflight.mark_slow_block(tip); + let mut start = { + match self.ibd { + IBDState::In => self.sync_shared.shared().get_unverified_tip().number() + 1, + IBDState::Out => last_common.number() + 1, } - } - - let mut start = last_common.number() + 1; - let mut end = min(best_known.number(), start + block_download_window); + }; + let mut end = min(best_known.number(), start + BLOCK_DOWNLOAD_WINDOW); let n_fetch = min( end.saturating_sub(start) as usize + 1, - inflight.peer_can_fetch_count(self.peer), + state.read_inflight_blocks().peer_can_fetch_count(self.peer), ); let mut fetch = Vec::with_capacity(n_fetch); let now = unix_time_as_millis(); + debug!( + "finding which blocks to fetch, start: {}, end: {}, best_known: {}", + start, + end, + best_known.number(), + ); while fetch.len() < n_fetch && start <= end { let span = min(end - start + 1, (n_fetch - fetch.len()) as u64); // Iterate in range `[start, start+span)` and consider as the next to-fetch candidates. - let mut header = self - .active_chain - .get_ancestor(&best_known.hash(), start + span - 1)?; - let mut status = self.active_chain.get_block_status(&header.hash()); + let mut header: HeaderIndexView = { + match self.ibd { + IBDState::In => self + .active_chain + .get_ancestor_with_unverified(&best_known.hash(), start + span - 1), + IBDState::Out => self + .active_chain + .get_ancestor(&best_known.hash(), start + span - 1), + } + }?; + + let mut status = self + .sync_shared + .active_chain() + .get_block_status(&header.hash()); // Judge whether we should fetch the target block, neither stored nor in-flighted for _ in 0..span { @@ -195,24 +225,38 @@ impl BlockFetcher { let hash = header.hash(); if status.contains(BlockStatus::BLOCK_STORED) { - // If the block is stored, its ancestor must on store - // So we can skip the search of this space directly - self.sync_shared - .state() - .peers() - .set_last_common_header(self.peer, header.number_and_hash()); - end = min(best_known.number(), header.number() + block_download_window); + if status.contains(BlockStatus::BLOCK_VALID) { + // If the block is stored, its ancestor must on store + // So we can skip the search of this space directly + self.sync_shared + .state() + .peers() + .set_last_common_header(self.peer, header.number_and_hash()); + } + + end = min(best_known.number(), header.number() + BLOCK_DOWNLOAD_WINDOW); break; } else if status.contains(BlockStatus::BLOCK_RECEIVED) { // Do not download repeatedly } else if (matches!(self.ibd, IBDState::In) || state.compare_with_pending_compact(&hash, now)) - && inflight.insert(self.peer, (header.number(), hash).into()) + && state + .write_inflight_blocks() + .insert(self.peer, (header.number(), hash).into()) { + debug!( + "block: {}-{} added to inflight, 
block_status: {:?}", + header.number(), + header.hash(), + status + ); fetch.push(header) } - status = self.active_chain.get_block_status(&parent_hash); + status = self + .sync_shared + .active_chain() + .get_block_status(&parent_hash); header = self .sync_shared .get_header_index_view(&parent_hash, false)?; @@ -226,24 +270,55 @@ impl BlockFetcher { fetch.sort_by_key(|header| header.number()); let tip = self.active_chain.tip_number(); + let unverified_tip = self.active_chain.unverified_tip_number(); let should_mark = fetch.last().map_or(false, |header| { - header.number().saturating_sub(CHECK_POINT_WINDOW) > tip + header.number().saturating_sub(CHECK_POINT_WINDOW) > unverified_tip }); if should_mark { - inflight.mark_slow_block(tip); + state + .write_inflight_blocks() + .mark_slow_block(unverified_tip); + } + + let inflight_total_count = state.read_inflight_blocks().total_inflight_count(); + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_inflight_blocks_count + .set(inflight_total_count as i64); } if fetch.is_empty() { debug!( - "[block fetch empty] fixed_last_common_header = {} \ - best_known_header = {}, tip = {}, inflight_len = {}, \ - inflight_state = {:?}", + "[block fetch empty] peer-{}, fixed_last_common_header = {} \ + best_known_header = {}, [tip/unverified_tip]: [{}/{}], inflight_len = {}", + self.peer, last_common.number(), best_known.number(), tip, - inflight.total_inflight_count(), - *inflight - ) + unverified_tip, + inflight_total_count, + ); + trace!( + "[block fetch empty] peer-{}, inflight_state = {:?}", + self.peer, + *state.read_inflight_blocks() + ); + } else { + let fetch_head = fetch.first().map_or(0_u64, |v| v.number()); + let fetch_last = fetch.last().map_or(0_u64, |v| v.number()); + let inflight_peer_count = state.read_inflight_blocks().peer_inflight_count(self.peer); + debug!( + "request peer-{} for batch blocks: [{}-{}], batch len:{}, [tip/unverified_tip]: [{}/{}], [peer/total inflight count]: [{} / {}], blocks: {}", + self.peer, + fetch_head, + fetch_last, + fetch.len(), + tip, + self.sync_shared.shared().get_unverified_tip().number(), + inflight_peer_count, + inflight_total_count, + fetch.iter().map(|h| h.number().to_string()).collect::>().join(","), + ); } Some( diff --git a/sync/src/synchronizer/block_process.rs b/sync/src/synchronizer/block_process.rs index 3526fb1450..074f0ac4d9 100644 --- a/sync/src/synchronizer/block_process.rs +++ b/sync/src/synchronizer/block_process.rs @@ -1,12 +1,19 @@ -use crate::{synchronizer::Synchronizer, utils::is_internal_db_error, Status, StatusCode}; +use crate::synchronizer::Synchronizer; +use crate::types::post_sync_process; +use crate::StatusCode; +use ckb_chain::RemoteBlock; +use ckb_error::is_internal_db_error; use ckb_logger::debug; -use ckb_network::PeerIndex; +use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_types::packed::Byte32; use ckb_types::{packed, prelude::*}; +use std::sync::Arc; pub struct BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, - _peer: PeerIndex, + peer: PeerIndex, + nc: Arc, } impl<'a> BlockProcess<'a> { @@ -14,15 +21,67 @@ impl<'a> BlockProcess<'a> { message: packed::SendBlockReader<'a>, synchronizer: &'a Synchronizer, peer: PeerIndex, + nc: Arc, ) -> Self { BlockProcess { message, synchronizer, - _peer: peer, + peer, + nc, } } - pub fn execute(self) -> Status { + pub fn execute(self) -> crate::Status { + let block = Arc::new(self.message.block().to_entity().into_view()); + debug!( + "BlockProcess received block {} {}", + 
block.number(), + block.hash(), + ); + let shared = self.synchronizer.shared(); + + if shared.new_block_received(&block) { + let verify_callback = { + let nc: Arc = Arc::clone(&self.nc); + let peer_id: PeerIndex = self.peer; + let block_hash: Byte32 = block.hash(); + Box::new(move |verify_result: Result| { + match verify_result { + Ok(_) => {} + Err(err) => { + let is_internal_db_error = is_internal_db_error(&err); + if is_internal_db_error { + return; + } + + // punish the malicious peer + post_sync_process( + nc.as_ref(), + peer_id, + "SendBlock", + StatusCode::BlockIsInvalid.with_context(format!( + "block {} is invalid, reason: {}", + block_hash, err + )), + ); + } + }; + }) + }; + let remote_block = RemoteBlock { + block, + verify_callback, + }; + self.synchronizer + .asynchronous_process_remote_block(remote_block); + } + + // block process is asynchronous, so we only return ignored here + crate::Status::ignored() + } + + #[cfg(test)] + pub fn blocking_execute(self) -> crate::Status { let block = self.message.block().to_entity().into_view(); debug!( "BlockProcess received block {} {}", @@ -30,12 +89,14 @@ impl<'a> BlockProcess<'a> { block.hash(), ); let shared = self.synchronizer.shared(); - let state = shared.state(); - if state.new_block_received(&block) { - if let Err(err) = self.synchronizer.process_new_block(block.clone()) { - if !is_internal_db_error(&err) { - return StatusCode::BlockIsInvalid.with_context(format!( + if shared.new_block_received(&block) { + if let Err(err) = self + .synchronizer + .blocking_process_new_block(block.clone(), self.peer) + { + if !ckb_error::is_internal_db_error(&err) { + return crate::StatusCode::BlockIsInvalid.with_context(format!( "{}, error: {}", block.hash(), err, @@ -43,7 +104,6 @@ impl<'a> BlockProcess<'a> { } } } - - Status::ok() + crate::Status::ok() } } diff --git a/sync/src/synchronizer/get_blocks_process.rs b/sync/src/synchronizer/get_blocks_process.rs index b9670d5f85..ac69b5f8fe 100644 --- a/sync/src/synchronizer/get_blocks_process.rs +++ b/sync/src/synchronizer/get_blocks_process.rs @@ -1,10 +1,10 @@ -use crate::block_status::BlockStatus; use crate::synchronizer::Synchronizer; use crate::utils::send_message_to; use crate::{attempt, Status, StatusCode}; use ckb_constant::sync::{INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_HEADERS_LEN}; use ckb_logger::debug; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; use ckb_types::{packed, prelude::*}; use std::collections::HashSet; diff --git a/sync/src/synchronizer/get_headers_process.rs b/sync/src/synchronizer/get_headers_process.rs index 3b4b44cf12..12c5041413 100644 --- a/sync/src/synchronizer/get_headers_process.rs +++ b/sync/src/synchronizer/get_headers_process.rs @@ -55,10 +55,10 @@ impl<'a> GetHeadersProcess<'a> { self.peer ); self.send_in_ibd(); - let state = self.synchronizer.shared.state(); - if let Some(flag) = state.peers().get_flag(self.peer) { + let shared = self.synchronizer.shared(); + if let Some(flag) = shared.state().peers().get_flag(self.peer) { if flag.is_outbound || flag.is_whitelist || flag.is_protect { - state.insert_peer_unknown_header_list(self.peer, block_locator_hashes); + shared.insert_peer_unknown_header_list(self.peer, block_locator_hashes); } }; return Status::ignored(); diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 1cb5d7e19f..c2ae0f7665 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -1,4 +1,3 @@ -use 
crate::block_status::BlockStatus; use crate::synchronizer::Synchronizer; use crate::types::{ActiveChain, SyncShared}; use crate::{Status, StatusCode}; @@ -6,6 +5,7 @@ use ckb_constant::sync::MAX_HEADERS_LEN; use ckb_error::Error; use ckb_logger::{debug, log_enabled, warn, Level}; use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_shared::block_status::BlockStatus; use ckb_traits::HeaderFieldsProvider; use ckb_types::{core, packed, prelude::*}; use ckb_verification::{HeaderError, HeaderVerifier}; @@ -281,19 +281,26 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { pub fn accept(&self) -> ValidationResult { let mut result = ValidationResult::default(); - let shared = self.active_chain.shared(); - let state = shared.state(); + let sync_shared = self.active_chain.sync_shared(); + let state = self.active_chain.state(); + let shared = sync_shared.shared(); // FIXME If status == BLOCK_INVALID then return early. But which error // type should we return? let status = self.active_chain.get_block_status(&self.header.hash()); if status.contains(BlockStatus::HEADER_VALID) { - let header_index = shared + let header_index = sync_shared .get_header_index_view( &self.header.hash(), status.contains(BlockStatus::BLOCK_STORED), ) - .expect("header with HEADER_VALID should exist") + .unwrap_or_else(|| { + panic!( + "header {}-{} with HEADER_VALID should exist", + self.header.number(), + self.header.hash() + ) + }) .as_header_index(); state .peers() @@ -307,7 +314,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } @@ -318,7 +325,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.hash(), ); if is_invalid { - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); } return result; } @@ -329,11 +336,11 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { self.header.number(), self.header.hash(), ); - state.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); + shared.insert_block_status(self.header.hash(), BlockStatus::BLOCK_INVALID); return result; } - shared.insert_valid_header(self.peer, self.header); + sync_shared.insert_valid_header(self.peer, self.header); result } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 18c34204be..f79dada29a 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -20,28 +20,32 @@ pub(crate) use self::get_headers_process::GetHeadersProcess; pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; -use crate::block_status::BlockStatus; -use crate::types::{HeaderIndexView, HeadersSyncController, IBDState, Peers, SyncShared}; +use crate::types::{post_sync_process, HeadersSyncController, IBDState, Peers, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; +use ckb_shared::block_status::BlockStatus; -use ckb_chain::chain::ChainController; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_channel as channel; use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, }; -use ckb_error::Error 
as CKBError; use ckb_logger::{debug, error, info, trace, warn}; +use ckb_metrics::HistogramTimer; use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; +use ckb_shared::types::HeaderIndexView; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; + +#[cfg(test)] +use ckb_types::core; use ckb_types::{ - core::{self, BlockNumber}, + core::BlockNumber, packed::{self, Byte32}, prelude::*, }; @@ -114,9 +118,10 @@ impl BlockFetchCMD { } CanStart::AssumeValidNotFound => { let state = self.sync_shared.state(); + let shared = self.sync_shared.shared(); let best_known = state.shared_best_header_ref(); let number = best_known.number(); - let assume_valid_target: Byte32 = state + let assume_valid_target: Byte32 = shared .assume_valid_target() .as_ref() .map(Pack::pack) @@ -212,6 +217,7 @@ impl BlockFetchCMD { return self.can_start; } + let shared = self.sync_shared.shared(); let state = self.sync_shared.state(); let min_work_reach = |flag: &mut CanStart| { @@ -221,9 +227,9 @@ impl BlockFetchCMD { }; let assume_valid_target_find = |flag: &mut CanStart| { - let mut assume_valid_target = state.assume_valid_target(); + let mut assume_valid_target = shared.assume_valid_target(); if let Some(ref target) = *assume_valid_target { - match state.header_map().get(&target.pack()) { + match shared.header_map().get(&target.pack()) { Some(header) => { *flag = CanStart::Ready; info!("assume valid target found in header_map; CKB will start fetch blocks now"); @@ -310,40 +316,51 @@ impl Synchronizer { fn try_process( &self, - nc: &dyn CKBProtocolContext, + nc: Arc, peer: PeerIndex, message: packed::SyncMessageUnionReader<'_>, ) -> Status { + let _trace_timecost: Option = { + ckb_metrics::handle().map(|handle| { + handle + .ckb_sync_msg_process_duration + .with_label_values(&[message.item_name()]) + .start_timer() + }) + }; + match message { packed::SyncMessageUnionReader::GetHeaders(reader) => { - GetHeadersProcess::new(reader, self, peer, nc).execute() + GetHeadersProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::SendHeaders(reader) => { - HeadersProcess::new(reader, self, peer, nc).execute() + HeadersProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::GetBlocks(reader) => { - GetBlocksProcess::new(reader, self, peer, nc).execute() + GetBlocksProcess::new(reader, self, peer, nc.as_ref()).execute() } packed::SyncMessageUnionReader::SendBlock(reader) => { if reader.check_data() { - BlockProcess::new(reader, self, peer).execute() + BlockProcess::new(reader, self, peer, nc).execute() } else { StatusCode::ProtocolMessageIsMalformed.with_context("SendBlock is invalid") } } - packed::SyncMessageUnionReader::InIBD(_) => InIBDProcess::new(self, peer, nc).execute(), + packed::SyncMessageUnionReader::InIBD(_) => { + InIBDProcess::new(self, peer, nc.as_ref()).execute() + } } } fn process( &self, - nc: &dyn CKBProtocolContext, + nc: Arc, peer: PeerIndex, message: packed::SyncMessageUnionReader<'_>, ) { let item_name = message.item_name(); let item_bytes = message.as_slice().len() as u64; - let status = self.try_process(nc, peer, message); + let status = self.try_process(Arc::clone(&nc), peer, message); metric_ckb_message_bytes( MetricDirection::In, @@ -353,17 +370,7 @@ impl Synchronizer { item_bytes, ); - if let Some(ban_time) = status.should_ban() { - error!( - "Receive {} from {}. 
Ban {:?} for {}", - item_name, peer, ban_time, status - ); - nc.ban_peer(peer, ban_time, status.to_string()); - } else if status.should_warn() { - warn!("Receive {} from {}, {}", item_name, peer, status); - } else if !status.is_ok() { - debug!("Receive {} from {}, {}", item_name, peer, status); - } + post_sync_process(nc.as_ref(), peer, item_name, status); } /// Get peers info @@ -390,22 +397,45 @@ impl Synchronizer { /// Process a new block sync from other peer //TODO: process block which we don't request - pub fn process_new_block(&self, block: core::BlockView) -> Result { + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { + let block_hash = remote_block.block.hash(); + let status = self.shared.active_chain().get_block_status(&block_hash); + // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding + // stopping synchronization even when orphan_pool maintains dirty items by bugs. + if status.contains(BlockStatus::BLOCK_STORED) { + error!("Block {} already stored", block_hash); + } else if status.contains(BlockStatus::HEADER_VALID) { + self.shared.accept_remote_block(&self.chain, remote_block); + } else { + debug!( + "Synchronizer process_new_block unexpected status {:?} {}", + status, block_hash, + ); + // TODO which error should we return? + } + } + + #[cfg(test)] + pub fn blocking_process_new_block( + &self, + block: core::BlockView, + _peer_id: PeerIndex, + ) -> Result { + let block_hash = block.hash(); + let status = self.shared.active_chain().get_block_status(&block_hash); // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. if status.contains(BlockStatus::BLOCK_STORED) { - debug!("Block {} already stored", block_hash); + error!("block {} already stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { - self.shared.insert_new_block(&self.chain, Arc::new(block)) + self.chain.blocking_process_block(Arc::new(block)) } else { debug!( "Synchronizer process_new_block unexpected status {:?} {}", status, block_hash, ); - // TODO which error should we return? + // TODO which error should we return?
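// Ok(false) signals "not processed": only a block whose header is already
// HEADER_VALID is forwarded to the chain service, matching the asynchronous
// path above.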
Ok(false) } } @@ -416,7 +446,7 @@ impl Synchronizer { peer: PeerIndex, ibd: IBDState, ) -> Option>> { - BlockFetcher::new(Arc::to_owned(self.shared()), peer, ibd).fetch() + BlockFetcher::new(Arc::clone(&self.shared), peer, ibd).fetch() } pub(crate) fn on_connected(&self, nc: &dyn CKBProtocolContext, peer: PeerIndex) { @@ -636,10 +666,21 @@ impl Synchronizer { } fn find_blocks_to_fetch(&mut self, nc: &dyn CKBProtocolContext, ibd: IBDState) { - let tip = self.shared.active_chain().tip_number(); + if self.chain.is_verifying_unverified_blocks_on_startup() { + trace!( + "skip find_blocks_to_fetch, ckb_chain is verifying unverified blocks on startup" + ); + return; + } + + let unverified_tip = self.shared.active_chain().unverified_tip_number(); let disconnect_list = { - let mut list = self.shared().state().write_inflight_blocks().prune(tip); + let mut list = self + .shared() + .state() + .write_inflight_blocks() + .prune(unverified_tip); if let IBDState::In = ibd { // best known < tip and in IBD state, and unknown list is empty, // these node can be disconnect @@ -647,7 +688,7 @@ impl Synchronizer { self.shared .state() .peers() - .get_best_known_less_than_tip_and_unknown_empty(tip), + .get_best_known_less_than_tip_and_unknown_empty(unverified_tip), ) }; list @@ -840,7 +881,7 @@ impl CKBProtocolHandler for Synchronizer { } let start_time = Instant::now(); - tokio::task::block_in_place(|| self.process(nc.as_ref(), peer_index, msg)); + tokio::task::block_in_place(|| self.process(nc, peer_index, msg)); debug!( "Process message={}, peer={}, cost={:?}", msg.item_name(), @@ -866,6 +907,7 @@ impl CKBProtocolHandler for Synchronizer { ) { let sync_state = self.shared().state(); sync_state.disconnected(peer_index); + info!("SyncProtocol.disconnected peer={}", peer_index); } async fn notify(&mut self, nc: Arc, token: u64) { diff --git a/sync/src/tests/block_status.rs b/sync/src/tests/block_status.rs index 351b120236..c9a797b20c 100644 --- a/sync/src/tests/block_status.rs +++ b/sync/src/tests/block_status.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use crate::block_status::BlockStatus; +use ckb_shared::block_status::BlockStatus; fn all() -> Vec { vec![ diff --git a/sync/src/tests/inflight_blocks.rs b/sync/src/tests/inflight_blocks.rs index 46e6f45437..c2f3fcd11a 100644 --- a/sync/src/tests/inflight_blocks.rs +++ b/sync/src/tests/inflight_blocks.rs @@ -1,5 +1,6 @@ -use crate::types::{BlockNumberAndHash, InflightBlocks}; +use crate::types::InflightBlocks; use ckb_constant::sync::BLOCK_DOWNLOAD_TIMEOUT; +use ckb_shared::types::BlockNumberAndHash; use ckb_types::h256; use ckb_types::prelude::*; use std::collections::HashSet; diff --git a/sync/src/tests/mod.rs b/sync/src/tests/mod.rs index a64e84d4a5..cb6d1ab347 100644 --- a/sync/src/tests/mod.rs +++ b/sync/src/tests/mod.rs @@ -15,7 +15,6 @@ use std::time::Duration; mod block_status; mod inflight_blocks; mod net_time_checker; -mod orphan_block_pool; mod sync_shared; mod synchronizer; diff --git a/sync/src/tests/orphan_block_pool.rs b/sync/src/tests/orphan_block_pool.rs deleted file mode 100644 index f535871b03..0000000000 --- a/sync/src/tests/orphan_block_pool.rs +++ /dev/null @@ -1,153 +0,0 @@ -use ckb_chain_spec::consensus::ConsensusBuilder; -use ckb_systemtime::unix_time_as_millis; -use ckb_types::core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}; -use ckb_types::prelude::*; -use std::collections::HashSet; -use std::sync::Arc; -use std::thread; - -use crate::orphan_block_pool::OrphanBlockPool; - -fn gen_block(parent_header: 
&HeaderView) -> BlockView { - let number = parent_header.number() + 1; - BlockBuilder::default() - .parent_hash(parent_header.hash()) - .timestamp(unix_time_as_millis().pack()) - .number(number.pack()) - .epoch(EpochNumberWithFraction::new(number / 1000, number % 1000, 1000).pack()) - .nonce((parent_header.nonce() + 1).pack()) - .build() -} - -#[test] -fn test_remove_blocks_by_parent() { - let consensus = ConsensusBuilder::default().build(); - let block_number = 200; - let mut blocks = Vec::new(); - let mut parent = consensus.genesis_block().header(); - let pool = OrphanBlockPool::with_capacity(200); - let mut total_size = 0; - for _ in 1..block_number { - let new_block = gen_block(&parent); - total_size += new_block.data().total_size(); - blocks.push(new_block.clone()); - pool.insert(new_block.clone()); - parent = new_block.header(); - } - assert_eq!(total_size, pool.total_size()); - - let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - let orphan_set: HashSet = orphan.into_iter().collect(); - let blocks_set: HashSet = blocks.into_iter().collect(); - assert_eq!(orphan_set, blocks_set); - assert_eq!(0, pool.total_size()); -} - -#[test] -fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { - let consensus = ConsensusBuilder::default().build(); - let pool = OrphanBlockPool::with_capacity(1024); - let mut header = consensus.genesis_block().header(); - let mut hashes = Vec::new(); - for _ in 1..1024 { - let new_block = gen_block(&header); - pool.insert(new_block.clone()); - header = new_block.header(); - hashes.push(header.hash()); - } - - let pool_arc1 = Arc::new(pool); - let pool_arc2 = Arc::clone(&pool_arc1); - - let thread1 = thread::spawn(move || { - pool_arc1.remove_blocks_by_parent(&consensus.genesis_block().hash()); - }); - - for hash in hashes.iter().rev() { - pool_arc2.get_block(hash); - } - - thread1.join().unwrap(); -} - -#[test] -fn test_leaders() { - let consensus = ConsensusBuilder::default().build(); - let block_number = 20; - let mut blocks = Vec::new(); - let mut parent = consensus.genesis_block().header(); - let pool = OrphanBlockPool::with_capacity(20); - for i in 0..block_number - 1 { - let new_block = gen_block(&parent); - blocks.push(new_block.clone()); - parent = new_block.header(); - if i % 5 != 0 { - pool.insert(new_block.clone()); - } - } - - assert_eq!(pool.len(), 15); - assert_eq!(pool.leaders_len(), 4); - - pool.insert(blocks[5].clone()); - assert_eq!(pool.len(), 16); - assert_eq!(pool.leaders_len(), 3); - - pool.insert(blocks[10].clone()); - assert_eq!(pool.len(), 17); - assert_eq!(pool.leaders_len(), 2); - - // index 0 doesn't in the orphan pool, so do nothing - let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - assert!(orphan.is_empty()); - assert_eq!(pool.len(), 17); - assert_eq!(pool.leaders_len(), 2); - - pool.insert(blocks[0].clone()); - assert_eq!(pool.len(), 18); - assert_eq!(pool.leaders_len(), 2); - - let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); - assert_eq!(pool.len(), 3); - assert_eq!(pool.leaders_len(), 1); - - pool.insert(blocks[15].clone()); - assert_eq!(pool.len(), 4); - assert_eq!(pool.leaders_len(), 1); - - let orphan_1 = pool.remove_blocks_by_parent(&blocks[14].hash()); - - let orphan_set: HashSet = orphan.into_iter().chain(orphan_1).collect(); - let blocks_set: HashSet = blocks.into_iter().collect(); - assert_eq!(orphan_set, blocks_set); - assert_eq!(pool.len(), 0); - assert_eq!(pool.leaders_len(), 0); -} - -#[test] -fn 
test_remove_expired_blocks() { - let consensus = ConsensusBuilder::default().build(); - let block_number = 20; - let mut parent = consensus.genesis_block().header(); - let pool = OrphanBlockPool::with_capacity(block_number); - - let deprecated = EpochNumberWithFraction::new(10, 0, 10); - - for _ in 1..block_number { - let new_block = BlockBuilder::default() - .parent_hash(parent.hash()) - .timestamp(unix_time_as_millis().pack()) - .number((parent.number() + 1).pack()) - .epoch(deprecated.clone().pack()) - .nonce((parent.nonce() + 1).pack()) - .build(); - pool.insert(new_block.clone()); - parent = new_block.header(); - } - assert_eq!(pool.leaders_len(), 1); - - let v = pool.clean_expired_blocks(20_u64); - assert_eq!(v.len(), 19); - assert_eq!(pool.leaders_len(), 0); - assert_eq!(pool.total_size(), 0) -} diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index b743a6d59c..16499c86e9 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -1,14 +1,36 @@ -use crate::block_status::BlockStatus; +#![allow(unused_imports)] +#![allow(dead_code)] + use crate::tests::util::{build_chain, inherit_block}; use crate::SyncShared; -use ckb_chain::chain::ChainService; -use ckb_shared::SharedBuilder; +use ckb_chain::{start_chain_services, RemoteBlock, VerifyResult}; +use ckb_logger::info; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::{Shared, SharedBuilder}; use ckb_store::{self, ChainStore}; use ckb_test_chain_utils::always_success_cellbase; -use ckb_types::core::Capacity; +use ckb_types::core::{BlockBuilder, BlockView, Capacity}; +use ckb_types::packed::Byte32; use ckb_types::prelude::*; +use std::fmt::format; use std::sync::Arc; +fn wait_for_expected_block_status( + shared: &SyncShared, + hash: &Byte32, + expect_status: BlockStatus, +) -> bool { + let now = std::time::Instant::now(); + while now.elapsed().as_secs() < 2 { + let current_status = shared.shared().get_block_status(hash); + if current_status == expect_status { + return true; + } + std::thread::sleep(std::time::Duration::from_micros(100)); + } + false +} + #[test] fn test_insert_new_block() { let (shared, chain) = build_chain(2); @@ -19,10 +41,10 @@ fn test_insert_new_block() { }; assert!(shared - .insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert valid block"),); + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert valid block")); assert!(!shared - .insert_new_block(&chain, Arc::clone(&new_block)) + .blocking_insert_new_block(&chain, Arc::clone(&new_block)) .expect("insert duplicated valid block"),); } @@ -42,7 +64,7 @@ fn test_insert_invalid_block() { }; assert!(shared - .insert_new_block(&chain, Arc::clone(&invalid_block)) + .blocking_insert_new_block(&chain, Arc::clone(&invalid_block)) .is_err(),); } @@ -54,10 +76,7 @@ fn test_insert_parent_unknown_block() { .consensus(shared1.consensus().clone()) .build() .unwrap(); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = start_chain_services(pack.take_chain_services_builder()); ( SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), chain_controller, @@ -88,38 +107,134 @@ fn test_insert_parent_unknown_block() { let valid_hash = valid_orphan.header().hash(); let invalid_hash = invalid_orphan.header().hash(); let parent_hash = parent.header().hash(); + shared.accept_remote_block( + &chain, + RemoteBlock { + block: Arc::clone(&valid_orphan), + + 
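// A no-op verify_callback is enough here: the test polls block statuses
// below rather than waiting on the verification result.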
verify_callback: Box::new(|_: VerifyResult| {}), + }, + ); + shared.accept_remote_block( + &chain, + RemoteBlock { + block: Arc::clone(&invalid_orphan), + verify_callback: Box::new(|_: VerifyResult| {}), + }, + ); + + let wait_for_block_status_match = |hash: &Byte32, expect_status: BlockStatus| -> bool { + let mut status_match = false; + let now = std::time::Instant::now(); + while now.elapsed().as_secs() < 2 { + if shared.active_chain().get_block_status(hash) == expect_status { + status_match = true; + break; + } + std::thread::sleep(std::time::Duration::from_micros(100)); + } + status_match + }; - assert!(!shared - .insert_new_block(&chain, Arc::clone(&valid_orphan)) - .expect("insert orphan block"),); - assert!(!shared - .insert_new_block(&chain, Arc::clone(&invalid_orphan)) - .expect("insert orphan block"),); assert_eq!( shared.active_chain().get_block_status(&valid_hash), BlockStatus::BLOCK_RECEIVED ); + + if shared.active_chain().get_block_status(&invalid_hash) == BlockStatus::BLOCK_RECEIVED { + wait_for_block_status_match(&invalid_hash, BlockStatus::BLOCK_INVALID); + } + + // This block won't pass non_contextual_check, and will be BLOCK_INVALID immediately assert_eq!( shared.active_chain().get_block_status(&invalid_hash), - BlockStatus::BLOCK_RECEIVED + BlockStatus::BLOCK_INVALID ); // After inserting parent of an orphan block + assert!(shared - .insert_new_block(&chain, Arc::clone(&parent)) - .expect("insert parent of orphan block"),); - assert_eq!( - shared.active_chain().get_block_status(&valid_hash), + .blocking_insert_new_block(&chain, Arc::clone(&parent)) + .expect("insert parent of orphan block")); + + assert!(wait_for_block_status_match( + &valid_hash, BlockStatus::BLOCK_VALID - ); - assert_eq!( - shared.active_chain().get_block_status(&invalid_hash), + )); + assert!(wait_for_block_status_match( + &invalid_hash, BlockStatus::BLOCK_INVALID - ); - assert_eq!( - shared.active_chain().get_block_status(&parent_hash), + )); + assert!(wait_for_block_status_match( + &parent_hash, BlockStatus::BLOCK_VALID - ); + )); +} + +#[test] +fn test_insert_child_block_with_stored_but_unverified_parent() { + let (shared1, _) = build_chain(2); + + let block = shared1 + .store() + .get_block(&shared1.active_chain().tip_header().hash()) + .unwrap(); + let parent = { + let parent = shared1 + .store() + .get_block(&block.header().parent_hash()) + .unwrap(); + Arc::new(parent) + }; + + let _logger = ckb_logger_service::init_for_test("info,ckb-chain=debug").expect("init log"); + + let parent_hash = parent.header().hash(); + let child = Arc::new(block); + let child_hash = child.header().hash(); + + let (shared, chain) = { + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(shared1.consensus().clone()) + .build() + .unwrap(); + + let db_txn = shared.store().begin_transaction(); + info!("inserting parent: {}-{}", parent.number(), parent.hash()); + db_txn.insert_block(&parent).expect("insert parent"); + db_txn.commit().expect("commit parent"); + + assert!( + shared.store().get_block(&parent_hash).is_some(), + "parent block should be stored" + ); + + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + + while chain_controller.is_verifying_unverified_blocks_on_startup() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + ( + SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()), + chain_controller, + ) + }; + + assert!(shared + .blocking_insert_new_block(&chain, Arc::clone(&child)) + .expect("insert child block")); + + 
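// Block statuses may lag the blocking insert, since they are refreshed by
// the chain service; the test therefore polls for the expected statuses
// below instead of asserting them immediately.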
assert!(wait_for_expected_block_status( + &shared, + &child_hash, + BlockStatus::BLOCK_VALID + )); + assert!(wait_for_expected_block_status( + &shared, + &parent_hash, + BlockStatus::BLOCK_VALID + )); } #[test] @@ -141,10 +256,10 @@ fn test_switch_valid_fork() { .build(); let arc_block = Arc::new(block.clone()); assert!(fork_shared - .insert_new_block(&fork_chain, Arc::clone(&arc_block)) + .blocking_insert_new_block(&fork_chain, Arc::clone(&arc_block)) .expect("insert fork"),); assert!(shared - .insert_new_block(&chain, arc_block) + .blocking_insert_new_block(&chain, arc_block) .expect("insert fork"),); parent_header = block.header().clone(); valid_fork.push(block); @@ -166,10 +281,10 @@ .build(); let arc_block = Arc::new(block.clone()); assert!(fork_shared - .insert_new_block(&fork_chain, Arc::clone(&arc_block)) + .blocking_insert_new_block(&fork_chain, Arc::clone(&arc_block)) .expect("insert fork"),); assert!(shared - .insert_new_block(&chain, arc_block) + .blocking_insert_new_block(&chain, arc_block) .expect("insert fork"),); parent_header = block.header().clone(); valid_fork.push(block); diff --git a/sync/src/tests/synchronizer/basic_sync.rs b/sync/src/tests/synchronizer/basic_sync.rs index 0d1af241b6..b139fdaab1 100644 --- a/sync/src/tests/synchronizer/basic_sync.rs +++ b/sync/src/tests/synchronizer/basic_sync.rs @@ -4,11 +4,12 @@ use crate::synchronizer::{ }; use crate::tests::TestNode; use crate::{SyncShared, Synchronizer}; -use ckb_chain::chain::ChainService; +use ckb_chain::start_chain_services; use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_channel::bounded; use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; +use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder}; @@ -32,20 +33,31 @@ const DEFAULT_CHANNEL: usize = 128; #[test] fn basic_sync() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); let _faketime_guard = ckb_systemtime::faketime(); _faketime_guard.set_faketime(0); let thread_name = "fake_time=0".to_string(); let (mut node1, shared1) = setup_node(1); + info!("finished setup node1"); let (mut node2, shared2) = setup_node(3); + info!("finished setup node2"); + info!("connecting node1 and node2"); node1.connect(&mut node2, SupportProtocols::Sync.protocol_id()); + info!("node1 and node2 connected"); + let now = std::time::Instant::now(); let (signal_tx1, signal_rx1) = bounded(DEFAULT_CHANNEL); - node1.start(thread_name.clone(), signal_tx1, |data| { + node1.start(thread_name.clone(), signal_tx1, move |data| { let msg = packed::SyncMessage::from_compatible_slice(&data) .expect("sync message") .to_enum(); + + assert!( + now.elapsed().as_secs() <= 10, + "node1 should get block(3)'s SendBlock message within 10 seconds" + ); // terminate thread after 3 blocks if let packed::SyncMessageUnionReader::SendBlock(reader) = msg.as_reader() { let block = reader.block().to_entity().into_view(); @@ -61,14 +73,22 @@ fn basic_sync() { // Wait node1 receive block from node2 let _ = signal_rx1.recv(); - node1.stop(); - node2.stop(); - - assert_eq!(shared1.snapshot().tip_number(), 3); - assert_eq!( - shared1.snapshot().tip_number(), - shared2.snapshot().tip_number() - ); + let test_start = std::time::Instant::now(); + while test_start.elapsed().as_secs() < 3 { + info!("node1 tip_number: {}", shared1.snapshot().tip_number()); + if shared1.snapshot().tip_number() == 3 { + assert_eq!(shared1.snapshot().tip_number(), 3); +
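// Both asserts run only once node1's tip reaches 3; the enclosing loop
// retries for up to 3 seconds because the tip now advances asynchronously
// after the SendBlock message is handled.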
assert_eq!( + shared1.snapshot().tip_number(), + shared2.snapshot().tip_number() + ); + + node1.stop(); + node2.stop(); + return; + } + } + panic!("node1 and node2 should sync in 3 seconds"); } fn setup_node(height: u64) -> (TestNode, Shared) { @@ -99,8 +119,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); for _i in 0..height { let number = block.header().number() + 1; @@ -167,7 +186,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { .build(); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block should be OK"); } diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 4f181ba59b..a0c758c695 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_constant::sync::{CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, MAX_TIP_AGE}; use ckb_dao::DaoCalculator; @@ -8,6 +8,7 @@ use ckb_network::{ SessionType, TargetSession, }; use ckb_reward_calculator::RewardCalculator; +use ckb_shared::types::HeaderIndex; use ckb_shared::{Shared, SharedBuilder, Snapshot}; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -36,7 +37,7 @@ use std::{ use crate::{ synchronizer::{BlockFetcher, BlockProcess, GetBlocksProcess, HeadersProcess, Synchronizer}, - types::{HeaderIndex, HeadersSyncController, IBDState, PeerState}, + types::{HeadersSyncController, IBDState, PeerState}, Status, StatusCode, SyncShared, }; @@ -48,8 +49,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared, Synchr let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let sync_shared = Arc::new(SyncShared::new( shared.clone(), @@ -144,7 +144,7 @@ fn insert_block( let block = gen_block(shared, &parent, &epoch, nonce); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_EXTENSION) .expect("process block ok"); } @@ -179,6 +179,7 @@ fn test_locator() { #[test] fn test_locate_latest_common_block() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); let consensus = Consensus::default(); let (chain_controller1, shared1, synchronizer1) = start_chain(Some(consensus.clone())); let (chain_controller2, shared2, synchronizer2) = start_chain(Some(consensus.clone())); @@ -239,10 +240,10 @@ fn test_locate_latest_common_block2() { blocks.push(new_block.clone()); chain_controller1 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain_controller2 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + 
.blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } @@ -259,7 +260,7 @@ fn test_locate_latest_common_block2() { let new_block = gen_block(&shared2, &parent, &epoch, i + 100); chain_controller2 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } @@ -347,7 +348,7 @@ fn test_process_new_block() { let new_block = gen_block(&shared1, &parent, &epoch, i + 100); chain_controller1 - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); blocks.push(new_block); @@ -356,7 +357,7 @@ fn test_process_new_block() { blocks.into_iter().for_each(|block| { synchronizer .shared() - .insert_new_block(&synchronizer.chain, Arc::new(block)) + .blocking_insert_new_block(&synchronizer.chain, Arc::new(block)) .expect("Insert new block failed"); }); assert_eq!(&chain1_last_block.header(), shared2.snapshot().tip_header()); @@ -384,7 +385,7 @@ fn test_get_locator_response() { blocks.push(new_block.clone()); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); parent = new_block.header().to_owned(); } @@ -665,8 +666,10 @@ fn test_sync_process() { for block in &fetched_blocks { let block = SendBlockBuilder::default().block(block.data()).build(); + + let nc = Arc::new(mock_network_context(1)); assert_eq!( - BlockProcess::new(block.as_reader(), &synchronizer1, peer1).execute(), + BlockProcess::new(block.as_reader(), &synchronizer1, peer1, nc).blocking_execute(), Status::ok(), ); } @@ -1091,7 +1094,10 @@ fn test_fix_last_common_header() { for number in 1..=main_tip_number { let key = m_(number); let block = graph.get(&key).cloned().unwrap(); - synchronizer.chain.process_block(Arc::new(block)).unwrap(); + synchronizer + .chain + .blocking_process_block(Arc::new(block)) + .unwrap(); } { let nc = mock_network_context(1); @@ -1204,7 +1210,7 @@ fn get_blocks_process() { #[test] fn test_internal_db_error() { - use crate::utils::is_internal_db_error; + use ckb_error::is_internal_db_error; let consensus = Consensus::default(); let mut builder = SharedBuilder::with_temp_db(); @@ -1212,9 +1218,6 @@ fn test_internal_db_error() { let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let _chain_controller = chain_service.start::<&str>(None); - let sync_shared = Arc::new(SyncShared::new( shared, Default::default(), @@ -1225,7 +1228,7 @@ fn test_internal_db_error() { let block = Arc::new(BlockBuilder::default().build()); // mock process_block - faux::when!(chain_controller.process_block(Arc::clone(&block))).then_return(Err( + faux::when!(chain_controller.blocking_process_block(Arc::clone(&block))).then_return(Err( InternalErrorKind::Database.other("mocked db error").into(), )); @@ -1233,7 +1236,7 @@ fn test_internal_db_error() { let status = synchronizer .shared() - .accept_block(&synchronizer.chain, Arc::clone(&block)); + .blocking_insert_new_block(&synchronizer.chain, Arc::clone(&block)); assert!(is_internal_db_error(&status.err().unwrap())); } 
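Taken together, the test migrations above follow one API change: the old synchronous `ChainController::process_block` / `internal_process_block` pair becomes `blocking_process_block` / `blocking_process_block_with_switch`, while the network path moves to the callback-based `RemoteBlock` flow. A minimal sketch of the two styles, using only types and signatures visible in this diff (the two helper functions and the callback body are illustrative, not part of the patch):

use ckb_chain::{ChainController, RemoteBlock, VerifyResult};
use ckb_types::core::BlockView;
use std::sync::Arc;

// Blocking style: callers such as tests wait for the verify result inline.
fn insert_and_wait(chain: &ChainController, block: BlockView) -> VerifyResult {
    chain.blocking_process_block(Arc::new(block))
}

// Asynchronous style: the synchronizer hands the block off and learns the
// outcome later through `verify_callback`.
fn insert_in_background(chain: &ChainController, block: Arc<BlockView>) {
    chain.asynchronous_process_remote_block(RemoteBlock {
        block,
        verify_callback: Box::new(|result: VerifyResult| {
            if let Err(err) = result {
                ckb_logger::error!("remote block rejected: {}", err);
            }
        }),
    });
}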
diff --git a/sync/src/tests/types.rs b/sync/src/tests/types.rs index 081c95a012..228de50fb2 100644 --- a/sync/src/tests/types.rs +++ b/sync/src/tests/types.rs @@ -1,3 +1,4 @@ +use ckb_shared::types::HeaderIndexView; use ckb_types::{ core::{BlockNumber, EpochNumberWithFraction, HeaderBuilder}, packed::Byte32, @@ -10,7 +11,7 @@ use std::{ sync::atomic::{AtomicUsize, Ordering::Relaxed}, }; -use crate::types::{HeaderIndexView, TtlFilter, FILTER_TTL}; +use crate::types::{TtlFilter, FILTER_TTL}; const SKIPLIST_LENGTH: u64 = 10_000; diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index 3149b80ba5..8f37b7e7bf 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -1,5 +1,5 @@ use crate::SyncShared; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_dao::DaoCalculator; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, SharedBuilder, Snapshot}; @@ -19,10 +19,7 @@ pub fn build_chain(tip: BlockNumber) -> (SyncShared, ChainController) { .consensus(always_success_consensus()) .build() .unwrap(); - let chain_controller = { - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - chain_service.start::<&str>(None) - }; + let chain_controller = start_chain_services(pack.take_chain_services_builder()); generate_blocks(&shared, &chain_controller, tip); let sync_shared = SyncShared::new(shared, Default::default(), pack.take_relay_tx_receiver()); (sync_shared, chain_controller) @@ -40,7 +37,7 @@ pub fn generate_blocks( let block = inherit_block(shared, &parent_hash).build(); parent_hash = block.header().hash(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .expect("processing block should be ok"); } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index c444f870fa..90179224b5 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1,9 +1,8 @@ -use crate::block_status::BlockStatus; -use crate::orphan_block_pool::OrphanBlockPool; -use crate::utils::is_internal_db_error; use crate::{Status, StatusCode, FAST_INDEX, LOW_INDEX, NORMAL_INDEX, TIME_TRACE_SIZE}; use ckb_app_config::SyncConfig; -use ckb_chain::chain::ChainController; +#[cfg(test)] +use ckb_chain::VerifyResult; +use ckb_chain::{ChainController, RemoteBlock}; use ckb_chain_spec::consensus::{Consensus, MAX_BLOCK_INTERVAL, MIN_BLOCK_INTERVAL}; use ckb_channel::Receiver; use ckb_constant::sync::{ @@ -13,10 +12,14 @@ use ckb_constant::sync::{ MAX_UNKNOWN_TX_HASHES_SIZE, MAX_UNKNOWN_TX_HASHES_SIZE_PER_PEER, POW_INTERVAL, RETRY_ASK_TX_TIMEOUT_INCREASE, SUSPEND_SYNC_TIME, }; -use ckb_error::Error as CKBError; -use ckb_logger::{debug, error, info, trace}; +use ckb_logger::{debug, error, info, trace, warn}; use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; -use ckb_shared::{shared::Shared, Snapshot}; +use ckb_shared::{ + block_status::BlockStatus, + shared::Shared, + types::{BlockNumberAndHash, HeaderIndex, HeaderIndexView, SHRINK_THRESHOLD}, + Snapshot, +}; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; use ckb_traits::{HeaderFields, HeaderFieldsProvider}; @@ -25,35 +28,27 @@ use ckb_types::{ core::{self, BlockNumber, EpochExt}, packed::{self, Byte32}, prelude::*, - H256, U256, + U256, }; use ckb_util::{shrink_to_fit, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use ckb_verification_traits::Switch; use dashmap::{self, 
DashMap}; use keyed_priority_queue::{self, KeyedPriorityQueue}; use lru::LruCache; use std::collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}; use std::hash::Hash; -use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; use std::{cmp, fmt, iter}; -mod header_map; - use crate::utils::send_message; -use ckb_types::core::{EpochNumber, EpochNumberWithFraction}; -pub use header_map::HeaderMap; const GET_HEADERS_CACHE_SIZE: usize = 10000; // TODO: Need discussed const GET_HEADERS_TIMEOUT: Duration = Duration::from_secs(15); const FILTER_SIZE: usize = 50000; -const ORPHAN_BLOCK_SIZE: usize = 1024; // 2 ** 13 < 6 * 1800 < 2 ** 14 const ONE_DAY_BLOCK_NUMBER: u64 = 8192; -const SHRINK_THRESHOLD: usize = 300; pub(crate) const FILTER_TTL: u64 = 4 * 60 * 60; // State used to enforce CHAIN_SYNC_TIMEOUT @@ -402,53 +397,6 @@ impl InflightState { } } -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct BlockNumberAndHash { - pub number: BlockNumber, - pub hash: Byte32, -} - -impl BlockNumberAndHash { - pub fn new(number: BlockNumber, hash: Byte32) -> Self { - Self { number, hash } - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } -} - -impl From<(BlockNumber, Byte32)> for BlockNumberAndHash { - fn from(inner: (BlockNumber, Byte32)) -> Self { - Self { - number: inner.0, - hash: inner.1, - } - } -} - -impl From<&core::HeaderView> for BlockNumberAndHash { - fn from(header: &core::HeaderView) -> Self { - Self { - number: header.number(), - hash: header.hash(), - } - } -} - -impl From for BlockNumberAndHash { - fn from(header: core::HeaderView) -> Self { - Self { - number: header.number(), - hash: header.hash(), - } - } -} - enum TimeQuantile { MinToFast, FastToNormal, @@ -724,6 +672,14 @@ impl InflightBlocks { trace.remove(key); } remove_key.push(key.clone()); + debug!( + "prune: remove InflightState: remove {}-{} from {}", + key.number, key.hash, value.peer + ); + + if let Some(metrics) = ckb_metrics::handle() { + metrics.ckb_inflight_timeout_count.inc(); + } } } @@ -768,6 +724,10 @@ impl InflightBlocks { d.punish(1); } d.hashes.remove(key); + debug!( + "prune: remove download_schedulers: remove {}-{} from {}", + key.number, key.hash, state.peer + ); }; } @@ -801,21 +761,23 @@ impl InflightBlocks { download_scheduler.hashes.insert(block) } - pub fn remove_by_peer(&mut self, peer: PeerIndex) -> bool { + pub fn remove_by_peer(&mut self, peer: PeerIndex) -> usize { let trace = &mut self.trace_number; let state = &mut self.inflight_states; self.download_schedulers .remove(&peer) .map(|blocks| { + let blocks_count = blocks.hashes.iter().len(); for block in blocks.hashes { state.remove(&block); if !trace.is_empty() { trace.remove(&block); } } + blocks_count }) - .is_some() + .unwrap_or_default() } pub fn remove_by_block(&mut self, block: BlockNumberAndHash) -> bool { @@ -1012,257 +974,6 @@ impl Peers { } } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderIndex { - number: BlockNumber, - hash: Byte32, - total_difficulty: U256, -} - -impl HeaderIndex { - pub fn new(number: BlockNumber, hash: Byte32, total_difficulty: U256) -> Self { - HeaderIndex { - number, - hash, - total_difficulty, - } - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } - - pub fn total_difficulty(&self) -> &U256 { - &self.total_difficulty - } - - pub fn number_and_hash(&self) -> 
BlockNumberAndHash { - (self.number(), self.hash()).into() - } - - pub fn is_better_chain(&self, other: &Self) -> bool { - self.is_better_than(other.total_difficulty()) - } - - pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { - self.total_difficulty() > other_total_difficulty - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderIndexView { - hash: Byte32, - number: BlockNumber, - epoch: EpochNumberWithFraction, - timestamp: u64, - parent_hash: Byte32, - total_difficulty: U256, - skip_hash: Option, -} - -impl HeaderIndexView { - pub fn new( - hash: Byte32, - number: BlockNumber, - epoch: EpochNumberWithFraction, - timestamp: u64, - parent_hash: Byte32, - total_difficulty: U256, - ) -> Self { - HeaderIndexView { - hash, - number, - epoch, - timestamp, - parent_hash, - total_difficulty, - skip_hash: None, - } - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn epoch(&self) -> EpochNumberWithFraction { - self.epoch - } - - pub fn timestamp(&self) -> u64 { - self.timestamp - } - - pub fn total_difficulty(&self) -> &U256 { - &self.total_difficulty - } - - pub fn parent_hash(&self) -> Byte32 { - self.parent_hash.clone() - } - - pub fn skip_hash(&self) -> Option<&Byte32> { - self.skip_hash.as_ref() - } - - // deserialize from bytes - fn from_slice_should_be_ok(hash: &[u8], slice: &[u8]) -> Self { - let hash = packed::Byte32Reader::from_slice_should_be_ok(hash).to_entity(); - let number = BlockNumber::from_le_bytes(slice[0..8].try_into().expect("stored slice")); - let epoch = EpochNumberWithFraction::from_full_value(u64::from_le_bytes( - slice[8..16].try_into().expect("stored slice"), - )); - let timestamp = u64::from_le_bytes(slice[16..24].try_into().expect("stored slice")); - let parent_hash = packed::Byte32Reader::from_slice_should_be_ok(&slice[24..56]).to_entity(); - let total_difficulty = U256::from_little_endian(&slice[56..88]).expect("stored slice"); - let skip_hash = if slice.len() == 120 { - Some(packed::Byte32Reader::from_slice_should_be_ok(&slice[88..120]).to_entity()) - } else { - None - }; - Self { - hash, - number, - epoch, - timestamp, - parent_hash, - total_difficulty, - skip_hash, - } - } - - // serialize all fields except `hash` to bytes - fn to_vec(&self) -> Vec { - let mut v = Vec::new(); - v.extend_from_slice(self.number.to_le_bytes().as_slice()); - v.extend_from_slice(self.epoch.full_value().to_le_bytes().as_slice()); - v.extend_from_slice(self.timestamp.to_le_bytes().as_slice()); - v.extend_from_slice(self.parent_hash.as_slice()); - v.extend_from_slice(self.total_difficulty.to_le_bytes().as_slice()); - if let Some(ref skip_hash) = self.skip_hash { - v.extend_from_slice(skip_hash.as_slice()); - } - v - } - - pub fn build_skip(&mut self, tip_number: BlockNumber, get_header_view: F, fast_scanner: G) - where - F: Fn(&Byte32, bool) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, - { - if self.number == 0 { - return; - } - self.skip_hash = self - .get_ancestor( - tip_number, - get_skip_height(self.number()), - get_header_view, - fast_scanner, - ) - .map(|header| header.hash()); - } - - pub fn get_ancestor( - &self, - tip_number: BlockNumber, - number: BlockNumber, - get_header_view: F, - fast_scanner: G, - ) -> Option - where - F: Fn(&Byte32, bool) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, - { - if number > self.number() { - return None; - } - - let mut current = self.clone(); - let mut number_walk = current.number(); - while 
number_walk > number { - let number_skip = get_skip_height(number_walk); - let number_skip_prev = get_skip_height(number_walk - 1); - let store_first = current.number() <= tip_number; - match current.skip_hash { - Some(ref hash) - if number_skip == number - || (number_skip > number - && !(number_skip_prev + 2 < number_skip - && number_skip_prev >= number)) => - { - // Only follow skip if parent->skip isn't better than skip->parent - current = get_header_view(hash, store_first)?; - number_walk = number_skip; - } - _ => { - current = get_header_view(¤t.parent_hash(), store_first)?; - number_walk -= 1; - } - } - if let Some(target) = fast_scanner(number, (current.number(), current.hash()).into()) { - current = target; - break; - } - } - Some(current) - } - - pub fn as_header_index(&self) -> HeaderIndex { - HeaderIndex::new(self.number(), self.hash(), self.total_difficulty().clone()) - } - - pub fn number_and_hash(&self) -> BlockNumberAndHash { - (self.number(), self.hash()).into() - } - - pub fn is_better_than(&self, total_difficulty: &U256) -> bool { - self.total_difficulty() > total_difficulty - } -} - -impl From<(core::HeaderView, U256)> for HeaderIndexView { - fn from((header, total_difficulty): (core::HeaderView, U256)) -> Self { - HeaderIndexView { - hash: header.hash(), - number: header.number(), - epoch: header.epoch(), - timestamp: header.timestamp(), - parent_hash: header.parent_hash(), - total_difficulty, - skip_hash: None, - } - } -} - -// Compute what height to jump back to with the skip pointer. -fn get_skip_height(height: BlockNumber) -> BlockNumber { - // Turn the lowest '1' bit in the binary representation of a number into a '0'. - fn invert_lowest_one(n: i64) -> i64 { - n & (n - 1) - } - - if height < 2 { - return 0; - } - - // Determine which height to jump back to. Any number strictly lower than height is acceptable, - // but the following expression seems to perform well in simulations (max 110 steps to go back - // up to 2**18 blocks). - if (height & 1) > 0 { - invert_lowest_one(invert_lowest_one(height as i64 - 1)) as u64 + 1 - } else { - invert_lowest_one(height as i64) as u64 - } -} - // , Vec)>, timestamp)> pub(crate) type PendingCompactBlockMap = HashMap< Byte32, @@ -1281,25 +992,12 @@ pub struct SyncShared { } impl SyncShared { - /// only use on test + /// Create a SyncShared pub fn new( shared: Shared, sync_config: SyncConfig, tx_relay_receiver: Receiver, ) -> SyncShared { - Self::with_tmpdir::(shared, sync_config, None, tx_relay_receiver) - } - - /// Generate a global sync state through configuration - pub fn with_tmpdir
<P>( - shared: Shared, - sync_config: SyncConfig, - tmpdir: Option<P>
, - tx_relay_receiver: Receiver, - ) -> SyncShared - where - P: AsRef, - { let (total_difficulty, header) = { let snapshot = shared.snapshot(); ( @@ -1308,31 +1006,22 @@ impl SyncShared { ) }; let shared_best_header = RwLock::new((header, total_difficulty).into()); - ckb_logger::info!( + info!( "header_map.memory_limit {}", sync_config.header_map.memory_limit ); - let header_map = HeaderMap::new( - tmpdir, - sync_config.header_map.memory_limit.as_u64() as usize, - shared.async_handle(), - ); let state = SyncState { shared_best_header, - header_map, - block_status_map: DashMap::new(), tx_filter: Mutex::new(TtlFilter::default()), unknown_tx_hashes: Mutex::new(KeyedPriorityQueue::new()), peers: Peers::default(), pending_get_block_proposals: DashMap::new(), pending_compact_blocks: Mutex::new(HashMap::default()), - orphan_block_pool: OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE), inflight_proposals: DashMap::new(), inflight_blocks: RwLock::new(InflightBlocks::default()), pending_get_headers: RwLock::new(LruCache::new(GET_HEADERS_CACHE_SIZE)), tx_relay_receiver, - assume_valid_target: Mutex::new(sync_config.assume_valid_target), min_chain_work: sync_config.min_chain_work, }; @@ -1350,7 +1039,7 @@ impl SyncShared { /// Get snapshot with current chain pub fn active_chain(&self) -> ActiveChain { ActiveChain { - shared: self.clone(), + sync_shared: self.clone(), snapshot: Arc::clone(&self.shared.snapshot()), } } @@ -1370,128 +1059,29 @@ impl SyncShared { self.shared.consensus() } - /// Insert new block to chain store - pub fn insert_new_block( + // Only used by unit test + // Blocking insert a new block, return the verify result + #[cfg(test)] + pub(crate) fn blocking_insert_new_block( &self, chain: &ChainController, block: Arc, - ) -> Result { - // Insert the given block into orphan_block_pool if its parent is not found - if !self.is_stored(&block.parent_hash()) { - debug!( - "insert new orphan block {} {}", - block.header().number(), - block.header().hash() - ); - self.state.insert_orphan_block((*block).clone()); - return Ok(false); - } - - // Attempt to accept the given block if its parent already exist in database - let ret = self.accept_block(chain, Arc::clone(&block)); - if ret.is_err() { - debug!("accept block {:?} {:?}", block, ret); - return ret; - } - - // The above block has been accepted. Attempt to accept its descendant blocks in orphan pool. - // The returned blocks of `remove_blocks_by_parent` are in topology order by parents - self.try_search_orphan_pool(chain); - ret - } - - /// Try to find blocks from the orphan block pool that may no longer be orphan - pub fn try_search_orphan_pool(&self, chain: &ChainController) { - let leaders = self.state.orphan_pool().clone_leaders(); - debug!("orphan pool leader parents hash len: {}", leaders.len()); - - for hash in leaders { - if self.state.orphan_pool().is_empty() { - break; - } - if self.is_stored(&hash) { - let descendants = self.state.remove_orphan_by_parent(&hash); - debug!( - "attempting to accept {} descendant orphan blocks with existing parents hash", - descendants.len() - ); - for block in descendants { - // If we can not find the block's parent in database, that means it was failed to accept - // its parent, so we treat it as an invalid block as well. 
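// (None of the orphan-pool logic removed in this hunk is re-created in the
// synchronizer; under the RemoteBlock flow, blocks with missing parents are
// left to the chain service to track.)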
- if !self.is_stored(&block.parent_hash()) { - debug!( - "parent-unknown orphan block, block: {}, {}, parent: {}", - block.header().number(), - block.header().hash(), - block.header().parent_hash(), - ); - continue; - } - - let block = Arc::new(block); - if let Err(err) = self.accept_block(chain, Arc::clone(&block)) { - debug!( - "accept descendant orphan block {} error {:?}", - block.header().hash(), - err - ); - } - } - } - } - } - - /// Cleanup orphan_pool, - /// Remove blocks whose epoch is 6 (EXPIRED_EPOCH) epochs behind the current epoch. - pub(crate) fn periodic_clean_orphan_pool(&self) { - let hashes = self - .state - .clean_expired_blocks(self.active_chain().epoch_ext().number()); - for hash in hashes { - self.state.remove_header_view(&hash); - } + ) -> VerifyResult { + chain.blocking_process_block(block) } - pub(crate) fn accept_block( - &self, - chain: &ChainController, - block: Arc, - ) -> Result { - let ret = { - let mut assume_valid_target = self.state.assume_valid_target(); - if let Some(ref target) = *assume_valid_target { - // if the target has been reached, delete it - let switch = if target == &Unpack::::unpack(&core::BlockView::hash(&block)) { - assume_valid_target.take(); - info!("assume valid target reached; CKB will do full verification from now on"); - Switch::NONE - } else { - Switch::DISABLE_SCRIPT - }; - - chain.internal_process_block(Arc::clone(&block), switch) - } else { - chain.process_block(Arc::clone(&block)) - } - }; - if let Err(ref error) = ret { - if !is_internal_db_error(error) { - error!("accept block {:?} {}", block, error); - self.state - .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); + pub(crate) fn accept_remote_block(&self, chain: &ChainController, remote_block: RemoteBlock) { + { + let entry = self + .shared() + .block_status_map() + .entry(remote_block.block.header().hash()); + if let dashmap::mapref::entry::Entry::Vacant(entry) = entry { + entry.insert(BlockStatus::BLOCK_RECEIVED); } - } else { - // Clear the newly inserted block from block_status_map. - // - // We don't know whether the actual block status is BLOCK_VALID or BLOCK_INVALID. - // So we just simply remove the corresponding in-memory block status, - // and the next time `get_block_status` would acquire the real-time - // status via fetching block_ext from the database. 
- self.state.remove_block_status(&block.as_ref().hash()); - self.state.remove_header_view(&block.as_ref().hash()); } - ret + chain.asynchronous_process_remote_block(remote_block) } /// Sync a new valid header, try insert to sync state @@ -1534,7 +1124,7 @@ impl SyncShared { } }, ); - self.state.header_map.insert(header_view.clone()); + self.shared.header_map().insert(header_view.clone()); self.state .peers() .may_set_best_known_header(peer, header_view.as_header_index()); @@ -1555,9 +1145,9 @@ impl SyncShared { .get_block_ext(hash) .map(|block_ext| (header, block_ext.total_difficulty).into()) }) - .or_else(|| self.state.header_map.get(hash)) + .or_else(|| self.shared.header_map().get(hash)) } else { - self.state.header_map.get(hash).or_else(|| { + self.shared.header_map().get(hash).or_else(|| { store.get_block_header(hash).and_then(|header| { store .get_block_ext(hash) @@ -1577,12 +1167,61 @@ impl SyncShared { pub fn get_epoch_ext(&self, hash: &Byte32) -> Option { self.store().get_block_epoch(hash) } + + /// Insert peer's unknown_header_list + pub fn insert_peer_unknown_header_list(&self, pi: PeerIndex, header_list: Vec) { + // update peer's unknown_header_list only once + if self.state().peers.unknown_header_list_is_empty(pi) { + // header list is an ordered list, sorted from highest to lowest, + // so here you discard and exit early + for hash in header_list { + if let Some(header) = self.shared().header_map().get(&hash) { + self.state() + .peers + .may_set_best_known_header(pi, header.as_header_index()); + break; + } else { + self.state().peers.insert_unknown_header_hash(pi, hash) + } + } + } + } + + /// Return true when the block is that we have requested and received first time. + pub fn new_block_received(&self, block: &core::BlockView) -> bool { + if !self + .state() + .write_inflight_blocks() + .remove_by_block((block.number(), block.hash()).into()) + { + return false; + } + + let status = self.active_chain().get_block_status(&block.hash()); + debug!( + "new_block_received {}-{}, status: {:?}", + block.number(), + block.hash(), + status + ); + if !BlockStatus::HEADER_VALID.eq(&status) { + return false; + } + + if let dashmap::mapref::entry::Entry::Vacant(status) = + self.shared().block_status_map().entry(block.hash()) + { + status.insert(BlockStatus::BLOCK_RECEIVED); + return true; + } + false + } } impl HeaderFieldsProvider for SyncShared { fn get_header_fields(&self, hash: &Byte32) -> Option { - self.state - .header_map + self.shared + .header_map() .get(hash) .map(|header| HeaderFields { hash: header.hash(), @@ -1670,8 +1309,6 @@ impl PartialOrd for UnknownTxHashPriority { pub struct SyncState { /* Status irrelevant to peers */ shared_best_header: RwLock, - header_map: HeaderMap, - block_status_map: DashMap, tx_filter: Mutex>, // The priority is ordering by timestamp (reversed), means do not ask the tx before this timestamp (timeout). 
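Both `accept_remote_block` and `new_block_received` above claim a block through DashMap's entry API instead of a separate contains/insert pair, since the status map now lives on `Shared` and is written by both the sync and chain services. A minimal sketch of that pattern, assuming the `Shared::block_status_map()` accessor introduced in this diff (the helper name is illustrative):

use ckb_shared::{block_status::BlockStatus, Shared};
use ckb_types::packed::Byte32;

// Record BLOCK_RECEIVED only if no status is present yet; going through the
// entry keeps the lookup and the insert atomic, which matters now that two
// services share one map.
fn mark_received(shared: &Shared, hash: Byte32) -> bool {
    match shared.block_status_map().entry(hash) {
        dashmap::mapref::entry::Entry::Vacant(entry) => {
            entry.insert(BlockStatus::BLOCK_RECEIVED);
            true
        }
        dashmap::mapref::entry::Entry::Occupied(_) => false,
    }
}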
@@ -1684,7 +1321,7 @@ pub struct SyncState { pending_get_block_proposals: DashMap>, pending_get_headers: RwLock>, pending_compact_blocks: Mutex, - orphan_block_pool: OrphanBlockPool, + // orphan_block_pool: OrphanBlockPool, /* In-flight items for which we request to peers, but not got the responses yet */ inflight_proposals: DashMap, @@ -1692,15 +1329,10 @@ pub struct SyncState { /* cached for sending bulk */ tx_relay_receiver: Receiver, - assume_valid_target: Mutex>, min_chain_work: U256, } impl SyncState { - pub fn assume_valid_target(&self) -> MutexGuard> { - self.assume_valid_target.lock() - } - pub fn min_chain_work(&self) -> &U256 { &self.min_chain_work } @@ -1753,10 +1385,6 @@ impl SyncState { self.shared_best_header.read() } - pub fn header_map(&self) -> &HeaderMap { - &self.header_map - } - pub fn may_set_shared_best_header(&self, header: HeaderIndexView) { if !header.is_better_than(self.shared_best_header.read().total_difficulty()) { return; @@ -1768,10 +1396,6 @@ impl SyncState { *self.shared_best_header.write() = header; } - pub fn remove_header_view(&self, hash: &Byte32) { - self.header_map.remove(hash); - } - pub(crate) fn suspend_sync(&self, peer_state: &mut PeerState) { if peer_state.sync_started() { assert_ne!( @@ -1873,7 +1497,7 @@ impl SyncState { || unknown_tx_hashes.len() >= self.peers.state.len() * MAX_UNKNOWN_TX_HASHES_SIZE_PER_PEER { - ckb_logger::warn!( + warn!( "unknown_tx_hashes is too long, len: {}", unknown_tx_hashes.len() ); @@ -1910,19 +1534,6 @@ impl SyncState { self.unknown_tx_hashes.lock() } - // Return true when the block is that we have requested and received first time. - pub fn new_block_received(&self, block: &core::BlockView) -> bool { - if self - .write_inflight_blocks() - .remove_by_block((block.number(), block.hash()).into()) - { - self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - true - } else { - false - } - } - pub fn insert_inflight_proposals( &self, ids: Vec, @@ -1961,33 +1572,6 @@ impl SyncState { self.inflight_proposals.contains_key(proposal_id) } - pub fn insert_orphan_block(&self, block: core::BlockView) { - self.insert_block_status(block.hash(), BlockStatus::BLOCK_RECEIVED); - self.orphan_block_pool.insert(block); - } - - pub fn remove_orphan_by_parent(&self, parent_hash: &Byte32) -> Vec { - let blocks = self.orphan_block_pool.remove_blocks_by_parent(parent_hash); - blocks.iter().for_each(|block| { - self.block_status_map.remove(&block.hash()); - }); - shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); - blocks - } - - pub fn orphan_pool(&self) -> &OrphanBlockPool { - &self.orphan_block_pool - } - - pub fn insert_block_status(&self, block_hash: Byte32, status: BlockStatus) { - self.block_status_map.insert(block_hash, status); - } - - pub fn remove_block_status(&self, block_hash: &Byte32) { - self.block_status_map.remove(block_hash); - shrink_to_fit!(self.block_status_map, SHRINK_THRESHOLD); - } - pub fn drain_get_block_proposals( &self, ) -> DashMap> { @@ -2010,47 +1594,40 @@ impl SyncState { // TODO: record peer's connection duration (disconnect time - connect established time) // and report peer's connection duration to ckb_metrics pub fn disconnected(&self, pi: PeerIndex) { - self.write_inflight_blocks().remove_by_peer(pi); - self.peers().disconnected(pi); - } - - pub fn get_orphan_block(&self, block_hash: &Byte32) -> Option { - self.orphan_block_pool.get_block(block_hash) - } - - pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec { - self.orphan_block_pool.clean_expired_blocks(epoch) - } - - pub 
fn insert_peer_unknown_header_list(&self, pi: PeerIndex, header_list: Vec) { - // update peer's unknown_header_list only once - if self.peers.unknown_header_list_is_empty(pi) { - // header list is an ordered list, sorted from highest to lowest, - // so here you discard and exit early - for hash in header_list { - if let Some(header) = self.header_map.get(&hash) { - self.peers - .may_set_best_known_header(pi, header.as_header_index()); - break; - } else { - self.peers.insert_unknown_header_hash(pi, hash) - } - } + let removed_inflight_blocks_count = self.write_inflight_blocks().remove_by_peer(pi); + if removed_inflight_blocks_count > 0 { + debug!( + "disconnected {}, remove {} inflight blocks", + pi, removed_inflight_blocks_count + ) } + self.peers().disconnected(pi); } } /** ActiveChain captures a point-in-time view of indexed chain of blocks. */ #[derive(Clone)] pub struct ActiveChain { - shared: SyncShared, + sync_shared: SyncShared, snapshot: Arc, } #[doc(hidden)] impl ActiveChain { + pub(crate) fn sync_shared(&self) -> &SyncShared { + &self.sync_shared + } + + pub fn shared(&self) -> &Shared { + self.sync_shared.shared() + } + fn store(&self) -> &ChainDB { - self.shared.store() + self.sync_shared.store() + } + + pub fn state(&self) -> &SyncState { + self.sync_shared.state() } fn snapshot(&self) -> &Snapshot { @@ -2088,10 +1665,6 @@ impl ActiveChain { .unwrap_or_default() } - pub fn shared(&self) -> &SyncShared { - &self.shared - } - pub fn total_difficulty(&self) -> &U256 { self.snapshot.total_difficulty() } @@ -2115,30 +1688,76 @@ impl ActiveChain { pub fn is_main_chain(&self, hash: &packed::Byte32) -> bool { self.snapshot.is_main_chain(hash) } + pub fn is_unverified_chain(&self, hash: &packed::Byte32) -> bool { + self.store().get_block_epoch_index(hash).is_some() + } pub fn is_initial_block_download(&self) -> bool { - self.shared.shared().is_initial_block_download() + self.shared().is_initial_block_download() + } + pub fn unverified_tip_header(&self) -> HeaderIndex { + self.shared().get_unverified_tip() + } + + pub fn unverified_tip_hash(&self) -> Byte32 { + self.unverified_tip_header().hash() + } + + pub fn unverified_tip_number(&self) -> BlockNumber { + self.unverified_tip_header().number() } pub fn get_ancestor(&self, base: &Byte32, number: BlockNumber) -> Option { - let tip_number = self.tip_number(); - self.shared + self.get_ancestor_internal(base, number, false) + } + + pub fn get_ancestor_with_unverified( + &self, + base: &Byte32, + number: BlockNumber, + ) -> Option { + self.get_ancestor_internal(base, number, true) + } + + fn get_ancestor_internal( + &self, + base: &Byte32, + number: BlockNumber, + with_unverified: bool, + ) -> Option { + let tip_number = { + if with_unverified { + self.unverified_tip_number() + } else { + self.tip_number() + } + }; + + let block_is_on_chain_fn = |hash: &Byte32| { + if with_unverified { + self.is_unverified_chain(hash) + } else { + self.is_main_chain(hash) + } + }; + + let get_header_view_fn = |hash: &Byte32, store_first: bool| { + self.sync_shared.get_header_index_view(hash, store_first) + }; + + let fast_scanner_fn = |number: BlockNumber, current: BlockNumberAndHash| { + // shortcut to return an ancestor block + if current.number <= tip_number && block_is_on_chain_fn(¤t.hash) { + self.get_block_hash(number) + .and_then(|hash| self.sync_shared.get_header_index_view(&hash, true)) + } else { + None + } + }; + + self.sync_shared .get_header_index_view(base, false)? 
- .get_ancestor( - tip_number, - number, - |hash, store_first| self.shared.get_header_index_view(hash, store_first), - |number, current| { - // shortcut to return an ancestor block - if current.number <= tip_number && self.snapshot().is_main_chain(¤t.hash) - { - self.get_block_hash(number) - .and_then(|hash| self.shared.get_header_index_view(&hash, true)) - } else { - None - } - }, - ) + .get_ancestor(tip_number, number, get_header_view_fn, fast_scanner_fn) } pub fn get_locator(&self, start: BlockNumberAndHash) -> Vec { @@ -2184,7 +1803,7 @@ impl ActiveChain { } // always include genesis hash if index != 0 { - locator.push(self.shared.consensus().genesis_hash()); + locator.push(self.sync_shared.consensus().genesis_hash()); } break; } @@ -2234,7 +1853,7 @@ impl ActiveChain { } let locator_hash = locator.last().expect("empty checked"); - if locator_hash != &self.shared.consensus().genesis_hash() { + if locator_hash != &self.sync_shared.consensus().genesis_hash() { return None; } @@ -2252,11 +1871,11 @@ impl ActiveChain { if let Some(header) = locator .get(index - 1) - .and_then(|hash| self.shared.store().get_block_header(hash)) + .and_then(|hash| self.sync_shared.store().get_block_header(hash)) { let mut block_hash = header.data().raw().parent_hash(); loop { - let block_header = match self.shared.store().get_block_header(&block_hash) { + let block_header = match self.sync_shared.store().get_block_header(&block_hash) { None => break latest_common, Some(block_header) => block_header, }; @@ -2285,7 +1904,7 @@ impl ActiveChain { (block_number + 1..max_height) .filter_map(|block_number| self.snapshot.get_block_hash(block_number)) .take_while(|block_hash| block_hash != hash_stop) - .filter_map(|block_hash| self.shared.store().get_block_header(&block_hash)) + .filter_map(|block_hash| self.sync_shared.store().get_block_header(&block_hash)) .collect() } @@ -2296,8 +1915,7 @@ impl ActiveChain { block_number_and_hash: BlockNumberAndHash, ) { if let Some(last_time) = self - .shared() - .state + .state() .pending_get_headers .write() .get(&(peer, block_number_and_hash.hash())) @@ -2315,8 +1933,7 @@ impl ActiveChain { ); } } - self.shared() - .state() + self.state() .pending_get_headers .write() .put((peer, block_number_and_hash.hash()), Instant::now()); @@ -2336,25 +1953,7 @@ impl ActiveChain { } pub fn get_block_status(&self, block_hash: &Byte32) -> BlockStatus { - match self.shared().state().block_status_map.get(block_hash) { - Some(status_ref) => *status_ref.value(), - None => { - if self.shared().state().header_map.contains_key(block_hash) { - BlockStatus::HEADER_VALID - } else { - let verified = self - .snapshot - .get_block_ext(block_hash) - .map(|block_ext| block_ext.verified); - match verified { - None => BlockStatus::UNKNOWN, - Some(None) => BlockStatus::BLOCK_STORED, - Some(Some(true)) => BlockStatus::BLOCK_VALID, - Some(Some(false)) => BlockStatus::BLOCK_INVALID, - } - } - } - } + self.shared().get_block_status(block_hash) } pub fn contains_block_status(&self, block_hash: &Byte32, status: BlockStatus) -> bool { @@ -2388,3 +1987,22 @@ impl From for bool { } } } + +pub(crate) fn post_sync_process( + nc: &dyn CKBProtocolContext, + peer: PeerIndex, + item_name: &str, + status: Status, +) { + if let Some(ban_time) = status.should_ban() { + error!( + "Receive {} from {}. 
Ban {:?} for {}", + item_name, peer, ban_time, status + ); + nc.ban_peer(peer, ban_time, status.to_string()); + } else if status.should_warn() { + warn!("Receive {} from {}, {}", item_name, peer, status); + } else if !status.is_ok() { + debug!("Receive {} from {}, {}", item_name, peer, status); + } +} diff --git a/sync/src/utils.rs b/sync/src/utils.rs index fac6e7ef05..c0949de0fd 100644 --- a/sync/src/utils.rs +++ b/sync/src/utils.rs @@ -1,5 +1,4 @@ use crate::{Status, StatusCode}; -use ckb_error::{Error as CKBError, ErrorKind, InternalError, InternalErrorKind}; use ckb_logger::error; use ckb_network::{CKBProtocolContext, PeerIndex, ProtocolId, SupportProtocols}; use ckb_types::packed::{RelayMessageReader, SyncMessageReader}; @@ -157,25 +156,3 @@ fn protocol_name(protocol_id: ProtocolId) -> String { } } } - -/// return whether the error's kind is `InternalErrorKind::Database` -/// -/// ### Panic -/// -/// Panic if the error kind is `InternalErrorKind::DataCorrupted`. -/// If the database is corrupted, panic is better than handle it silently. -pub(crate) fn is_internal_db_error(error: &CKBError) -> bool { - if error.kind() == ErrorKind::Internal { - let error_kind = error - .downcast_ref::() - .expect("error kind checked") - .kind(); - if error_kind == InternalErrorKind::DataCorrupted { - panic!("{}", error) - } else { - return error_kind == InternalErrorKind::Database - || error_kind == InternalErrorKind::System; - } - } - false -} diff --git a/test/Cargo.toml b/test/Cargo.toml index 3223b7a9b8..cc26a72bc5 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -29,6 +29,9 @@ ckb-logger-config = { path = "../util/logger-config", version = "= 0.116.0-pre" ckb-logger-service = { path = "../util/logger-service", version = "= 0.116.0-pre" } ckb-error = { path = "../error", version = "= 0.116.0-pre" } ckb-constant = { path = "../util/constant", version = "= 0.116.0-pre" } +ckb-db = { path = "../db", version = "= 0.116.0-pre" } +ckb-store = { path = "../store", version = "= 0.116.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.116.0-pre" } tempfile = "3" reqwest = { version = "=0.11.20", features = ["blocking", "json"] } rand = "0.7" diff --git a/test/src/main.rs b/test/src/main.rs index cef9c5a673..684798c479 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -398,6 +398,8 @@ fn all_specs() -> Vec> { Box::new(BlockSyncNonAncestorBestBlocks), Box::new(RequestUnverifiedBlocks), Box::new(SyncTimeout), + Box::new(SyncChurn), + Box::new(SyncInvalid), Box::new(GetBlockFilterCheckPoints), Box::new(GetBlockFilterHashes), Box::new(GetBlockFilters), @@ -587,6 +589,7 @@ fn all_specs() -> Vec> { Box::new(CheckVmVersion1), Box::new(CheckVmVersion2), Box::new(CheckVmBExtension), + Box::new(RandomlyKill), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/net.rs b/test/src/net.rs index 56c4f5676e..4863c46792 100644 --- a/test/src/net.rs +++ b/test/src/net.rs @@ -140,7 +140,7 @@ impl Net { let protocol_id = protocol.protocol_id(); let peer_index = self .receivers - .get(node_id) + .get(&node_id) .map(|(peer_index, _)| *peer_index) .unwrap_or_else(|| panic!("not connected peer {}", node.p2p_address())); self.controller() @@ -156,7 +156,7 @@ impl Net { let node_id = node.node_id(); let (peer_index, receiver) = self .receivers - .get(node_id) + .get(&node_id) .unwrap_or_else(|| panic!("not connected peer {}", node.p2p_address())); let net_message = receiver.recv_timeout(timeout)?; info!( diff --git a/test/src/node.rs b/test/src/node.rs index 650c43533d..338a0f6fbf 100644 --- 
diff --git a/test/src/node.rs b/test/src/node.rs
index 650c43533d..338a0f6fbf 100644
--- a/test/src/node.rs
+++ b/test/src/node.rs
@@ -2,14 +2,17 @@ use crate::global::binary;
 use crate::rpc::RpcClient;
 use crate::utils::{find_available_port, temp_path, wait_until};
 use crate::{SYSTEM_CELL_ALWAYS_FAILURE_INDEX, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX};
-use ckb_app_config::CKBAppConfig;
+use ckb_app_config::{AppConfig, CKBAppConfig, ExitCode};
 use ckb_chain_spec::consensus::Consensus;
 use ckb_chain_spec::ChainSpec;
 use ckb_error::AnyError;
 use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo};
 use ckb_jsonrpc_types::{PoolTxDetailInfo, TxStatus};
 use ckb_logger::{debug, error, info};
+use ckb_network::multiaddr::Multiaddr;
 use ckb_resource::Resource;
+use ckb_shared::shared_builder::open_or_create_db;
+use ckb_store::ChainDB;
 use ckb_types::{
     bytes,
     core::{
@@ -19,19 +22,19 @@ use ckb_types::{
     packed::{Block, Byte32, CellDep, CellInput, CellOutput, CellOutputBuilder, OutPoint, Script},
     prelude::*,
 };
-use std::borrow::Borrow;
-use std::collections::HashSet;
-use std::convert::Into;
+use std::borrow::{Borrow, BorrowMut};
+use std::collections::{HashMap, HashSet};
 use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::process::{Child, Command, Stdio};
+use std::sync::{Arc, RwLock};
 use std::thread::sleep;
 use std::time::{Duration, Instant};

 #[cfg(target_os = "windows")]
 use windows_sys::Win32::System::Console::{GenerateConsoleCtrlEvent, CTRL_C_EVENT};

-struct ProcessGuard {
+pub(crate) struct ProcessGuard {
     pub name: String,
     pub child: Child,
     pub killed: bool,
@@ -49,7 +52,12 @@ impl Drop for ProcessGuard {
     }
 }

+#[derive(Clone)]
 pub struct Node {
+    inner: Arc<InnerNode>,
+}
+
+pub struct InnerNode {
     spec_node_name: String,
     working_dir: PathBuf,
     consensus: Consensus,
@@ -57,8 +65,8 @@ pub struct Node {
     rpc_client: RpcClient,
     rpc_listen: String,

-    node_id: Option<String>,     // initialize when starts node
-    guard: Option<ProcessGuard>, // initialize when starts node
+    node_id: RwLock<Option<String>>, // initialized when the node starts
+    guard: RwLock<Option<ProcessGuard>>, // initialized when the node starts
 }
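Wrapping the node's state in Arc<InnerNode>, with RwLock around the two late-initialized fields, is what lets Node derive Clone so that specs (SyncChurn below, for example) can move clones into worker threads. The pattern in miniature, with illustrative names:

    use std::sync::{Arc, RwLock};

    #[derive(Clone)]
    struct Handle {
        inner: Arc<Inner>,
    }

    struct Inner {
        id: RwLock<Option<String>>, // set once the process is up
    }

    impl Handle {
        fn set_id(&self, id: &str) {
            *self.inner.id.write().unwrap() = Some(id.to_owned());
        }
        fn id(&self) -> String {
            self.inner.id.read().unwrap().clone().expect("uninitialized id")
        }
    }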
node_id") + .clone() + .expect("uninitialized node_id") } pub fn consensus(&self) -> &Consensus { - &self.consensus + &self.inner.consensus } pub fn p2p_listen(&self) -> String { - self.p2p_listen.clone() + self.inner.p2p_listen.clone() } pub fn rpc_listen(&self) -> String { - self.rpc_listen.clone() + self.inner.rpc_listen.clone() } pub fn p2p_address(&self) -> String { @@ -681,20 +696,104 @@ impl Node { self.wait_tx_pool_ready(); - self.guard = Some(ProcessGuard { - name: self.spec_node_name.clone(), + self.set_process_guard(ProcessGuard { + name: self.inner.spec_node_name.clone(), child: child_process, killed: false, }); - self.node_id = Some(node_info.node_id); + self.set_node_id(node_info.node_id.as_str()); + } + + pub(crate) fn set_process_guard(&mut self, guard: ProcessGuard) { + let mut g = self.inner.guard.write().unwrap(); + *g = Some(guard); + } + + pub(crate) fn set_node_id(&mut self, node_id: &str) { + let mut n = self.inner.node_id.write().unwrap(); + *n = Some(node_id.to_owned()); + } + + pub(crate) fn take_guard(&mut self) -> Option { + let mut g = self.inner.guard.write().unwrap(); + g.take() } pub fn stop(&mut self) { - drop(self.guard.take()) + drop(self.take_guard()); + } + + fn derive_options( + &self, + mut config: CKBAppConfig, + root_dir: &Path, + subcommand_name: &str, + ) -> Result { + config.root_dir = root_dir.to_path_buf(); + + config.data_dir = root_dir.join(config.data_dir); + + config.db.adjust(root_dir, &config.data_dir, "db"); + config.ancient = config.data_dir.join("ancient"); + + config.network.path = config.data_dir.join("network"); + if config.tmp_dir.is_none() { + config.tmp_dir = Some(config.data_dir.join("tmp")); + } + config.logger.log_dir = config.data_dir.join("logs"); + config.logger.file = Path::new(&(subcommand_name.to_string() + ".log")).to_path_buf(); + + let tx_pool_path = config.data_dir.join("tx_pool"); + config.tx_pool.adjust(root_dir, tx_pool_path); + + let indexer_path = config.data_dir.join("indexer"); + config.indexer.adjust(root_dir, indexer_path); + + config.chain.spec.absolutize(root_dir); + + Ok(config) + } + + pub fn access_db(&self, f: F) + where + F: Fn(&ChainDB), + { + info!("accessing db"); + info!("AppConfig load_for_subcommand {:?}", self.working_dir()); + + let resource = Resource::ckb_config(self.working_dir()); + let app_config = + CKBAppConfig::load_from_slice(&resource.get().expect("resource")).expect("app config"); + + let config = AppConfig::CKB(Box::new( + self.derive_options(app_config, self.working_dir().as_ref(), "run") + .expect("app config"), + )); + + let consensus = config + .chain_spec() + .expect("spec") + .build_consensus() + .expect("consensus"); + + let app_config = config.into_ckb().expect("app config"); + + let db = open_or_create_db( + "ckb", + &app_config.root_dir, + &app_config.db, + consensus.hardfork_switch().clone(), + ) + .expect("open_or_create_db"); + let chain_db = ChainDB::new(db, app_config.store); + f(&chain_db); + + info!("accessed db done"); } pub fn stop_gracefully(&mut self) { - if let Some(mut guard) = self.guard.take() { + let guard = self.take_guard(); + if let Some(mut guard) = guard { if !guard.killed { // on nix: send SIGINT to the child // on windows: use taskkill to kill the child gracefully @@ -770,11 +869,11 @@ pub fn connect_all(nodes: &[Node]) { } // TODO it will be removed out later, in another PR -pub fn disconnect_all(nodes: &[Node]) { +pub fn disconnect_all>(nodes: &[N]) { for node_a in nodes.iter() { for node_b in nodes.iter() { - if node_a.p2p_address() != 

     pub fn stop_gracefully(&mut self) {
-        if let Some(mut guard) = self.guard.take() {
+        let guard = self.take_guard();
+        if let Some(mut guard) = guard {
             if !guard.killed {
                 // on nix: send SIGINT to the child
                 // on windows: use taskkill to kill the child gracefully
@@ -770,11 +869,11 @@ pub fn connect_all(nodes: &[Node]) {
 }

 // TODO it will be removed later, in another PR
-pub fn disconnect_all(nodes: &[Node]) {
+pub fn disconnect_all<N: Borrow<Node>>(nodes: &[N]) {
     for node_a in nodes.iter() {
         for node_b in nodes.iter() {
-            if node_a.p2p_address() != node_b.p2p_address() {
-                node_a.disconnect(node_b);
+            if node_a.borrow().p2p_address() != node_b.borrow().p2p_address() {
+                node_a.borrow().disconnect(node_b.borrow());
             }
         }
     }
@@ -794,9 +893,51 @@ pub fn waiting_for_sync<N: Borrow<Node>>(nodes: &[N]) {
         tip_headers.len() == 1
     });
     if !synced {
-        panic!("timeout to wait for sync, tip_headers: {tip_headers:?}");
+        panic!(
+            "timeout to wait for sync, tip_headers: {:?}",
+            tip_headers
+                .iter()
+                .map(|header| header.inner.number.value())
+                .collect::<Vec<_>>()
+        );
     }
     for node in nodes {
         node.borrow().wait_for_tx_pool();
     }
 }
+
+pub fn make_bootnodes_for_all<N: BorrowMut<Node>>(nodes: &mut [N]) {
+    let node_multiaddrs: HashMap<String, Multiaddr> = nodes
+        .iter()
+        .map(|n| {
+            (
+                n.borrow().node_id().to_owned(),
+                n.borrow().p2p_address().try_into().unwrap(),
+            )
+        })
+        .collect();
+    let other_node_addrs: Vec<Vec<Multiaddr>> = node_multiaddrs
+        .keys()
+        .map(|id| {
+            let addrs = node_multiaddrs
+                .iter()
+                .filter(|(other_id, _)| other_id.as_str() != id.as_str())
+                .map(|(_, addr)| addr.to_owned())
+                .collect::<Vec<_>>();
+            addrs
+        })
+        .collect();
+    for (i, node) in nodes.iter_mut().enumerate() {
+        node.borrow_mut()
+            .modify_app_config(|config: &mut CKBAppConfig| {
+                info!("Setting bootnodes to {:?}", other_node_addrs[i]);
+                config.network.bootnodes = other_node_addrs[i].clone();
+            })
+    }
+    // Restart the nodes so the new bootnodes take effect
+    for node in nodes.iter_mut() {
+        node.borrow_mut().stop();
+        node.borrow_mut().start();
+        info!("Restarted node {:?}", node.borrow_mut().node_id());
+    }
+}
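make_bootnodes_for_all rewrites every node's network.bootnodes to point at all of the other nodes and then restarts each node so the new configuration takes effect, giving a spec a full mesh before it starts injecting faults. Hypothetical usage at the top of a spec's run:

    fn run(&self, nodes: &mut Vec<Node>) {
        make_bootnodes_for_all(nodes); // full mesh via config, then restart
        out_ibd_mode(nodes);           // leave initial block download
        // ... actual test body ...
    }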
diff --git a/test/src/rpc.rs b/test/src/rpc.rs
index 2502f8ad76..3ce2ed564c 100644
--- a/test/src/rpc.rs
+++ b/test/src/rpc.rs
@@ -7,7 +7,7 @@ use ckb_error::AnyError;
 use ckb_jsonrpc_types::{
     Alert, BannedAddr, Block, BlockEconomicState, BlockFilter, BlockNumber, BlockTemplate,
     BlockView, Capacity, CellWithStatus, ChainInfo, EpochNumber, EpochView, EstimateCycles,
-    HeaderView, LocalNode, OutPoint, PoolTxDetailInfo, RawTxPool, RemoteNode, Timestamp,
+    HeaderView, LocalNode, OutPoint, PoolTxDetailInfo, RawTxPool, RemoteNode, SyncState, Timestamp,
     Transaction, TransactionProof, TransactionWithStatusResponse, TxPoolInfo, Uint32, Uint64,
     Version,
 };
@@ -150,6 +150,10 @@ impl RpcClient {
             .expect("rpc call get_banned_addresses")
     }

+    pub fn sync_state(&self) -> SyncState {
+        self.inner.sync_state().expect("rpc call sync_state")
+    }
+
     pub fn clear_banned_addresses(&self) {
         self.inner
             .clear_banned_addresses()
@@ -322,6 +326,7 @@ jsonrpc!(
     pub fn get_current_epoch(&self) -> EpochView;
     pub fn get_epoch_by_number(&self, number: EpochNumber) -> Option<EpochView>;
+    pub fn sync_state(&self) -> SyncState;
     pub fn local_node_info(&self) -> LocalNode;
     pub fn get_peers(&self) -> Vec<RemoteNode>;
     pub fn get_banned_addresses(&self) -> Vec<BannedAddr>;
diff --git a/test/src/specs/fault_injection/mod.rs b/test/src/specs/fault_injection/mod.rs
new file mode 100644
index 0000000000..aa54ea05d4
--- /dev/null
+++ b/test/src/specs/fault_injection/mod.rs
@@ -0,0 +1,3 @@
+mod randomly_kill;
+
+pub use randomly_kill::*;
diff --git a/test/src/specs/fault_injection/randomly_kill.rs b/test/src/specs/fault_injection/randomly_kill.rs
new file mode 100644
index 0000000000..4bb0033734
--- /dev/null
+++ b/test/src/specs/fault_injection/randomly_kill.rs
@@ -0,0 +1,31 @@
+use crate::{Node, Spec};
+
+use ckb_logger::info;
+use rand::{thread_rng, Rng};
+
+pub struct RandomlyKill;
+
+impl Spec for RandomlyKill {
+    crate::setup!(num_nodes: 1);
+
+    fn run(&self, nodes: &mut Vec<Node>) {
+        let mut rng = thread_rng();
+        let node = &mut nodes[0];
+        for _ in 0..rng.gen_range(10, 20) {
+            let n = rng.gen_range(0, 10);
+            // TODO: killing the child process and mining are actually sequential here.
+            // We need to find some way to do these two things in parallel.
+            // It would be great if we could kill and start the node externally (instead
+            // of writing Rust code to manage all the nodes, because in that case we
+            // would have to fight the ownership rules and monitor the nodes).
+            if n != 0 {
+                info!("Mining {} blocks", n);
+                node.mine(n);
+            }
+            info!("Stop the node");
+            node.stop();
+            info!("Start the node");
+            node.start();
+        }
+    }
+}
diff --git a/test/src/specs/mod.rs b/test/src/specs/mod.rs
index 5e9d9fc569..d981a242a2 100644
--- a/test/src/specs/mod.rs
+++ b/test/src/specs/mod.rs
@@ -1,6 +1,7 @@
 mod alert;
 mod consensus;
 mod dao;
+mod fault_injection;
 mod hardfork;
 mod mining;
 mod p2p;
@@ -12,6 +13,7 @@ mod tx_pool;
 pub use alert::*;
 pub use consensus::*;
 pub use dao::*;
+pub use fault_injection::*;
 pub use hardfork::*;
 pub use mining::*;
 pub use p2p::*;
diff --git a/test/src/specs/p2p/whitelist.rs b/test/src/specs/p2p/whitelist.rs
index 12bd86b06a..5141528e19 100644
--- a/test/src/specs/p2p/whitelist.rs
+++ b/test/src/specs/p2p/whitelist.rs
@@ -46,10 +46,7 @@ impl Spec for WhitelistOnSessionLimit {
         let rpc_client0 = node0.rpc_client();
         let is_connect_peer_num_eq_2 = wait_until(10, || {
             let peers = rpc_client0.get_peers();
-            peers.len() == 2
-                && peers
-                    .into_iter()
-                    .all(|node| id_set.contains(&node.node_id.as_str()))
+            peers.len() == 2 && peers.into_iter().all(|node| id_set.contains(&node.node_id))
         });

         if !is_connect_peer_num_eq_2 {
@@ -78,10 +75,7 @@ impl Spec for WhitelistOnSessionLimit {
         let rpc_client0 = node0.rpc_client();
         let is_connect_peer_num_eq_3 = wait_until(10, || {
             let peers = rpc_client0.get_peers();
-            peers.len() == 3
-                && peers
-                    .into_iter()
-                    .all(|node| id_set.contains(&node.node_id.as_str()))
+            peers.len() == 3 && peers.into_iter().all(|node| id_set.contains(&node.node_id))
         });

         if !is_connect_peer_num_eq_3 {
diff --git a/test/src/specs/sync/mod.rs b/test/src/specs/sync/mod.rs
index 52c2fe5997..8e75c85d93 100644
--- a/test/src/specs/sync/mod.rs
+++ b/test/src/specs/sync/mod.rs
@@ -7,6 +7,8 @@ mod invalid_block;
 mod invalid_locator_size;
 mod last_common_header;
 mod sync_and_mine;
+mod sync_churn;
+mod sync_invalid;
 mod sync_timeout;

 pub use block_filter::*;
@@ -18,4 +20,6 @@ pub use invalid_block::*;
 pub use invalid_locator_size::*;
 pub use last_common_header::*;
 pub use sync_and_mine::*;
+pub use sync_churn::*;
+pub use sync_invalid::*;
 pub use sync_timeout::*;
diff --git a/test/src/specs/sync/sync_churn.rs b/test/src/specs/sync/sync_churn.rs
new file mode 100644
index 0000000000..002cfa8e52
--- /dev/null
+++ b/test/src/specs/sync/sync_churn.rs
@@ -0,0 +1,76 @@
+use crate::node::{make_bootnodes_for_all, waiting_for_sync};
+use crate::util::mining::out_ibd_mode;
+use crate::{Node, Spec};
+use ckb_logger::info;
+use rand::Rng;
+use std::sync::mpsc;
+use std::thread;
+
+fn select_random_node<'a, R: Rng>(rng: &mut R, nodes: &'a mut [Node]) -> &'a mut Node {
+    let index = rng.gen_range(0, nodes.len());
+    &mut nodes[index]
+}
+
+pub struct SyncChurn;
+
+/// This test starts 5 nodes and randomly restarts 3 of them in the middle of mining.
+/// Once all nodes are synced afterwards, the test is considered successful.
+/// It exercises the robustness of the sync protocol: if the protocol is not robust
+/// enough, the test fails. Note that this is not a complete test; it can only probe
+/// the sync protocol's robustness to a certain extent. Known weaknesses:
+/// 1. It covers only the simple case where some nodes restart in the middle of mining
+///    while the other nodes keep mining correctly.
+/// 2. The fault injection (restarting nodes) is not comprehensive.
+/// 3. Even when the test fails, we cannot reproduce the same error deterministically.
+///    We may need FoundationDB-like tooling to deterministically reproduce such failures.
+impl Spec for SyncChurn {
+    crate::setup!(num_nodes: 5);
+
+    fn run(&self, nodes: &mut Vec<Node>) {
+        make_bootnodes_for_all(nodes);
+        out_ibd_mode(nodes);
+
+        let mut mining_nodes = nodes.clone();
+        let mut churn_nodes = mining_nodes.split_off(2);
+
+        let (restart_stopped_tx, restart_stopped_rx) = mpsc::channel();
+
+        let mining_thread = thread::spawn(move || {
+            let mut rng = rand::thread_rng();
+            loop {
+                let mining_node = select_random_node(&mut rng, &mut mining_nodes);
+                mining_node.mine(1);
+                // waiting_for_sync below has an implicit maximum waiting time (currently
+                // 60 seconds). We can sync roughly 200 blocks per second, so capping the
+                // chain at 10000 blocks is reasonable; with many more blocks the nodes
+                // may not manage to sync within that implicit waiting time, so we stop
+                // mining once the tip block number exceeds 10000.
+                let too_many_blocks = mining_node.get_tip_block_number() > 10000;
+                if too_many_blocks || restart_stopped_rx.try_recv().is_ok() {
+                    break;
+                }
+                waiting_for_sync(&mining_nodes);
+            }
+        });
+
+        let restart_thread = thread::spawn(move || {
+            let mut rng = rand::thread_rng();
+            // It takes about 1 second to restart a node, so restarting nodes 100 times
+            // takes about 100 seconds.
+            let num_restarts = 100;
+            for _ in 0..num_restarts {
+                let node = select_random_node(&mut rng, &mut churn_nodes);
+                info!("Restarting node {}", node.node_id());
+                node.stop();
+                node.start();
+            }
+            restart_stopped_tx.send(()).unwrap();
+        });
+
+        mining_thread.join().unwrap();
+        restart_thread.join().unwrap();
+
+        info!("Waiting for all nodes to sync");
+        waiting_for_sync(nodes);
+    }
+}
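The implicit maximum waiting time mentioned in the comment comes from the harness's polling helper (wait_until in test/src/utils.rs, imported by node.rs above); in spirit it is a bounded retry loop like this sketch (simplified signature, assumed shape):

    use std::time::{Duration, Instant};

    // Poll `f` roughly once per second for at most `secs` seconds.
    fn wait_until<F: FnMut() -> bool>(secs: u64, mut f: F) -> bool {
        let start = Instant::now();
        while start.elapsed() < Duration::from_secs(secs) {
            if f() {
                return true;
            }
            std::thread::sleep(Duration::from_secs(1));
        }
        false
    }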
diff --git a/test/src/specs/sync/sync_invalid.rs b/test/src/specs/sync/sync_invalid.rs
new file mode 100644
index 0000000000..41b13e559e
--- /dev/null
+++ b/test/src/specs/sync/sync_invalid.rs
@@ -0,0 +1,114 @@
+use crate::{Node, Spec};
+use ckb_app_config::CKBAppConfig;
+use ckb_logger::info;
+use ckb_store::{ChainDB, ChainStore};
+use ckb_types::core;
+use ckb_types::packed;
+use ckb_types::prelude::{AsBlockBuilder, Builder, Entity, IntoUncleBlockView};
+use std::thread::sleep;
+use std::time::Duration;
+
+pub struct SyncInvalid;
+
+impl Spec for SyncInvalid {
+    crate::setup!(num_nodes: 2);
+
+    fn run(&self, nodes: &mut Vec<Node>) {
+        nodes[0].mine(20);
+        nodes[1].mine(1);
+
+        nodes[0].connect(&nodes[1]);
+
+        let info_nodes_tip = || {
+            info!(
+                "nodes tip_number: {:?}",
+                nodes
+                    .iter()
+                    .map(|node| node.get_tip_block_number())
+                    .collect::<Vec<_>>()
+            );
+        };
+
+        let insert_invalid_block = || {
+            let template = nodes[0].rpc_client().get_block_template(None, None, None);
+
+            let block = packed::Block::from(template)
+                .as_advanced_builder()
+                .uncle(packed::UncleBlock::new_builder().build().into_view())
+                .build();
+            nodes[0]
+                .rpc_client()
+                .process_block_without_verify(block.data().into(), false);
+        };
+
+        info_nodes_tip();
+        insert_invalid_block();
+        insert_invalid_block();
+        info_nodes_tip();
+        assert_eq!(nodes[0].get_tip_block_number(), 22);
+
+        while nodes[1]
+            .rpc_client()
+            .sync_state()
+            .best_known_block_number
+            .value()
+            <= 20
+        {
+            sleep(Duration::from_secs(1));
+        }
+
+        let block_21_hash = core::BlockView::from(
+            nodes[0]
+                .rpc_client()
+                .get_block_by_number(21)
+                .expect("get block 21"),
+        )
+        .hash();
+        let block_22_hash = core::BlockView::from(
+            nodes[0]
+                .rpc_client()
+                .get_block_by_number(22)
+                .expect("get block 22"),
+        )
+        .hash();
+
+        assert!(!nodes[1].rpc_client().get_banned_addresses().is_empty());
+        assert!(nodes[1]
+            .rpc_client()
+            .get_banned_addresses()
+            .first()
+            .unwrap()
+            .ban_reason
+            .contains(&format!("{}", block_21_hash)));
+        info_nodes_tip();
+
+        nodes[0].stop();
+        nodes[1].stop();
+
+        nodes[0].access_db(|store: &ChainDB| {
+            {
+                assert!(store.get_block(&block_21_hash).is_some());
+                let ext = store.get_block_ext(&block_21_hash).expect("block 21 ext");
+                assert_eq!(ext.verified, Some(true));
+            }
+            {
+                assert!(store.get_block(&block_22_hash).is_some());
+                let ext = store.get_block_ext(&block_22_hash).expect("block 22 ext");
+                assert_eq!(ext.verified, Some(true));
+            }
+        });
+
+        nodes[1].access_db(|store: &ChainDB| {
+            assert!(store.get_block(&block_21_hash).is_none());
+            assert!(store.get_block_ext(&block_21_hash).is_none());
+            assert!(store.get_block(&block_22_hash).is_none());
+            assert!(store.get_block_ext(&block_22_hash).is_none());
+        });
+    }
+
+    fn modify_app_config(&self, config: &mut CKBAppConfig) {
+        config.logger.filter = Some("ckb=debug".to_string());
+    }
+}
diff --git a/util/app-config/src/tests/bats_tests/export_import.bats b/util/app-config/src/tests/bats_tests/export_import.bats
index 555ce26402..1c53da1f9d 100644
--- a/util/app-config/src/tests/bats_tests/export_import.bats
+++ b/util/app-config/src/tests/bats_tests/export_import.bats
@@ -13,7 +13,8 @@ function export { #@test
 }

 _import() {
-  bash -c "ckb import -C ${CKB_DIRNAME} ${TMP_DIR}/ckb*.json"
+  bash -c "ckb init -C ${TMP_DIR}/import"
+  bash -c "ckb import -C ${TMP_DIR}/import ${TMP_DIR}/ckb*.json"
 }

 function ckb_import { #@test
@@ -27,4 +28,5 @@ setup_file() {

 teardown_file() {
   rm -f ${TMP_DIR}/ckb*.json
+  rm -rvf ${TMP_DIR}/import
 }
diff --git a/util/app-config/src/tests/bats_tests/graceful_shutdown.bats b/util/app-config/src/tests/bats_tests/graceful_shutdown.bats
index 067844058d..44c32efd2e 100644
--- a/util/app-config/src/tests/bats_tests/graceful_shutdown.bats
+++ b/util/app-config/src/tests/bats_tests/graceful_shutdown.bats
@@ -21,7 +21,7 @@ function ckb_graceful_shutdown { #@test
   [ "$status" -eq 0 ]

   assert_output --regexp "INFO ckb_bin::subcommand::run Trapped exit signal, exiting..."
-  assert_output --regexp "INFO ckb_chain::chain ChainService received exit signal, exit now"
+  assert_output --regexp "INFO ckb_chain::chain_service ChainService received exit signal, exit now"
   assert_output --regexp "INFO ckb_sync::synchronizer BlockDownload received exit signal, exit now"
   assert_output --regexp "INFO ckb_tx_pool::chunk_process TxPool chunk_command service received exit signal, exit now"
   assert_output --regexp "INFO ckb_tx_pool::service TxPool is saving, please wait..."
@@ -29,7 +29,7 @@ function ckb_graceful_shutdown { #@test assert_output --regexp "INFO ckb_indexer_sync Indexer received exit signal, exit now" assert_output --regexp "INFO ckb_notify NotifyService received exit signal, exit now" assert_output --regexp "INFO ckb_block_filter::filter BlockFilter received exit signal, exit now" - assert_output --regexp "INFO ckb_sync::types::header_map HeaderMap limit_memory received exit signal, exit now" + assert_output --regexp "INFO ckb_shared::types::header_map HeaderMap limit_memory received exit signal, exit now" assert_output --regexp "INFO ckb_network::network NetworkService receive exit signal, start shutdown..." assert_output --regexp "INFO ckb_network::network NetworkService shutdown now" assert_output --regexp "INFO ckb_tx_pool::process TxPool saved successfully" diff --git a/util/constant/src/sync.rs b/util/constant/src/sync.rs index 1462fc31fe..488e1faecd 100644 --- a/util/constant/src/sync.rs +++ b/util/constant/src/sync.rs @@ -53,9 +53,6 @@ pub const BLOCK_DOWNLOAD_TIMEOUT: u64 = 30 * 1000; // 30s // potential degree of disordering of blocks. pub const BLOCK_DOWNLOAD_WINDOW: u64 = 1024 * 8; // 1024 * default_outbound_peers -/// Orphan block pool max size -pub const MAX_ORPHAN_POOL_SIZE: usize = 1024 * 1024 * 256; - /// Interval between repeated inquiry transactions pub const RETRY_ASK_TX_TIMEOUT_INCREASE: Duration = Duration::from_secs(30); diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index 1c911e5e79..70500f2913 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::ChainController; +use ckb_chain::ChainController; use ckb_jsonrpc_types::BlockView as JsonBlock; use ckb_types::core; #[cfg(feature = "progress_bar")] @@ -39,7 +39,7 @@ impl Import { let block: Arc = Arc::new(block.into()); if !block.is_genesis() { self.chain - .process_block(block) + .blocking_process_block(block) .expect("import occur malformation data"); } } @@ -64,7 +64,7 @@ impl Import { let block: Arc = Arc::new(block.into()); if !block.is_genesis() { self.chain - .process_block(block) + .blocking_process_block(block) .expect("import occur malformation data"); } progress_bar.inc(s.as_bytes().len() as u64); diff --git a/util/jsonrpc-types/src/net.rs b/util/jsonrpc-types/src/net.rs index 502ee0f753..8751621985 100644 --- a/util/jsonrpc-types/src/net.rs +++ b/util/jsonrpc-types/src/net.rs @@ -1,4 +1,5 @@ use crate::{BlockNumber, Byte32, Timestamp, Uint64}; +use ckb_types::H256; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -276,10 +277,16 @@ pub struct SyncState { /// /// If this number is too high, it indicates that block download has stuck at some block. pub orphan_blocks_count: Uint64, - /// The size of all download orphan blocks - pub orphan_blocks_size: Uint64, /// Count of downloading blocks. 
pub inflight_blocks_count: Uint64, + /// The block number of current unverified tip block + pub unverified_tip_number: BlockNumber, + /// The block hash of current unverified tip block + pub unverified_tip_hash: H256, + /// The block number of current tip block + pub tip_number: BlockNumber, + /// The block hash of current tip block + pub tip_hash: H256, /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms pub fast_time: Uint64, /// The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, unit ms diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index dc4eff7011..81dccf9171 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -19,22 +19,20 @@ ckb-build-info = { path = "../build-info", version = "= 0.116.0-pre" } ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.116.0-pre" } ckb-chain = { path = "../../chain", version = "= 0.116.0-pre" } ckb-shared = { path = "../../shared", version = "= 0.116.0-pre" } -ckb-network = { path = "../../network", version = "= 0.116.0-pre"} -ckb-rpc = { path = "../../rpc", version = "= 0.116.0-pre"} -ckb-resource = { path = "../../resource", version = "= 0.116.0-pre"} +ckb-network = { path = "../../network", version = "= 0.116.0-pre" } +ckb-rpc = { path = "../../rpc", version = "= 0.116.0-pre" } +ckb-resource = { path = "../../resource", version = "= 0.116.0-pre" } ckb-network-alert = { path = "../network-alert", version = "= 0.116.0-pre" } -ckb-sync = { path = "../../sync", version = "= 0.116.0-pre"} +ckb-sync = { path = "../../sync", version = "= 0.116.0-pre" } ckb-verification = { path = "../../verification", version = "= 0.116.0-pre" } ckb-verification-traits = { path = "../../verification/traits", version = "= 0.116.0-pre" } ckb-async-runtime = { path = "../runtime", version = "= 0.116.0-pre" } -ckb-proposal-table = { path = "../proposal-table", version = "= 0.116.0-pre" } ckb-channel = { path = "../channel", version = "= 0.116.0-pre" } ckb-tx-pool = { path = "../../tx-pool", version = "= 0.116.0-pre" } ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.116.0-pre" } ckb-block-filter = { path = "../../block-filter", version = "= 0.116.0-pre" } - [features] -with_sentry = [ "ckb-sync/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry" ] +with_sentry = ["ckb-sync/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry"] portable = ["ckb-shared/portable"] march-native = ["ckb-shared/march-native"] diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 0370339a54..2e567fd509 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -8,7 +8,7 @@ use ckb_app_config::{ use ckb_async_runtime::Handle; use ckb_block_filter::filter::BlockFilter as BlockFilterService; use ckb_build_info::Version; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::ChainController; use ckb_channel::Receiver; use ckb_jsonrpc_types::ScriptHashType; use ckb_light_client_protocol_server::LightClientProtocol; @@ -18,11 +18,9 @@ use ckb_network::{ NetworkState, SupportProtocols, }; use ckb_network_alert::alert_relayer::AlertRelayer; -use ckb_proposal_table::ProposalTable; use ckb_resource::Resource; -use ckb_rpc::RpcServer; -use ckb_rpc::ServiceBuilder; -use ckb_shared::Shared; +use ckb_rpc::{RpcServer, ServiceBuilder}; +use ckb_shared::{ChainServicesBuilder, Shared}; use ckb_shared::shared_builder::{SharedBuilder, SharedPackage}; use 
ckb_store::{ChainDB, ChainStore}; @@ -202,6 +200,8 @@ impl Launcher { .tx_pool_config(self.args.config.tx_pool.clone()) .notify_config(self.args.config.notify.clone()) .store_config(self.args.config.store) + .sync_config(self.args.config.network.sync.clone()) + .header_map_tmp_dir(self.args.config.tmp_dir.clone()) .block_assembler_config(block_assembler_config) .build()?; @@ -226,9 +226,12 @@ impl Launcher { } /// Start chain service, return ChainController - pub fn start_chain_service(&self, shared: &Shared, table: ProposalTable) -> ChainController { - let chain_service = ChainService::new(shared.clone(), table); - let chain_controller = chain_service.start(Some("ChainService")); + pub fn start_chain_service( + &self, + shared: &Shared, + chain_services_builder: ChainServicesBuilder, + ) -> ChainController { + let chain_controller = ckb_chain::start_chain_services(chain_services_builder); info!("chain genesis hash: {:#x}", shared.genesis_hash()); chain_controller } @@ -277,10 +280,9 @@ impl Launcher { miner_enable: bool, relay_tx_receiver: Receiver, ) -> NetworkController { - let sync_shared = Arc::new(SyncShared::with_tmpdir( + let sync_shared = Arc::new(SyncShared::new( shared.clone(), self.args.config.network.sync.clone(), - self.args.config.tmp_dir.as_ref(), relay_tx_receiver, )); let fork_enable = { @@ -309,17 +311,18 @@ impl Launcher { let mut flags = Flags::all(); if support_protocols.contains(&SupportProtocol::Relay) { - let relayer = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)); + let relayer_v3 = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)).v3(); protocols.push(CKBProtocol::new_with_support_protocol( SupportProtocols::RelayV3, - Box::new(relayer.clone().v3()), + Box::new(relayer_v3), Arc::clone(&network_state), )); if !fork_enable { + let relayer_v2 = Relayer::new(chain_controller.clone(), Arc::clone(&sync_shared)); protocols.push(CKBProtocol::new_with_support_protocol( SupportProtocols::RelayV2, - Box::new(relayer), + Box::new(relayer_v2), Arc::clone(&network_state), )) } @@ -413,7 +416,11 @@ impl Launcher { chain_controller.clone(), miner_enable, ) - .enable_net(network_controller.clone(), sync_shared) + .enable_net( + network_controller.clone(), + sync_shared, + Arc::new(chain_controller.clone()), + ) .enable_stats(shared.clone(), Arc::clone(&alert_notifier)) .enable_experiment(shared.clone()) .enable_integration_test(shared.clone(), network_controller.clone(), chain_controller) diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index 177d4c9bee..03e37e704b 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -4,7 +4,7 @@ use std::{ }; use ckb_app_config::{BlockAssemblerConfig, NetworkConfig}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; @@ -87,8 +87,7 @@ impl MockChain { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); Self { chain_controller, @@ -144,7 +143,7 @@ impl MockChain { let block_number = 
block.number();
         let is_ok = self
             .controller()
-            .process_block(Arc::new(block))
+            .blocking_process_block(Arc::new(block))
             .expect("process block");
         assert!(is_ok, "failed to process block {block_number}");
         while self
diff --git a/util/logger-service/src/lib.rs b/util/logger-service/src/lib.rs
index 3c87957c35..37c7eb2684 100644
--- a/util/logger-service/src/lib.rs
+++ b/util/logger-service/src/lib.rs
@@ -527,3 +527,27 @@ fn setup_panic_logger() {
     };
     panic::set_hook(Box::new(panic_logger));
 }
+
+/// Only used by unit tests.
+/// Initializes the [Logger](struct.Logger.html) and runs the logging service.
+pub fn init_for_test(filter: &str) -> Result<LoggerInitGuard, SetLoggerError> {
+    setup_panic_logger();
+    let config: Config = Config {
+        filter: Some(filter.to_string()),
+        color: true,
+        log_to_stdout: true,
+        log_to_file: false,
+
+        emit_sentry_breadcrumbs: None,
+        file: Default::default(),
+        log_dir: Default::default(),
+        extra: Default::default(),
+    };
+
+    let logger = Logger::new(None, config);
+    let filter = logger.filter();
+    log::set_boxed_logger(Box::new(logger)).map(|_| {
+        log::set_max_level(filter);
+        LoggerInitGuard
+    })
+}
diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs
index 64e06afbc5..3609524743 100644
--- a/util/metrics/src/lib.rs
+++ b/util/metrics/src/lib.rs
@@ -6,13 +6,11 @@
 //!
 //! [`ckb-metrics-service`]: ../ckb_metrics_service/index.html

-use prometheus::{
-    register_histogram, register_histogram_vec, register_int_counter, register_int_gauge,
-    register_int_gauge_vec, Histogram, HistogramVec, IntCounter, IntGauge, IntGaugeVec,
-};
 use prometheus_static_metric::make_static_metric;
 use std::cell::Cell;

+pub use prometheus::*;
+
 pub fn gather() -> Vec<prometheus::proto::MetricFamily> {
     prometheus::gather()
 }
@@ -46,11 +44,48 @@ make_static_metric! {
             proposed,
         },
     }
+
+    struct CkbHeaderMapMemoryHitMissStatistics: IntCounter {
+        "type" => {
+            hit,
+            miss,
+        },
+    }
 }

 pub struct Metrics {
     /// Gauge metric for CKB chain tip header number
     pub ckb_chain_tip: IntGauge,
+    /// CKB chain unverified tip header number
+    pub ckb_chain_unverified_tip: IntGauge,
+    /// ckb_chain asynchronous_process duration (seconds)
+    pub ckb_chain_async_process_block_duration: Histogram,
+    /// ckb_chain consume_orphan thread's process_lonely_block duration (seconds)
+    pub ckb_chain_process_lonely_block_duration: Histogram,
+    /// ckb_chain consume_unverified thread's consume_unverified_block duration (seconds)
+    pub ckb_chain_consume_unverified_block_duration: Histogram,
+    /// ckb_chain consume_unverified thread's consume_unverified_block waiting for block duration (seconds)
+    pub ckb_chain_consume_unverified_block_waiting_block_duration: Histogram,
+    /// ckb_chain execute_callback duration (seconds)
+    pub ckb_chain_execute_callback_duration: Histogram,
+    /// ckb_chain orphan blocks count
+    pub ckb_chain_orphan_count: IntGauge,
+    pub ckb_chain_lonely_block_ch_len: IntGauge,
+    pub ckb_chain_unverified_block_ch_len: IntGauge,
+    pub ckb_chain_preload_unverified_block_ch_len: IntGauge,
+    pub ckb_chain_load_full_unverified_block: Histogram,
+    /// ckb_sync_msg_process duration (seconds)
+    pub ckb_sync_msg_process_duration: HistogramVec,
+    /// ckb_sync_block_fetch duration (seconds)
+    pub ckb_sync_block_fetch_duration: Histogram,
+    // ckb_header_map_limit_memory duration (seconds)
+    pub ckb_header_map_limit_memory_duration: Histogram,
+    // ckb_header_map_limit_memory operation duration (seconds)
+    pub ckb_header_map_ops_duration: HistogramVec,
+    // how many headers are in the HeaderMap's in-memory map?
+ pub ckb_header_map_memory_count: IntGauge, + // how many times the HeaderMap's memory map is hit? + pub ckb_header_map_memory_hit_miss_count: CkbHeaderMapMemoryHitMissStatistics, /// Gauge for tracking the size of all frozen data pub ckb_freezer_size: IntGauge, /// Counter for measuring the effective amount of data read @@ -83,77 +118,158 @@ pub struct Metrics { pub ckb_sys_mem_rocksdb: IntGaugeVec, /// Counter for CKB network ban peers pub ckb_network_ban_peer: IntCounter, + pub ckb_inflight_blocks_count: IntGauge, + pub ckb_inflight_timeout_count: IntCounter, } -static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| Metrics { - ckb_chain_tip: register_int_gauge!("ckb_chain_tip", "The CKB chain tip header number").unwrap(), - ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), - ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), - ckb_relay_transaction_short_id_collide: register_int_counter!( +static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { + Metrics { + ckb_chain_tip: register_int_gauge!("ckb_chain_tip", "The CKB chain tip header number").unwrap(), + ckb_chain_unverified_tip: register_int_gauge!( + "ckb_chain_unverified_tip", + "The CKB chain unverified tip header number" + ) + .unwrap(), + ckb_chain_async_process_block_duration: register_histogram!( + "ckb_chain_async_process_block_duration", + "The CKB chain asynchronous_process_block duration (seconds)" + ) + .unwrap(), + ckb_chain_process_lonely_block_duration: register_histogram!( + "ckb_chain_process_lonely_block_duration", + "The CKB chain consume_orphan thread's process_lonely_block duration (seconds)" + ) + .unwrap(), + ckb_chain_consume_unverified_block_duration: register_histogram!( + "ckb_chain_consume_unverified_block_duration", + "The CKB chain consume_unverified thread's consume_unverified_block duration (seconds)" + ) + .unwrap(), + ckb_chain_consume_unverified_block_waiting_block_duration: register_histogram!( + "ckb_chain_consume_unverified_block_waiting_block_duration", + "The CKB chain consume_unverified thread's consume_unverified_block waiting for block duration (seconds)" + ).unwrap(), + ckb_chain_execute_callback_duration: register_histogram!( + "ckb_chain_execute_callback_duration", + "The CKB chain execute_callback duration (seconds)" + ).unwrap(), + ckb_chain_orphan_count: register_int_gauge!( + "ckb_chain_orphan_count", + "The CKB chain orphan blocks count", + ).unwrap(), + ckb_chain_lonely_block_ch_len: register_int_gauge!( + "ckb_chain_lonely_block_ch_len", + "The CKB chain lonely block channel length", + ).unwrap(), + ckb_chain_unverified_block_ch_len: register_int_gauge!( + "ckb_chain_unverified_block_ch_len", + "The CKB chain unverified block channel length", + ).unwrap(), + ckb_chain_preload_unverified_block_ch_len: register_int_gauge!( + "ckb_chain_preload_unverified_block_ch_len", + "The CKB chain fill unverified block channel length", + ).unwrap(), + ckb_chain_load_full_unverified_block: register_histogram!( + "ckb_chain_load_full_unverified_block", + "The CKB chain load_full_unverified_block duration (seconds)" + ).unwrap(), + ckb_sync_msg_process_duration: register_histogram_vec!( + "ckb_sync_msg_process_duration", + "The CKB sync message process duration (seconds)", + &["msg_type"], + ).unwrap(), + ckb_sync_block_fetch_duration: register_histogram!( + "ckb_sync_block_fetch_duration", + "The CKB sync block fetch duration (seconds)" + ).unwrap(), + 
ckb_header_map_limit_memory_duration: register_histogram!( + "ckb_header_map_limit_memory_duration", + "The CKB header map limit_memory job duration (seconds)" + ).unwrap(), + ckb_header_map_ops_duration: register_histogram_vec!( + "ckb_header_map_ops_duration", + "The CKB header map operation duration (seconds)", + &["operation"], + ).unwrap(), + ckb_header_map_memory_count: register_int_gauge!( + "ckb_header_map_memory_count", + "The CKB HeaderMap memory count", + ).unwrap(), + ckb_header_map_memory_hit_miss_count: CkbHeaderMapMemoryHitMissStatistics::from( + ®ister_int_counter_vec!( + "ckb_header_map_memory_hit_miss_count", + "The CKB HeaderMap memory hit count", + &["type"] + ) + .unwrap() + ), + ckb_freezer_size: register_int_gauge!("ckb_freezer_size", "The CKB freezer size").unwrap(), + ckb_freezer_read: register_int_counter!("ckb_freezer_read", "The CKB freezer read").unwrap(), + ckb_relay_transaction_short_id_collide: register_int_counter!( "ckb_relay_transaction_short_id_collide", "The CKB relay transaction short id collide" ) - .unwrap(), - ckb_relay_cb_verify_duration: register_histogram!( + .unwrap(), + ckb_relay_cb_verify_duration: register_histogram!( "ckb_relay_cb_verify_duration", "The CKB relay compact block verify duration" ) - .unwrap(), - ckb_block_process_duration: register_histogram!( + .unwrap(), + ckb_block_process_duration: register_histogram!( "ckb_block_process_duration", "The CKB block process duration" ) - .unwrap(), - ckb_relay_cb_transaction_count: register_int_counter!( + .unwrap(), + ckb_relay_cb_transaction_count: register_int_counter!( "ckb_relay_cb_transaction_count", "The CKB relay compact block transaction count" ) - .unwrap(), - ckb_relay_cb_reconstruct_ok: register_int_counter!( + .unwrap(), + ckb_relay_cb_reconstruct_ok: register_int_counter!( "ckb_relay_cb_reconstruct_ok", "The CKB relay compact block reconstruct ok count" ) - .unwrap(), - ckb_relay_cb_fresh_tx_cnt: register_int_counter!( + .unwrap(), + ckb_relay_cb_fresh_tx_cnt: register_int_counter!( "ckb_relay_cb_fresh_tx_cnt", "The CKB relay compact block fresh tx count" ) - .unwrap(), - ckb_relay_cb_reconstruct_fail: register_int_counter!( + .unwrap(), + ckb_relay_cb_reconstruct_fail: register_int_counter!( "ckb_relay_cb_reconstruct_fail", "The CKB relay compact block reconstruct fail count" ) - .unwrap(), - ckb_shared_best_number: register_int_gauge!( + .unwrap(), + ckb_shared_best_number: register_int_gauge!( "ckb_shared_best_number", "The CKB shared best header number" ) - .unwrap(), - ckb_sys_mem_process: CkbSysMemProcessStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ckb_sys_mem_process: CkbSysMemProcessStatistics::from( + ®ister_int_gauge_vec!( "ckb_sys_mem_process", "CKB system memory for process statistics", &["type"] ) - .unwrap(), - ), - ckb_sys_mem_jemalloc: CkbSysMemJemallocStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ), + ckb_sys_mem_jemalloc: CkbSysMemJemallocStatistics::from( + ®ister_int_gauge_vec!( "ckb_sys_mem_jemalloc", "CKB system memory for jemalloc statistics", &["type"] ) - .unwrap(), - ), - ckb_tx_pool_entry: CkbTxPoolEntryStatistics::from( - ®ister_int_gauge_vec!( + .unwrap(), + ), + ckb_tx_pool_entry: CkbTxPoolEntryStatistics::from( + ®ister_int_gauge_vec!( "ckb_tx_pool_entry", "CKB tx-pool entry status statistics", &["type"] ) - .unwrap(), - ), - ckb_message_bytes: register_histogram_vec!( + .unwrap(), + ), + ckb_message_bytes: register_histogram_vec!( "ckb_message_bytes", "The CKB message bytes", &["direction", "protocol_name", 
"msg_item_name", "status_code"], @@ -161,19 +277,30 @@ static METRICS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| M 500.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, 500000.0 ] ) - .unwrap(), + .unwrap(), - ckb_sys_mem_rocksdb: register_int_gauge_vec!( + ckb_sys_mem_rocksdb: register_int_gauge_vec!( "ckb_sys_mem_rocksdb", "CKB system memory for rocksdb statistics", &["type", "cf"] ) - .unwrap(), - ckb_network_ban_peer: register_int_counter!( + .unwrap(), + ckb_network_ban_peer: register_int_counter!( "ckb_network_ban_peer", "CKB network baned peer count" ) - .unwrap(), + .unwrap(), + ckb_inflight_blocks_count: register_int_gauge!( + "ckb_inflight_blocks_count", + "The CKB inflight blocks count" + ) + .unwrap(), + ckb_inflight_timeout_count: register_int_counter!( + "ckb_inflight_timeout_count", + "The CKB inflight timeout count" + ) + .unwrap(), + } }); /// Indicate whether the metrics service is enabled. diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index c9146332dc..73b3efbe1d 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -25,7 +25,7 @@ pub fn wait_all_ckb_services_exit() { } } } - debug!("All ckb threads have been stopped."); + info!("All ckb threads have been stopped"); } static CKB_HANDLES: once_cell::sync::Lazy> = diff --git a/verification/contextual/src/tests/contextual_block_verifier.rs b/verification/contextual/src/tests/contextual_block_verifier.rs index a53b1146ba..fc6c4182d4 100644 --- a/verification/contextual/src/tests/contextual_block_verifier.rs +++ b/verification/contextual/src/tests/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use super::super::contextual_block_verifier::{EpochVerifier, TwoPhaseCommitVerifier}; use crate::contextual_block_verifier::{RewardVerifier, VerifyContext}; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; @@ -83,8 +83,7 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); (chain_controller, shared) } @@ -230,7 +229,7 @@ fn test_proposal() { .collect(); let block = gen_block(&parent, vec![], proposal_ids, vec![]); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = block.header(); @@ -249,7 +248,7 @@ fn test_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -263,7 +262,7 @@ fn test_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -311,7 
+310,7 @@ fn test_uncle_proposal() { let uncle = gen_block(&parent, vec![], proposal_ids, vec![]); let block = gen_block(&parent, vec![], vec![], vec![uncle.as_uncle()]); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = block.header(); @@ -326,7 +325,7 @@ fn test_uncle_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } @@ -340,7 +339,7 @@ fn test_uncle_proposal() { //test chain forward let new_block = gen_block(&parent, vec![], vec![], vec![]); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block.header().to_owned(); } diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index af12732084..0928abdee9 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -2,7 +2,7 @@ use crate::contextual_block_verifier::{UncleVerifierContext, VerifyContext}; use crate::uncles_verifier::UnclesVerifier; -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::Consensus; use ckb_error::assert_error_eq; use ckb_shared::{Shared, SharedBuilder}; @@ -43,8 +43,8 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { } let (shared, mut pack) = builder.build().unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + (chain_controller, shared) } @@ -88,7 +88,7 @@ fn prepare() -> (Shared, Vec, Vec) { .epoch(); let new_block = gen_block(&parent, random(), &epoch); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain1.push(new_block.clone()); parent = new_block.header(); @@ -110,7 +110,7 @@ fn prepare() -> (Shared, Vec, Vec) { chain1[(i - 1) as usize].clone() }; chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain2.push(new_block.clone()); parent = new_block.header(); @@ -493,7 +493,7 @@ fn test_uncle_with_uncle_descendant() { for block in &chain2 { controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } @@ -506,7 +506,7 @@ fn test_uncle_with_uncle_descendant() { .build(); controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); {