diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 24f83179f6d..624dc968ada 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -741,7 +741,7 @@ impl BeaconChain { /// /// - `slot` always increases by `1`. /// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`). + /// non-skipped slot (identical to the way they are stored in `state.block_roots`). /// - Iterator returns `(Hash256, Slot)`. /// /// Will return a `BlockOutOfRange` error if the requested start slot is before the period of @@ -805,7 +805,7 @@ impl BeaconChain { /// /// - `slot` always decreases by `1`. /// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . + /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . /// - Iterator returns `(Hash256, Slot)`. /// - The provided `block_root` is included as the first item in the iterator. pub fn rev_iter_block_roots_from( @@ -834,7 +834,7 @@ impl BeaconChain { /// - `slot` always decreases by `1`. /// - Iterator returns `(Hash256, Slot)`. /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot - /// returned may be earlier than the wall-clock slot. + /// returned may be earlier than the wall-clock slot. pub fn rev_iter_state_roots_from<'a>( &'a self, state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d10bbfbbc5f..567433caee1 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -178,7 +178,7 @@ pub fn compute_proposer_duties_from_head( /// - Returns an error if `state.current_epoch() > target_epoch`. /// - No-op if `state.current_epoch() == target_epoch`. /// - It must be the case that `state.canonical_root() == state_root`, but this function will not -/// check that. +/// check that. pub fn ensure_state_is_in_epoch( state: &mut BeaconState, state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4a5282a1d74..48caea9c7ff 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -5,7 +5,7 @@ //! - Verification for gossip blocks (i.e., should we gossip some block from the network). //! - Verification for normal blocks (e.g., some block received on the RPC during a parent lookup). //! - Verification for chain segments (e.g., some chain of blocks received on the RPC during a -//! sync). +//! sync). //! //! The primary source of complexity here is that we wish to avoid doing duplicate work as a block //! moves through the verification process. For example, if some block is verified for gossip, we diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index c94ea0e9414..b62554f1b4d 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -33,7 +33,7 @@ pub struct CacheItem { /// /// - Produce an attestation without using `chain.canonical_head`. /// - Verify that a block root exists (i.e., will be imported in the future) during attestation -/// verification. +/// verification. 
/// - Provide a block which can be sent to peers via RPC. #[derive(Default)] pub struct EarlyAttesterCache { diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 24b6542eabc..8280d156751 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -469,7 +469,7 @@ pub mod tests { let last_finalized_eth1 = eth1s_by_count .range(0..(finalized_deposits + 1)) .map(|(_, eth1)| eth1) - .last() + .next_back() .cloned(); assert_eq!( eth1cache.finalize(finalized_checkpoint), diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 01b790bb25b..d41c33176ae 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1283,7 +1283,7 @@ impl InvalidHeadSetup { /// /// 1. A chain where the only viable head block has an invalid execution payload. /// 2. A block (`fork_block`) which will become the head of the chain when - /// it is imported. + /// it is imported. async fn new() -> InvalidHeadSetup { let slots_per_epoch = E::slots_per_epoch(); let mut rig = InvalidPayloadRig::new().enable_attestations(); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cde6cc6f486..820ec8d6b67 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1710,7 +1710,7 @@ impl ExecutionLayer { /// /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work - /// block. + /// block. /// - `None` if the `block_hash` or its parent were not present on the execution engine. /// - `Err(_)` if there was an error connecting to the execution engine. /// diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6067d52042f..baeb5976768 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -992,23 +992,23 @@ impl PeerManager { /// - Do not prune outbound peers to exceed our outbound target. /// - Do not prune more peers than our target peer count. /// - If we have an option to remove a number of peers, remove ones that have the least - /// long-lived subnets. + /// long-lived subnets. /// - When pruning peers based on subnet count. If multiple peers can be chosen, choose a peer - /// that is not subscribed to a long-lived sync committee subnet. + /// that is not subscribed to a long-lived sync committee subnet. /// - When pruning peers based on subnet count, do not prune a peer that would lower us below the - /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over - /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is - /// simpler and easier to maintain if we take this approach. If we are pruning subnet peers - /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be - /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're - /// in a bit of trouble anyway if we have so few peers on subnets. 
The - /// MIN_SYNC_COMMITTEE_PEERS - /// number should be set low as an absolute lower bound to maintain peers on the sync - /// committees. + /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over + /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is + /// simpler and easier to maintain if we take this approach. If we are pruning subnet peers + /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be + /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're + /// in a bit of trouble anyway if we have so few peers on subnets. The + /// MIN_SYNC_COMMITTEE_PEERS + /// number should be set low as an absolute lower bound to maintain peers on the sync + /// committees. /// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the - /// excess peer limit, all of the following logic is subverted as we will not prune any peers. - /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage - /// its peers across the subnets. + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. Remove worst scoring peers diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index b6926399115..0912bd1cd24 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -155,7 +155,7 @@ impl PeerDB { matches!( self.connection_status(peer_id), Some(PeerConnectionStatus::Disconnected { .. }) - | Some(PeerConnectionStatus::Unknown { .. }) + | Some(PeerConnectionStatus::Unknown) | None ) && !self.score_state_banned_or_disconnected(peer_id) } @@ -776,8 +776,8 @@ impl PeerDB { NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) | NewConnectionState::Disconnecting { .. }// We are disconnecting from a peer that may not have been registered before | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer - | NewConnectionState::Disconnected { .. 
} // Dialing a peer that responds by a different ID can be immediately - // disconnected without having being stored in the db before + | NewConnectionState::Disconnected // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 2bf35b0e35e..838f1b8a161 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1009,7 +1009,7 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload<Spec>> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::<Vec<_>>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::<Vec<_>>()); block.body.execution_payload.execution_payload.transactions = txs; @@ -1028,7 +1028,7 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload<Spec>> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::<Vec<_>>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::<Vec<_>>()); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 0519d6f4b04..0327f7073fa 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -104,8 +104,8 @@ impl std::fmt::Display for SyncState { match self { SyncState::SyncingFinalized { .. } => write!(f, "Syncing Finalized Chain"), SyncState::SyncingHead { .. } => write!(f, "Syncing Head Chain"), - SyncState::Synced { .. } => write!(f, "Synced"), - SyncState::Stalled { .. } => write!(f, "Stalled"), + SyncState::Synced => write!(f, "Synced"), + SyncState::Stalled => write!(f, "Stalled"), SyncState::SyncTransition => write!(f, "Evaluating known peers"), SyncState::BackFillSyncing { ..
} => write!(f, "Syncing Historical Blocks"), } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 4b54a24ddc8..80364753d70 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -25,7 +25,7 @@ type E = MinimalEthSpec; fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock<E> { let mut block = BeaconBlockBellatrix::<E>::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::<Vec<_>>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::<Vec<_>>()); block.body.execution_payload.execution_payload.transactions = txs; @@ -40,7 +40,7 @@ fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock<E> { let mut block = BeaconBlockBellatrix::<E>::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::<Vec<_>>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::<Vec<_>>()); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 0956c153a68..af75791e4d7 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -841,7 +841,7 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::ProposerIndexMismatch { .. } | GossipDataColumnError::IsNotLaterThanParent { .. } | GossipDataColumnError::InvalidSubnetId { .. } - | GossipDataColumnError::InvalidInclusionProof { .. } + | GossipDataColumnError::InvalidInclusionProof | GossipDataColumnError::InvalidKzgProof { .. } | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index fc31e837277..041b1dba9f5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -688,7 +688,7 @@ impl SyncManager { if new_state.is_synced() && !matches!( old_state, - SyncState::Synced { .. } | SyncState::BackFillSyncing { .. } + SyncState::Synced | SyncState::BackFillSyncing { .. } ) { self.network.subscribe_core_topics(); diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 9ab581950c9..271b2322faf 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1301,7 +1301,7 @@ impl TestRig { .sync_manager .get_sampling_request_status(block_root, index) .unwrap_or_else(|| panic!("No request state for {index}")); - if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. }) { + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers) { panic!("expected {block_root} {index} request to be no peers: {status:?}"); } } diff --git a/common/eth2_wallet_manager/src/locked_wallet.rs b/common/eth2_wallet_manager/src/locked_wallet.rs index a77f9bd7804..2af863a4bfd 100644 --- a/common/eth2_wallet_manager/src/locked_wallet.rs +++ b/common/eth2_wallet_manager/src/locked_wallet.rs @@ -22,7 +22,7 @@ pub const LOCK_FILE: &str = ".lock"; /// /// - Control over the `.lock` file to prevent concurrent access.
/// - A `next_validator` function which wraps `Wallet::next_validator`, ensuring that the wallet is -/// persisted to disk (as JSON) between each consecutive call. +/// persisted to disk (as JSON) between each consecutive call. pub struct LockedWallet { wallet_dir: PathBuf, wallet: Wallet, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5d0bee4c853..cf6ebb3b004 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -760,7 +760,7 @@ impl ProtoArray { /// /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. /// - The child is already the best child and the parent is updated with the new - /// best-descendant. + /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. fn maybe_update_best_child_and_descendant( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88d46603117..4da632bf580 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1121,7 +1121,7 @@ mod test_compute_deltas { /// /// - `A` (slot 31) is the common descendant. /// - `B` (slot 33) descends from `A`, but there is a single skip slot - /// between it and `A`. + /// between it and `A`. /// - `C` (slot 32) descends from `A` and conflicts with `B`. /// /// Imagine that the `B` chain is finalized at epoch 1. This means that the diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 9bae770fe52..090e16fc6d4 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -41,8 +41,8 @@ impl SyncCommitteeContribution { /// /// - `message`: A single `SyncCommitteeMessage`. /// - `subcommittee_index`: The subcommittee this contribution pertains to out of the broader - /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet - /// this message was seen on. + /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet + /// this message was seen on. /// - `validator_sync_committee_index`: The index of the validator **within** the subcommittee. 
pub fn from_message( message: &SyncCommitteeMessage, diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 35176d389d0..e335ac7fe8b 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -3,7 +3,7 @@ use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -24,7 +24,7 @@ impl TestRandom for BitList { impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 // zero out bits greater than N diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 6ea85548c0d..d05b34f9891 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -10,7 +10,7 @@ //! //! - `supranational`: the pure-assembly, highly optimized version from the `blst` crate. //! - `fake_crypto`: an always-returns-valid implementation that is only useful for testing -//! scenarios which intend to *ignore* real cryptography. +//! scenarios which intend to *ignore* real cryptography. //! //! This crate uses traits to reduce code-duplication between the two implementations. For example, //! the `GenericPublicKey` struct exported from this crate is generic across the `TPublicKey` trait diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 4a202ee3d2d..31662e831a4 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -84,11 +84,11 @@ pub use transition::TransitionTest; /// /// The feature tests can be run with one of the following methods: /// 1. `handler.run_for_feature(feature_name)` for new tests that are not on existing fork, i.e. a -/// new handler. This will be temporary and the test will need to be updated to use -/// `handle.run()` once the feature is incorporated into a fork. +/// new handler. This will be temporary and the test will need to be updated to use +/// `handle.run()` once the feature is incorporated into a fork. /// 2. `handler.run()` for tests that are already on existing forks, but with new test vectors for -/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented -/// to return `true` for the feature in order for the feature test vector to be tested. +/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented +/// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { // TODO(fulu): to be removed once we start using Fulu types for test vectors. 
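Most of the hunks above are mechanical: doc-comment continuation lines gain two spaces of indentation (apparently to satisfy the newer clippy/rustdoc list-continuation lint), and manual idioms are swapped for newer standard-library helpers. The sketch below is not part of the diff; it is a standalone illustration, with invented names and values, that the `div_ceil` and `repeat_n` rewrites seen in `bitfield.rs`, `codec.rs` and `rpc_tests.rs` preserve behaviour.

```rust
// Illustrative only -- not taken from the Lighthouse code above.
fn main() {
    // `(len + 7) / 8`  ->  `len.div_ceil(8)`: the same ceiling division on an
    // unsigned integer, with the intent spelled out (see the bitfield.rs hunks).
    let len: usize = 13;
    assert_eq!((len + 7) / 8, len.div_ceil(8)); // both evaluate to 2

    // `repeat(x).take(n)`  ->  `repeat_n(x, n)`: the items are identical, but
    // `repeat_n` knows its length up front and reuses the value on the final
    // iteration instead of cloning it one extra time.
    let a: Vec<u8> = std::iter::repeat(0xAAu8).take(5).collect();
    let b: Vec<u8> = std::iter::repeat_n(0xAAu8, 5).collect();
    assert_eq!(a, b);
}
```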
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 05804d7e366..c3835f425e1 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -143,7 +143,7 @@ impl LoadCase for ForkChoiceTest { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let description = path .iter() - .last() + .next_back() .expect("path must be non-empty") .to_str() .expect("path must be valid OsStr") diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 6559a2bb9e5..13494e5fa69 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -92,7 +92,7 @@ fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { } fn all_with_status(count: usize, status: T) -> impl Iterator { - std::iter::repeat(status).take(count) + std::iter::repeat_n(status, count) } fn all_imported(count: usize) -> impl Iterator { @@ -1059,7 +1059,7 @@ async fn migrate_some_extra_slashing_protection() { /// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)` /// - `delete_indices`: validators to delete from the first VC /// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should -/// be a subset of `delete_indices` or the test will panic. +/// be a subset of `delete_indices` or the test will panic. /// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. /// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool /// indicates whether the signing should be successful. diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 5bd9ffd8b2f..51140003257 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -265,9 +265,9 @@ impl ValidatorStore { /// are two primary functions used here: /// /// - `DoppelgangerStatus::only_safe`: only returns pubkeys which have passed doppelganger - /// protection and are safe-enough to sign messages. + /// protection and are safe-enough to sign messages. /// - `DoppelgangerStatus::ignored`: returns all the pubkeys from `only_safe` *plus* those still - /// undergoing protection. This is useful for collecting duties or other non-signing tasks. + /// undergoing protection. This is useful for collecting duties or other non-signing tasks. #[allow(clippy::needless_collect)] // Collect is required to avoid holding a lock. pub fn voting_pubkeys(&self, filter_func: F) -> I where
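The remaining recurring change, `.last()` replaced by `.next_back()` (in `eth1_finalization_cache.rs` and `fork_choice.rs` above), is likewise equivalence-preserving: on a double-ended iterator both yield the final element, but `next_back()` does so without consuming the sequence from the front. A minimal sketch, loosely modelled on the `load_from_dir` path handling; the path literal is invented:

```rust
// Illustrative only -- not taken from the Lighthouse code above.
fn main() {
    // `Path::iter()` is a DoubleEndedIterator over path components.
    let path = std::path::Path::new("/tests/fork_choice/on_block/case_0");

    let via_last = path.iter().last();           // walks components from the front
    let via_next_back = path.iter().next_back(); // steps straight to the last one

    assert_eq!(via_last, via_next_back);
    assert_eq!(via_next_back.and_then(|c| c.to_str()), Some("case_0"));
}
```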