Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ rust.missing_debug_implementations = "warn"
rust.missing_docs = "warn"
rust.unreachable_pub = "warn"
rust.unused_must_use = "deny"
rust.rust_2018_idioms = "deny"
rust.rust_2018_idioms = { level = "deny", priority = -1 }
rustdoc.all = "warn"

[workspace.lints.clippy]
Expand Down
6 changes: 3 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -413,9 +413,9 @@ fix-lint-other-targets:
-- -D warnings

fix-lint:
make lint-reth && \
make lint-op-reth && \
make lint-other-targets && \
make fix-lint-reth && \
make fix-lint-op-reth && \
make fix-lint-other-targets && \
make fmt

.PHONY: rustdocs
Expand Down
2 changes: 1 addition & 1 deletion bin/reth/src/commands/db/stats.rs
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,7 @@ impl Command {
let max_widths = table.column_max_content_widths();
let mut separator = Row::new();
for width in max_widths {
separator.add_cell(Cell::new(&"-".repeat(width as usize)));
separator.add_cell(Cell::new("-".repeat(width as usize)));
}
table.add_row(separator);

Expand Down
2 changes: 1 addition & 1 deletion crates/blockchain-tree/src/blockchain_tree.rs
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ where
*key_value.0
} else {
debug!(target: "blockchain_tree", ?chain_id, "No blockhashes stored");
return None;
return None
};
let canonical_chain = canonical_chain
.iter()
Expand Down
8 changes: 4 additions & 4 deletions crates/consensus/beacon/src/engine/hooks/prune.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ impl<DB: Database + 'static> PruneHook<DB> {

/// This will try to spawn the pruner if it is idle:
/// 1. Check if pruning is needed through [Pruner::is_pruning_needed].
/// 2.
/// 1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a
/// separate task. Set pruner state to [PrunerState::Running].
/// 2. If pruning is not needed, set pruner state back to [PrunerState::Idle].
///
/// 2.1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a
/// separate task. Set pruner state to [PrunerState::Running].
/// 2.2. If pruning is not needed, set pruner state back to [PrunerState::Idle].
///
/// If pruner is already running, do nothing.
fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option<EngineHookEvent> {
Expand Down
14 changes: 7 additions & 7 deletions crates/consensus/beacon/src/engine/hooks/static_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,13 +71,13 @@ impl<DB: Database + 'static> StaticFileHook<DB> {
/// 1. Check if producing static files is needed through
/// [StaticFileProducer::get_static_file_targets](reth_static_file::StaticFileProducerInner::get_static_file_targets)
/// and then [StaticFileTargets::any](reth_static_file::StaticFileTargets::any).
/// 2.
/// 1. If producing static files is needed, pass static file request to the
/// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and spawn
/// it in a separate task. Set static file producer state to
/// [StaticFileProducerState::Running].
/// 2. If producing static files is not needed, set static file producer state back to
/// [StaticFileProducerState::Idle].
///
/// 2.1. If producing static files is needed, pass static file request to the
/// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and
/// spawn it in a separate task. Set static file producer state to
/// [StaticFileProducerState::Running].
/// 2.2. If producing static files is not needed, set static file producer state back to
/// [StaticFileProducerState::Idle].
///
/// If static_file_producer is already running, do nothing.
fn try_spawn_static_file_producer(
Expand Down
10 changes: 5 additions & 5 deletions crates/consensus/beacon/src/engine/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -703,13 +703,13 @@ where
/// If validation fails, the response MUST contain the latest valid hash:
///
/// - The block hash of the ancestor of the invalid payload satisfying the following two
/// conditions:
/// conditions:
/// - It is fully validated and deemed VALID
/// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID
/// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above
/// conditions are satisfied by a PoW block.
/// conditions are satisfied by a PoW block.
/// - null if client software cannot determine the ancestor of the invalid payload satisfying
/// the above conditions.
/// the above conditions.
fn latest_valid_hash_for_invalid_payload(
&mut self,
parent_hash: B256,
Expand Down Expand Up @@ -1103,8 +1103,8 @@ where
/// - invalid extra data
/// - invalid transactions
/// - incorrect hash
/// - the versioned hashes passed with the payload do not exactly match transaction
/// versioned hashes
/// - the versioned hashes passed with the payload do not exactly match transaction versioned
/// hashes
/// - the block does not contain blob transactions if it is pre-cancun
///
/// This validates the following engine API rule:
Expand Down
2 changes: 1 addition & 1 deletion crates/exex/src/manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ pub struct ExExManagerMetrics {
/// The manager is responsible for:
///
/// - Receiving relevant events from the rest of the node, and sending these to the execution
/// extensions
/// extensions
/// - Backpressure
/// - Error handling
/// - Monitoring
Expand Down
1 change: 1 addition & 0 deletions crates/interfaces/src/blockchain_tree/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,7 @@ pub enum BlockStatus {
/// This is required to:
/// - differentiate whether trie state updates should be cached.
/// - inform other
///
/// This is required because the state root check can only be performed if the targeted block can be
/// traced back to the canonical __head__.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
Expand Down
4 changes: 2 additions & 2 deletions crates/net/ecies/src/algorithm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,8 @@ fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> B256 {
/// # Panics
/// * If the `dest` is empty
/// * If the `dest` len is greater than or equal to the hash output len * the max counter value. In
/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest
/// cannot have a len greater than 32 * 2^32 - 1.
/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest
/// cannot have a len greater than 32 * 2^32 - 1.
fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) {
concat_kdf::derive_key_into::<Sha256>(secret.as_slice(), s1, dest).unwrap();
}
Expand Down
2 changes: 1 addition & 1 deletion crates/net/eth-wire-types/src/message.rs
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ impl From<EthBroadcastMessage> for ProtocolBroadcastMessage {
/// The ethereum wire protocol is a set of messages that are broadcast to the network in two
/// styles:
/// * A request message sent by a peer (such as [`GetPooledTransactions`]), and an associated
/// response message (such as [`PooledTransactions`]).
/// response message (such as [`PooledTransactions`]).
/// * A message that is broadcast to the network, without a corresponding request.
///
/// The newer `eth/66` is an efficiency upgrade on top of `eth/65`, introducing a request id to
Expand Down
2 changes: 1 addition & 1 deletion crates/node-core/src/args/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ pub enum SocketAddressParsingError {
/// The following formats are checked:
///
/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
/// hostname is set to `localhost`.
/// hostname is set to `localhost`.
/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
/// - Otherwise it is assumed to be a hostname
///
Expand Down
1 change: 1 addition & 0 deletions crates/node-core/src/dirs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,7 @@ impl<D> From<PathBuf> for MaybePlatformPath<D> {
/// * mainnet: `<DIR>/mainnet`
/// * goerli: `<DIR>/goerli`
/// * sepolia: `<DIR>/sepolia`
///
/// Otherwise, the path will be dependent on the chain ID:
/// * `<DIR>/<CHAIN_ID>`
#[derive(Clone, Debug, PartialEq, Eq)]
Expand Down
4 changes: 2 additions & 2 deletions crates/payload/validator/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,8 @@ impl ExecutionPayloadValidator {
/// - invalid extra data
/// - invalid transactions
/// - incorrect hash
/// - the versioned hashes passed with the payload do not exactly match transaction
/// versioned hashes
/// - the versioned hashes passed with the payload do not exactly match transaction versioned
/// hashes
/// - the block does not contain blob transactions if it is pre-cancun
///
/// The checks are done in the order that conforms with the engine-API specification.
Expand Down
4 changes: 2 additions & 2 deletions crates/primitives/src/prune/target.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,8 @@ impl PruneModes {
///
/// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`.
/// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed
/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we
/// have one block in the database.
/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we
/// have one block in the database.
fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>(
deserializer: D,
) -> Result<Option<PruneMode>, D::Error> {
Expand Down
4 changes: 2 additions & 2 deletions crates/primitives/src/revm/env.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,8 +138,8 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn
/// and therefore:
/// * the call must execute to completion
/// * the call does not count against the block’s gas limit
/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as
/// part of the call
/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as part
/// of the call
/// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently
pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) {
env.tx = TxEnv {
Expand Down
2 changes: 1 addition & 1 deletion crates/primitives/src/transaction/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1346,7 +1346,7 @@ impl TransactionSigned {
};

if !input_data.is_empty() {
return Err(RlpError::UnexpectedLength);
return Err(RlpError::UnexpectedLength)
}

Ok(output_data)
Expand Down
3 changes: 1 addition & 2 deletions crates/stages/src/stages/execution.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,7 @@ use tracing::*;
/// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded
/// - [tables::AccountsHistory] to remove change set and apply old values to
/// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old
/// values
/// to [tables::PlainStorageState]
/// values to [tables::PlainStorageState]
// false positive, we cannot derive it if !DB: Debug.
#[allow(missing_debug_implementations)]
pub struct ExecutionStage<E> {
Expand Down
9 changes: 4 additions & 5 deletions crates/stages/src/stages/hashing_account.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,10 @@ impl Default for AccountHashingStage {
///
/// In order to check the "full hashing" mode of the stage you want to generate more
/// transitions than `AccountHashingStage.clean_threshold`. This requires:
/// 1. Creating enough blocks so there's enough transactions to generate
/// the required transition keys in the `BlockTransitionIndex` (which depends on the
/// `TxTransitionIndex` internally)
/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually
/// take the 2nd codepath
/// 1. Creating enough blocks so there's enough transactions to generate the required transition
/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally)
/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the
/// 2nd codepath
#[derive(Clone, Debug)]
pub struct SeedOpts {
/// The range of blocks to be generated
Expand Down
5 changes: 2 additions & 3 deletions crates/stages/src/stages/sender_recovery.rs
Original file line number Diff line number Diff line change
Expand Up @@ -507,10 +507,9 @@ mod tests {
/// # Panics
///
/// 1. If there are any entries in the [tables::TransactionSenders] table above a given
/// block number.
///
/// block number.
    /// 2. If there is no requested block entry in the bodies table, but
/// [tables::TransactionSenders] is not empty.
/// [tables::TransactionSenders] is not empty.
fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> {
let body_result = self
.db
Expand Down
5 changes: 2 additions & 3 deletions crates/stages/src/stages/tx_lookup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -428,10 +428,9 @@ mod tests {
/// # Panics
///
/// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given
/// block number.
///
/// block number.
    /// 2. If there is no requested block entry in the bodies table, but
/// [tables::TransactionHashNumbers] is not empty.
/// [tables::TransactionHashNumbers] is not empty.
fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> {
let body_result = self
.db
Expand Down
3 changes: 1 addition & 2 deletions crates/storage/db/src/implementation/mdbx/cursor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -191,8 +191,7 @@ impl<K: TransactionKind, T: DupSort> DbDupCursorRO<T> for Cursor<K, T> {
/// - Some(key), Some(subkey): a `key` item whose data is >= than `subkey`
/// - Some(key), None: first item of a specified `key`
/// - None, Some(subkey): like first case, but in the first key
/// - None, None: first item in the table
/// of a DUPSORT table.
/// - None, None: first item in the table of a DUPSORT table.
fn walk_dup(
&mut self,
key: Option<T::Key>,
Expand Down
18 changes: 9 additions & 9 deletions crates/storage/provider/src/providers/database/provider.rs
Original file line number Diff line number Diff line change
Expand Up @@ -375,20 +375,20 @@ impl<TX: DbTxMut + DbTx> DatabaseProvider<TX> {
///
/// If UNWIND is false we will just read the state/blocks and return them.
///
/// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all
/// the transaction ids.
/// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table
/// and the [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to
/// reconstruct the changesets.
/// - In order to have both the old and new values in the changesets, we also access the
/// plain state tables.
/// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all the
/// transaction ids.
/// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table and the
/// [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to reconstruct
/// the changesets.
/// - In order to have both the old and new values in the changesets, we also access the
/// plain state tables.
/// 3. While iterating over the changeset tables, if we encounter a new account or storage slot,
/// we:
/// we:
/// 1. Take the old value from the changeset
/// 2. Take the new value from the plain state
/// 3. Save the old value to the local state
/// 4. While iterating over the changeset tables, if we encounter an account/storage slot we
/// have seen before we:
/// have seen before we:
/// 1. Take the old value from the changeset
/// 2. Take the new value from the local state
/// 3. Set the local state to the value in the changeset
Expand Down
2 changes: 1 addition & 1 deletion crates/tokio-util/src/event_stream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ where
Poll::Ready(Some(Ok(item))) => return Poll::Ready(Some(item)),
Poll::Ready(Some(Err(e))) => {
warn!("BroadcastStream lagged: {e:?}");
continue;
continue
}
Poll::Ready(None) => return Poll::Ready(None),
Poll::Pending => return Poll::Pending,
Expand Down
16 changes: 8 additions & 8 deletions crates/transaction-pool/src/pool/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,16 +34,16 @@
//!
//! In essence the transaction pool is made of three separate sub-pools:
//!
//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy
//! (3. a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest
//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has
//! been executed, the next highest transaction from the same sender `n + 1` becomes ready.
//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy (3.
//! a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest
//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has
//! been executed, the next highest transaction from the same sender `n + 1` becomes ready.
//!
//! - Queued Pool: Contains all transactions that are currently blocked by missing
//! transactions: (3. a)(2): _With_ nonce gaps or due to lack of funds.
//! - Queued Pool: Contains all transactions that are currently blocked by missing transactions:
//! (3. a)(2): _With_ nonce gaps or due to lack of funds.
//!
//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render
//! an EIP-1559 and all subsequent transactions of the sender currently invalid.
//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render an
//! EIP-1559 and all subsequent transactions of the sender currently invalid.
//!
//! The classification of transactions is always dependent on the current state that is changed as
//! soon as a new block is mined. Once a new block is mined, the account changeset must be applied
Expand Down
6 changes: 3 additions & 3 deletions crates/transaction-pool/src/pool/pending.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,9 @@ impl<T: TransactionOrdering> PendingPool<T> {
/// Returns an iterator over all transactions that are _currently_ ready.
///
/// 1. The iterator _always_ returns transaction in order: It never returns a transaction with
/// an unsatisfied dependency and only returns them if dependency transaction were yielded
/// previously. In other words: The nonces of transactions with the same sender will _always_
/// increase by exactly 1.
/// an unsatisfied dependency and only returns them if dependency transaction were yielded
/// previously. In other words: The nonces of transactions with the same sender will _always_
/// increase by exactly 1.
///
    /// The order of transactions which satisfy (1.) is determined by their computed priority: A
/// transaction with a higher priority is returned before a transaction with a lower priority.
Expand Down
1 change: 1 addition & 0 deletions crates/transaction-pool/src/pool/txpool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1002,6 +1002,7 @@ impl<T: PoolTransaction> AllTransactions<T> {
/// For all transactions:
/// - decreased basefee: promotes from `basefee` to `pending` sub-pool.
/// - increased basefee: demotes from `pending` to `basefee` sub-pool.
///
/// Individually:
/// - decreased sender allowance: demote from (`basefee`|`pending`) to `queued`.
/// - increased sender allowance: promote from `queued` to
Expand Down
8 changes: 2 additions & 6 deletions examples/node-event-hooks/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,8 @@
//! ```
//!
//! This launches the regular reth node and also prints:
//!
//! > "All components initialized"
//! once all components have been initialized and
//!
//! > "Node started"
//! once the node has been started.
//! > "All components initialized" – once all components have been initialized
//! > "Node started" – once the node has been started.

use reth::cli::Cli;
use reth_node_ethereum::EthereumNode;
Expand Down