Merged
Changes from 4 commits
2 changes: 1 addition & 1 deletion .tool-versions
@@ -1,2 +1,2 @@
rust 1.87.0
rust 1.90.0
# golang 1.23.2
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,4 +1,4 @@
FROM rust:1.87 AS chef
FROM rust:1.90 AS chef

RUN apt-get update && apt-get install -y \
build-essential \
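The 1.87 to 1.90 bump in .tool-versions and the Dockerfile is what enables the main refactor in this diff: nearly every hunk below collapses a nested `if let` into a let-chain (`if cond && let Some(x) = expr { .. }`). As far as I recall, let-chains were stabilized around Rust 1.88 under the 2024 edition, so any toolchain at or past that would do; 1.90 is simply the new pin. A minimal sketch of the before/after shape, using hypothetical `lookup` and `threshold` names rather than anything from this repo:

```rust
// Minimal sketch of the pattern this PR applies repeatedly; `lookup`,
// `old_style`, and `new_style` are illustrative stand-ins, not repo code.
fn lookup(key: u32) -> Option<u32> {
    (key % 2 == 0).then_some(key * 10)
}

fn old_style(key: u32, threshold: u32) -> Option<u32> {
    // Before: a boolean guard wrapping a nested `if let`, two indent levels deep.
    if key < threshold {
        if let Some(value) = lookup(key) {
            return Some(value);
        }
    }
    None
}

fn new_style(key: u32, threshold: u32) -> Option<u32> {
    // After: one let-chain; the guard and the `let` pattern share a single `if`.
    if key < threshold && let Some(value) = lookup(key) {
        return Some(value);
    }
    None
}

fn main() {
    assert_eq!(old_style(4, 10), new_style(4, 10));
    assert_eq!(old_style(3, 10), None);
}
```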
67 changes: 33 additions & 34 deletions crates/blockchain/blockchain.rs
@@ -275,23 +275,21 @@ impl Blockchain {
ChainError::WitnessGeneration("Failed to access account from trie".to_string())
})?;
// Get storage trie at before updates
if !acc_keys.is_empty() {
if let Ok(Some(storage_trie)) = self.storage.storage_trie(parent_hash, *account)
{
let (storage_trie_witness, storage_trie) =
TrieLogger::open_trie(storage_trie);
// Access all the keys
for storage_key in acc_keys {
let hashed_key = hash_key(storage_key);
storage_trie.get(&hashed_key).map_err(|_e| {
ChainError::WitnessGeneration(
"Failed to access storage key".to_string(),
)
})?;
}
// Store the tries to reuse when applying account updates
used_storage_tries.insert(*account, (storage_trie_witness, storage_trie));
if !acc_keys.is_empty()
&& let Ok(Some(storage_trie)) = self.storage.storage_trie(parent_hash, *account)
{
let (storage_trie_witness, storage_trie) = TrieLogger::open_trie(storage_trie);
// Access all the keys
for storage_key in acc_keys {
let hashed_key = hash_key(storage_key);
storage_trie.get(&hashed_key).map_err(|_e| {
ChainError::WitnessGeneration(
"Failed to access storage key".to_string(),
)
})?;
}
// Store the tries to reuse when applying account updates
used_storage_tries.insert(*account, (storage_trie_witness, storage_trie));
}
}
// Store all the accessed evm bytecodes
@@ -343,10 +341,10 @@ impl Blockchain {
let state_trie_witness = std::mem::take(&mut *state_trie_witness);
used_trie_nodes.extend_from_slice(&Vec::from_iter(state_trie_witness.into_iter()));
// If the witness is empty at least try to store the root
if used_trie_nodes.is_empty() {
if let Some(root) = root_node {
used_trie_nodes.push(root.encode_raw());
}
if used_trie_nodes.is_empty()
&& let Some(root) = root_node
{
used_trie_nodes.push(root.encode_raw());
}

let mut needed_block_numbers = block_hashes.keys().collect::<Vec<_>>();
@@ -361,10 +359,10 @@
.saturating_sub(1);
// The first block number we need is either the parent of the first block number or the earliest block number used by BLOCKHASH
let mut first_needed_block_number = first_block_header.number.saturating_sub(1);
if let Some(block_number_from_logger) = needed_block_numbers.first() {
if **block_number_from_logger < first_needed_block_number {
first_needed_block_number = **block_number_from_logger;
}
if let Some(block_number_from_logger) = needed_block_numbers.first()
&& **block_number_from_logger < first_needed_block_number
{
first_needed_block_number = **block_number_from_logger;
}
let mut block_headers_bytes = Vec::new();
for block_number in first_needed_block_number..=last_needed_block_number {
@@ -851,10 +849,10 @@ impl Blockchain {
// If it exists check if the new tx has higher fees
let tx_to_replace_hash = self.mempool.find_tx_to_replace(sender, nonce, tx)?;

if let Some(chain_id) = tx.chain_id() {
if chain_id != config.chain_id {
return Err(MempoolError::InvalidChainId(config.chain_id));
}
if tx
.chain_id()
.is_some_and(|chain_id| chain_id != config.chain_id)
{
return Err(MempoolError::InvalidChainId(config.chain_id));
}

Ok(tx_to_replace_hash)
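The chain-id hunk above swaps a nested `if let` for `Option::is_some_and`, which keeps the original behavior: a transaction that carries no chain id is not rejected here, only a present-but-mismatched one is. A rough standalone equivalent, with plain `u64`s standing in for the real transaction and config types:

```rust
// Hedged sketch of the chain-id guard; `check_chain_id` and its types are
// illustrative only, not the mempool's actual signatures.
fn check_chain_id(tx_chain_id: Option<u64>, expected: u64) -> Result<(), String> {
    // `is_some_and` returns false for None, so legacy transactions without a
    // chain id pass through this check unchanged.
    if tx_chain_id.is_some_and(|chain_id| chain_id != expected) {
        return Err(format!("invalid chain id, expected {expected}"));
    }
    Ok(())
}

fn main() {
    assert!(check_chain_id(Some(1), 1).is_ok());
    assert!(check_chain_id(None, 1).is_ok());
    assert!(check_chain_id(Some(5), 1).is_err());
}
```

The same shape reappears in the `l1_committer.rs` hunk further down, where the closure passed to `is_some_and` also captures the accumulated batch gas.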
@@ -1072,12 +1071,12 @@ pub fn validate_gas_used(
receipts: &[Receipt],
block_header: &BlockHeader,
) -> Result<(), ChainError> {
if let Some(last) = receipts.last() {
if last.cumulative_gas_used != block_header.gas_used {
return Err(ChainError::InvalidBlock(
InvalidBlockError::GasUsedMismatch(last.cumulative_gas_used, block_header.gas_used),
));
}
if let Some(last) = receipts.last()
&& last.cumulative_gas_used != block_header.gas_used
{
return Err(ChainError::InvalidBlock(
InvalidBlockError::GasUsedMismatch(last.cumulative_gas_used, block_header.gas_used),
));
}
Ok(())
}
30 changes: 14 additions & 16 deletions crates/blockchain/fork_choice.rs
@@ -72,30 +72,28 @@ pub async fn apply_fork_choice(
};

// Check that finalized and safe blocks are part of the new canonical chain.
if let Some(ref finalized) = finalized_res {
if !((is_canonical(store, finalized.number, finalized_hash).await?
if let Some(ref finalized) = finalized_res
&& !((is_canonical(store, finalized.number, finalized_hash).await?
&& finalized.number <= link_block_number)
|| (finalized.number == head.number && finalized_hash == head_hash)
|| new_canonical_blocks.contains(&(finalized.number, finalized_hash)))
{
return Err(InvalidForkChoice::Disconnected(
error::ForkChoiceElement::Head,
error::ForkChoiceElement::Finalized,
));
};
{
return Err(InvalidForkChoice::Disconnected(
error::ForkChoiceElement::Head,
error::ForkChoiceElement::Finalized,
));
}

if let Some(ref safe) = safe_res {
if !((is_canonical(store, safe.number, safe_hash).await?
if let Some(ref safe) = safe_res
&& !((is_canonical(store, safe.number, safe_hash).await?
&& safe.number <= link_block_number)
|| (safe.number == head.number && safe_hash == head_hash)
|| new_canonical_blocks.contains(&(safe.number, safe_hash)))
{
return Err(InvalidForkChoice::Disconnected(
error::ForkChoiceElement::Head,
error::ForkChoiceElement::Safe,
));
};
{
return Err(InvalidForkChoice::Disconnected(
error::ForkChoiceElement::Head,
error::ForkChoiceElement::Safe,
));
}

// Finished all validations.
9 changes: 5 additions & 4 deletions crates/blockchain/mempool.rs
@@ -200,10 +200,11 @@ impl Mempool {
}

// Filter by blob gas fee
if let (true, Some(blob_fee)) = (is_blob_tx, filter.blob_fee) {
if tx.max_fee_per_blob_gas().is_none_or(|fee| fee < blob_fee) {
return false;
}
if is_blob_tx
&& let Some(blob_fee) = filter.blob_fee
&& tx.max_fee_per_blob_gas().is_none_or(|fee| fee < blob_fee)
{
return false;
}
true
};
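The blob-fee filter above also shows how a let-chain mixes plain boolean guards with `let` patterns: conditions run left to right, and a binding introduced by a `let` is visible to everything after it in the chain. A self-contained sketch of that shape, with all names invented:

```rust
// Sketch of a filter mixing a boolean guard, an optional fee floor, and an
// optional per-transaction fee, as in the blob-fee check above.
fn keep(is_blob_tx: bool, min_blob_fee: Option<u64>, tx_blob_fee: Option<u64>) -> bool {
    if is_blob_tx
        && let Some(blob_fee) = min_blob_fee
        && tx_blob_fee.is_none_or(|fee| fee < blob_fee)
    {
        // Blob transaction that cannot pay the current blob fee floor: drop it.
        return false;
    }
    true
}

fn main() {
    assert!(keep(false, Some(10), None)); // not a blob tx
    assert!(keep(true, None, None)); // no blob-fee floor configured
    assert!(!keep(true, Some(10), Some(5))); // pays less than the floor
    assert!(keep(true, Some(10), Some(20))); // pays enough
}
```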
22 changes: 11 additions & 11 deletions crates/blockchain/metrics/metrics_process.rs
@@ -42,18 +42,18 @@ impl MetricsProcess {
})?;
}

if let Some(path) = DATADIR_PATH.get() {
if let Ok(size) = directory_size(path) {
let gauge = IntGauge::new(
"datadir_size_bytes",
"Total size in bytes consumed by the configured datadir.",
)
if let Some(path) = DATADIR_PATH.get()
&& let Ok(size) = directory_size(path)
{
let gauge = IntGauge::new(
"datadir_size_bytes",
"Total size in bytes consumed by the configured datadir.",
)
.map_err(|e| MetricsError::PrometheusErr(e.to_string()))?;
let clamped = size.min(i64::MAX as u64);
gauge.set(clamped as i64);
r.register(Box::new(gauge))
.map_err(|e| MetricsError::PrometheusErr(e.to_string()))?;
let clamped = size.min(i64::MAX as u64);
gauge.set(clamped as i64);
r.register(Box::new(gauge))
.map_err(|e| MetricsError::PrometheusErr(e.to_string()))?;
}
}

let encoder = TextEncoder::new();
18 changes: 9 additions & 9 deletions crates/blockchain/metrics/profiling.rs
@@ -32,16 +32,16 @@
S: Subscriber + for<'a> LookupSpan<'a>,
{
fn on_enter(&self, id: &Id, ctx: Context<'_, S>) {
if let Some(span) = ctx.span(id) {
if span.metadata().target().starts_with("ethrex") {
let name = span.metadata().name();
if let Some(span) = ctx.span(id)
&& span.metadata().target().starts_with("ethrex")
{
let name = span.metadata().name();

let timer = METRICS_BLOCK_PROCESSING_PROFILE
.with_label_values(&[name])
.start_timer();
let mut timers = self.function_timers.lock().unwrap();
timers.insert(id.clone(), timer);
}
let timer = METRICS_BLOCK_PROCESSING_PROFILE
.with_label_values(&[name])
.start_timer();
let mut timers = self.function_timers.lock().unwrap();
timers.insert(id.clone(), timer);
}
}

10 changes: 5 additions & 5 deletions crates/common/trie/logger.rs
@@ -35,11 +35,11 @@ impl TrieLogger {
impl TrieDB for TrieLogger {
fn get(&self, key: NodeHash) -> Result<Option<Vec<u8>>, TrieError> {
let result = self.inner_db.get(key)?;
if let Some(result) = result.as_ref() {
if let Ok(decoded) = Node::decode(result) {
let mut lock = self.witness.lock().map_err(|_| TrieError::LockError)?;
lock.insert(decoded.encode_raw());
};
if let Some(result) = result.as_ref()
&& let Ok(decoded) = Node::decode(result)
{
let mut lock = self.witness.lock().map_err(|_| TrieError::LockError)?;
lock.insert(decoded.encode_raw());
}
Ok(result)
}
10 changes: 5 additions & 5 deletions crates/common/types/genesis.rs
@@ -631,11 +631,11 @@ impl Genesis {
let mut blob_gas_used: Option<u64> = None;
let mut excess_blob_gas: Option<u64> = None;

if let Some(cancun_time) = self.config.cancun_time {
if cancun_time <= self.timestamp {
blob_gas_used = Some(self.blob_gas_used.unwrap_or(0));
excess_blob_gas = Some(self.excess_blob_gas.unwrap_or(0));
}
if let Some(cancun_time) = self.config.cancun_time
&& cancun_time <= self.timestamp
{
blob_gas_used = Some(self.blob_gas_used.unwrap_or(0));
excess_blob_gas = Some(self.excess_blob_gas.unwrap_or(0));
}
let base_fee_per_gas = self.base_fee_per_gas.or_else(|| {
self.config
2 changes: 1 addition & 1 deletion crates/l2/sdk/src/calldata.rs
@@ -248,7 +248,7 @@ impl DataType {
.consume_u256()?
.try_into()
.map_err(|_| CalldataDecodeError::OutOfBounds)?;
let size = if n % 32 == 0 {
let size = if n.is_multiple_of(32) {
n
} else {
n.next_multiple_of(32)
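In `calldata.rs`, the manual modulo test `n % 32 == 0` becomes `n.is_multiple_of(32)`, which I believe was stabilized for unsigned integers around Rust 1.87. A small sketch of the padding computation it sits in, using a made-up `padded_len` helper:

```rust
// Hedged sketch: round a dynamic ABI payload length up to a 32-byte word
// boundary, mirroring the branch in calldata.rs. `padded_len` is illustrative.
fn padded_len(n: usize) -> usize {
    if n.is_multiple_of(32) {
        n
    } else {
        n.next_multiple_of(32)
    }
}

fn main() {
    assert_eq!(padded_len(0), 0);
    assert_eq!(padded_len(1), 32);
    assert_eq!(padded_len(32), 32);
    assert_eq!(padded_len(33), 64);
    // `next_multiple_of` alone would give the same results:
    assert_eq!(33usize.next_multiple_of(32), 64);
}
```

Since `next_multiple_of` already returns `n` unchanged when `n` is a multiple of 32, the branch is arguably redundant and mainly documents intent.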
25 changes: 12 additions & 13 deletions crates/l2/sdk/src/sdk.rs
@@ -719,15 +719,14 @@ pub async fn send_tx_bump_gas_exponential_backoff(
}

// Check blob gas fees only for EIP4844 transactions
if let Some(tx_max_fee_per_blob_gas) = &mut tx.max_fee_per_blob_gas {
if let Some(max_fee_per_blob_gas) = client.maximum_allowed_max_fee_per_blob_gas {
if *tx_max_fee_per_blob_gas > U256::from(max_fee_per_blob_gas) {
*tx_max_fee_per_blob_gas = U256::from(max_fee_per_blob_gas);
warn!(
"max_fee_per_blob_gas exceeds the allowed limit, adjusting it to {max_fee_per_blob_gas}"
);
}
}
if let Some(tx_max_fee_per_blob_gas) = &mut tx.max_fee_per_blob_gas
&& let Some(max_fee_per_blob_gas) = client.maximum_allowed_max_fee_per_blob_gas
&& *tx_max_fee_per_blob_gas > U256::from(max_fee_per_blob_gas)
{
*tx_max_fee_per_blob_gas = U256::from(max_fee_per_blob_gas);
warn!(
"max_fee_per_blob_gas exceeds the allowed limit, adjusting it to {max_fee_per_blob_gas}"
);
}
let Ok(tx_hash) = send_generic_transaction(client, tx.clone(), signer)
.await
@@ -905,10 +904,10 @@ async fn priority_fee_from_override_or_rpc(
return Ok(priority_fee);
}

if let Ok(priority_fee) = client.get_max_priority_fee().await {
if let Ok(priority_fee_u64) = priority_fee.try_into() {
return Ok(priority_fee_u64);
}
if let Ok(priority_fee) = client.get_max_priority_fee().await
&& let Ok(priority_fee_u64) = priority_fee.try_into()
{
return Ok(priority_fee_u64);
}

get_fee_from_override_or_get_gas_price(client, None).await
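Both `sdk.rs` hunks above chain conditions that previously needed two or three levels of nesting: the blob-fee clamp keeps a `&mut` binding in the chain so the field can be adjusted in place once every guard passes, and the priority-fee fallback chains a fallible RPC call with a fallible integer conversion. A compact sketch of the clamping shape, with invented names:

```rust
// Hedged sketch of the blob-fee clamp: mutate an optional field in place only
// when a configured cap exists and the field exceeds it. `Tx` and
// `clamp_blob_fee` are invented for illustration.
struct Tx {
    max_fee_per_blob_gas: Option<u128>,
}

fn clamp_blob_fee(tx: &mut Tx, cap: Option<u128>) {
    if let Some(fee) = &mut tx.max_fee_per_blob_gas
        && let Some(cap) = cap
        && *fee > cap
    {
        // All three conditions hold: clamp the field through the &mut binding.
        *fee = cap;
    }
}

fn main() {
    let mut tx = Tx { max_fee_per_blob_gas: Some(500) };
    clamp_blob_fee(&mut tx, Some(100));
    assert_eq!(tx.max_fee_per_blob_gas, Some(100));

    let mut legacy = Tx { max_fee_per_blob_gas: None };
    clamp_blob_fee(&mut legacy, Some(100));
    assert_eq!(legacy.max_fee_per_blob_gas, None);
}
```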
30 changes: 15 additions & 15 deletions crates/l2/sequencer/block_producer/payload_builder.rs
@@ -178,13 +178,13 @@ pub async fn fill_transactions(
.get_account_info(latest_block_number, head_tx.tx.sender())
.await?;

if let Some(acc_info) = maybe_sender_acc_info {
if head_tx.nonce() < acc_info.nonce && !head_tx.is_privileged() {
debug!("Removing transaction with nonce too low from mempool: {tx_hash:#x}");
txs.pop();
blockchain.remove_transaction_from_pool(&tx_hash)?;
continue;
}
if maybe_sender_acc_info.is_some_and(|acc_info| head_tx.nonce() < acc_info.nonce)
&& !head_tx.is_privileged()
{
debug!("Removing transaction with nonce too low from mempool: {tx_hash:#x}");
txs.pop();
blockchain.remove_transaction_from_pool(&tx_hash)?;
continue;
}

// Copy remaining gas and block value before executing the transaction
@@ -232,13 +232,11 @@ pub async fn fill_transactions(
continue;
}
let id = head_tx.nonce();
if let Some(last_nonce) = last_privileged_nonce {
if id != *last_nonce + 1 {
debug!("Ignoring out-of-order privileged transaction");
txs.pop();
undo_last_tx(context, previous_remaining_gas, previous_block_value)?;
continue;
}
if last_privileged_nonce.is_some_and(|last_nonce| id != last_nonce + 1) {
debug!("Ignoring out-of-order privileged transaction");
txs.pop();
undo_last_tx(context, previous_remaining_gas, previous_block_value)?;
continue;
}
last_privileged_nonce.replace(id);
privileged_tx_count += 1;
@@ -443,7 +441,9 @@ fn calculate_tx_diff_size(
if is_privileged_tx(head_tx) {
tx_state_diff_size += PRIVILEGED_TX_LOG_LEN;
}
let l1_message_count: u64 = get_block_l1_messages(&[receipt.clone()]).len().try_into()?;
let l1_message_count: u64 = get_block_l1_messages(std::slice::from_ref(receipt))
.len()
.try_into()?;
tx_state_diff_size += l1_message_count * L1MESSAGE_LOG_LEN;

Ok((tx_state_diff_size, new_accounts_diff_size))
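The last hunk above replaces `&[receipt.clone()]` with `std::slice::from_ref(receipt)`, which views a single `&Receipt` as a one-element slice instead of cloning the receipt into a temporary array. A toy illustration; the `Receipt` struct and `count_messages` helper here are stand-ins, not the real types:

```rust
// Sketch of slice::from_ref: build a &[T] of length 1 from a &T without
// cloning. `Receipt` is a toy stand-in for the real receipt type.
#[derive(Debug)]
struct Receipt {
    cumulative_gas_used: u64,
}

fn count_messages(receipts: &[Receipt]) -> usize {
    // Stand-in for get_block_l1_messages(...).len(): count receipts that used gas.
    receipts.iter().filter(|r| r.cumulative_gas_used > 0).count()
}

fn main() {
    let receipt = Receipt { cumulative_gas_used: 21_000 };
    // Before (conceptually): count_messages(&[receipt.clone()]), which needs
    // Clone and copies the value. After: borrow it as a one-element slice.
    let n = count_messages(std::slice::from_ref(&receipt));
    assert_eq!(n, 1);
    println!("{receipt:?} produced {n} message(s)");
}
```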
14 changes: 7 additions & 7 deletions crates/l2/sequencer/l1_committer.rs
@@ -352,13 +352,13 @@ impl L1Committer {
let current_block_gas_used = block_to_commit_header.gas_used;

// Check if adding this block would exceed the batch gas limit
if let Some(batch_gas_limit) = self.batch_gas_limit {
if acc_gas_used + current_block_gas_used > batch_gas_limit {
debug!(
"Batch gas limit reached. Any remaining blocks will be processed in the next batch"
);
break;
}
if self.batch_gas_limit.is_some_and(|batch_gas_limit| {
acc_gas_used + current_block_gas_used > batch_gas_limit
}) {
debug!(
"Batch gas limit reached. Any remaining blocks will be processed in the next batch"
);
break;
}

// Get block transactions and receipts