Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

174 changes: 0 additions & 174 deletions core/src/banking_stage/consumer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -915,55 +915,6 @@ mod tests {
},
};

// Test helper: runs `transactions` through a `Consumer` backed by a freshly
// constructed `PohRecorder` plus a simulated PoH thread, and returns the
// `ProcessTransactionsSummary` so callers can assert on commit/retry counts.
//
// NOTE(review): the statement order below matters — the bank must be set on
// the recorder before processing, and `is_exited` must be stored before
// joining the simulator thread, or the join would block.
fn execute_transactions_with_dummy_poh_service(
bank: Arc<Bank>,
transactions: Vec<Transaction>,
) -> ProcessTransactionsSummary {
let transactions = sanitize_transactions(transactions);
// Blockstore over an auto-deleted temp ledger; only needed to satisfy
// `PohRecorder::new`.
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path())
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&PohConfig::default(),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.new_recorder();
let poh_recorder = Arc::new(RwLock::new(poh_recorder));

// Make the recorder consider `bank` the active working bank so records
// are accepted during `process_transactions`.
poh_recorder
.write()
.unwrap()
.set_bank_for_test(bank.clone());

// Background thread that drains `record_receiver`, standing in for a
// real PoH service.
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

let (replay_vote_sender, _replay_vote_receiver) = unbounded();
// Committer with no transaction status sender (`None`) and a fee cache
// with zero capacity — minimal wiring for tests.
let committer = Committer::new(
None,
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
let process_transactions_summary =
consumer.process_transactions(&bank, &Instant::now(), &transactions);

// Signal the simulated PoH thread to exit, then wait for it so the
// temp ledger can be torn down cleanly.
poh_recorder
.read()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();

process_transactions_summary
}

fn generate_new_address_lookup_table(
authority: Option<Pubkey>,
num_addresses: usize,
Expand Down Expand Up @@ -1694,131 +1645,6 @@ mod tests {
Blockstore::destroy(ledger_path.path()).unwrap();
}

// Verifies that a transaction failing with an `InstructionError` is still
// committed (consuming its blockhash), while lock-conflicting duplicates in
// the same batch are returned as retryable.
#[test]
fn test_process_transactions_instruction_error() {
solana_logger::setup();
let lamports = 10_000;
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(lamports);
let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(u64::MAX, u64::MAX, u64::MAX);

// Transfer more than the balance of the mint keypair, should cause an
// InstructionError::InsufficientFunds that is then committed. Repeat it
// TARGET_NUM_TRANSACTIONS_PER_BATCH times so the final successful
// transaction below lands in a separate batch and doesn't conflict on
// account locks with these.
let mut transactions = vec![
system_transaction::transfer(
&mint_keypair,
&Pubkey::new_unique(),
lamports + 1,
genesis_config.hash(),
);
TARGET_NUM_TRANSACTIONS_PER_BATCH
];

// Make one transaction that will succeed.
transactions.push(system_transaction::transfer(
&mint_keypair,
&Pubkey::new_unique(),
1,
genesis_config.hash(),
));

let transactions_len = transactions.len();
let ProcessTransactionsSummary {
reached_max_poh_height,
transaction_counts,
retryable_transaction_indexes,
..
} = execute_transactions_with_dummy_poh_service(bank, transactions);

// All transactions were attempted, but only 2 committed: the first
// (fails with InstructionError, still committed) and the last
// (succeeds). The duplicates in between conflict on the mint account
// locks and remain retryable.
assert!(!reached_max_poh_height);
assert_eq!(
transaction_counts,
CommittedTransactionsCounts {
attempted_processing_count: transactions_len as u64,
// Both transactions should have been committed, even though one was an error,
// because InstructionErrors are committed
committed_transactions_count: 2,
committed_transactions_with_successful_result_count: 1,
processed_but_failed_commit: 0,
}
);
assert_eq!(
retryable_transaction_indexes,
(1..transactions_len - 1).collect::<Vec<usize>>()
);
}

// Verifies batch-level account-lock semantics: within one batch only the
// first of many lock-conflicting transactions executes; a conflicting
// transaction in a separate batch still executes.
#[test]
fn test_process_transactions_account_in_use() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(u64::MAX, u64::MAX, u64::MAX);

// Make all repetitive transactions that conflict on the `mint_keypair`, so only 1 should be executed
let mut transactions = vec![
system_transaction::transfer(
&mint_keypair,
&Pubkey::new_unique(),
1,
genesis_config.hash()
);
TARGET_NUM_TRANSACTIONS_PER_BATCH
];

// Make one more in separate batch that also conflicts, but because it's in a separate batch, it
// should be executed
transactions.push(system_transaction::transfer(
&mint_keypair,
&Pubkey::new_unique(),
1,
genesis_config.hash(),
));

let transactions_len = transactions.len();
let ProcessTransactionsSummary {
reached_max_poh_height,
transaction_counts,
retryable_transaction_indexes,
..
} = execute_transactions_with_dummy_poh_service(bank, transactions);

// All the transactions should have been replayed, but only 2 committed (first and last)
assert!(!reached_max_poh_height);
assert_eq!(
transaction_counts,
CommittedTransactionsCounts {
attempted_processing_count: transactions_len as u64,
committed_transactions_count: 2,
committed_transactions_with_successful_result_count: 2,
processed_but_failed_commit: 0,
}
);

// Everything except first and last index of the transactions failed and are last retryable
assert_eq!(
retryable_transaction_indexes,
(1..transactions_len - 1).collect::<Vec<usize>>()
);
}

#[test]
fn test_process_transactions_returns_unprocessed_txs() {
solana_logger::setup();
Expand Down
7 changes: 1 addition & 6 deletions ledger/src/blockstore_processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3827,16 +3827,11 @@ pub mod tests {
&[&mint_keypair],
bank.last_blockhash(),
);
// First process attempt will fail but still update status cache
// First process attempt will fail
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::ProgramAccountNotFound)
);
// Second attempt will be rejected since tx was already in status cache
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::AlreadyProcessed)
);

// Make sure other errors don't update the signature cache
let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
Expand Down
27 changes: 19 additions & 8 deletions runtime/src/bank/check_transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,11 @@ use {
nonce_info::NonceInfo,
transaction_error_metrics::TransactionErrorMetrics,
},
std::collections::HashSet,
};

const DEFAULT_NUM_TRANSACTIONS_PER_BATCH: usize = 64;

impl Bank {
/// Checks a batch of sanitized transactions again bank for age and status
pub fn check_transactions_with_forwarding_delay(
Expand Down Expand Up @@ -61,7 +64,7 @@ impl Bank {
self.check_status_cache(sanitized_txs, lock_results, error_counters)
}

fn check_age(
pub fn check_age(
&self,
sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
lock_results: &[TransactionResult<()>],
Expand All @@ -71,18 +74,26 @@ impl Bank {
let hash_queue = self.blockhash_queue.read().unwrap();
let last_blockhash = hash_queue.last_hash();
let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash);
let mut entry_level_dedup_map = HashSet::with_capacity(DEFAULT_NUM_TRANSACTIONS_PER_BATCH);

sanitized_txs
.iter()
.zip(lock_results)
.map(|(tx, lock_res)| match lock_res {
Ok(()) => self.check_transaction_age(
tx.borrow(),
max_age,
&next_durable_nonce,
&hash_queue,
error_counters,
),
Ok(()) => {
let msg_hash = tx.borrow().message_hash();
if !entry_level_dedup_map.insert(msg_hash) {
return Err(TransactionError::AlreadyProcessed);
}

self.check_transaction_age(
tx.borrow(),
max_age,
&next_durable_nonce,
&hash_queue,
error_counters,
)
}
Err(e) => Err(e.clone()),
})
.collect()
Expand Down
Loading