1 change: 0 additions & 1 deletion core/src/consensus.rs
@@ -631,7 +631,6 @@ impl Tower {
}
}

#[cfg(test)]
Reviewer:

think you can use the new guy here:

#[cfg(feature = "dev-context-only-utils")]

Author (@carllin, Mar 23, 2024):

didn't even realize this existed, works much better

Reviewer:

huh strange. I'm fine if you push it without the flag for now

Author:

cool, removed

pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
self.record_bank_vote_and_update_lockouts(slot, hash)
}
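For reference, a minimal sketch of the gating the reviewer suggested above; it did not land (per the thread, the attribute was dropped), and it assumes the crate declares a dev-context-only-utils feature in its Cargo.toml.

// Hypothetical alternative to #[cfg(test)]: unlike a test-only gate, this stays
// visible to downstream test crates (e.g. local-cluster) that enable the
// feature, while still being compiled out of production builds.
#[cfg(feature = "dev-context-only-utils")]
pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
    self.record_bank_vote_and_update_lockouts(slot, hash)
}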
54 changes: 52 additions & 2 deletions local-cluster/src/cluster_tests.rs
@@ -10,7 +10,10 @@ use {
connection_cache::{ConnectionCache, Protocol},
thin_client::ThinClient,
},
solana_core::consensus::VOTE_THRESHOLD_DEPTH,
solana_core::consensus::{
tower_storage::{FileTowerStorage, SavedTower, SavedTowerVersions, TowerStorage},
VOTE_THRESHOLD_DEPTH,
},
solana_entry::entry::{Entry, EntrySlice},
solana_gossip::{
cluster_info::{self, ClusterInfo},
@@ -43,7 +46,7 @@ use {
borrow::Borrow,
collections::{HashMap, HashSet, VecDeque},
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
path::Path,
path::{Path, PathBuf},
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
@@ -334,6 +337,53 @@ pub fn kill_entry_and_spend_and_verify_rest(
}
}

pub fn apply_votes_to_tower(node_keypair: &Keypair, votes: Vec<(Slot, Hash)>, tower_path: PathBuf) {
let tower_storage = FileTowerStorage::new(tower_path);
let mut tower = tower_storage.load(&node_keypair.pubkey()).unwrap();
for (slot, hash) in votes {
tower.record_vote(slot, hash);
}
let saved_tower = SavedTowerVersions::from(SavedTower::new(&tower, node_keypair).unwrap());
tower_storage.store(&saved_tower).unwrap();
}
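A hypothetical usage sketch of the new helper (not part of the diff; the keypair, tower directory, and vote slots/hashes are assumed for illustration): stop a validator, append votes to its persisted tower, then restart it.

// Hypothetical test snippet: `validator_keypair` and `tower_dir` would come
// from the test's LocalCluster setup; the slots and hashes are placeholders.
let votes: Vec<(Slot, Hash)> = vec![(100, Hash::default()), (101, Hash::default())];
apply_votes_to_tower(&validator_keypair, votes, tower_dir.clone());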

pub fn check_min_slot_is_rooted(
min_slot: Slot,
Reviewer:

I'm confused: why is this min_slot instead of just slot? (How is min_slot different from a normal slot?)

Author:

it's not guaranteed that, at the time you check, the root is exactly some slot X; the validator could have made another root by then. Thus we just check for a minimum

contact_infos: &[ContactInfo],
connection_cache: &Arc<ConnectionCache>,
Reviewer:

nit: probably better to describe in the function name or comments that we are testing that the designated slot is rooted on all given contact_infos?

Author:

it's not that we are checking that min_slot itself is rooted; we are checking that some slot greater than or equal to min_slot is rooted

test_name: &str,
) {
let mut last_print = Instant::now();
let loop_start = Instant::now();
let loop_timeout = Duration::from_secs(180);
for ingress_node in contact_infos.iter() {
let (rpc, tpu) = LegacyContactInfo::try_from(ingress_node)
.map(|node| get_client_facing_addr(connection_cache.protocol(), node))
.unwrap();
Reviewer:

Are we sure this unwrap() will always succeed?

Author:

yup, if the contact info is well formed it seems like it should always work

let client = ThinClient::new(rpc, tpu, connection_cache.clone());
loop {
let root_slot = client
.get_slot_with_commitment(CommitmentConfig::finalized())
Reviewer:

don't know the context of where you plan to use this yet, but is this preferable to checking the root from VoteState?

Author:

That was the other option, but this was intended as a faster replacement for check_for_new_roots(), which goes through the client.

Directly introspecting the tower has always been more of a hack, since LocalCluster was always supposed to support a generic interface for interacting with any cluster, even one that's remote. So when possible, I'm trying to avoid it

.unwrap_or(0);
if root_slot >= min_slot || last_print.elapsed().as_secs() > 3 {
info!(
"{} waiting for node {} to see root >= {}.. observed latest root: {}",
test_name,
ingress_node.pubkey(),
min_slot,
root_slot
);
last_print = Instant::now();
if root_slot >= min_slot {
break;
}
}
sleep(Duration::from_millis(clock::DEFAULT_MS_PER_SLOT / 2));
assert!(loop_start.elapsed() < loop_timeout);
}
}
}

pub fn check_for_new_roots(
num_new_roots: usize,
contact_infos: &[ContactInfo],
39 changes: 34 additions & 5 deletions local-cluster/src/local_cluster.rs
@@ -33,7 +33,7 @@ use {
solana_sdk::{
account::{Account, AccountSharedData},
client::SyncClient,
clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
feature_set,
@@ -557,12 +557,11 @@ impl LocalCluster {
Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
}

pub fn check_for_new_roots(
fn discover_nodes(
&self,
num_new_roots: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
test_name: &str,
) -> Vec<ContactInfo> {
let alive_node_contact_infos: Vec<_> = self
.validators
.values()
@@ -577,6 +576,36 @@
)
.unwrap();
info!("{} discovered {} nodes", test_name, cluster_nodes.len());
alive_node_contact_infos
}

pub fn check_min_slot_is_rooted(
&self,
min_root: Slot,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
info!(
"{} looking minimum root {} on all nodes",
min_root, test_name
);
cluster_tests::check_min_slot_is_rooted(
min_root,
&alive_node_contact_infos,
&self.connection_cache,
test_name,
);
info!("{} done waiting for roots", test_name);
}
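A hypothetical call site from a local-cluster test (the cluster handle, target root, and test name below are assumed, not taken from the PR), blocking until every discovered node reports a finalized root at or above the target:

// Hypothetical usage: wait until all live nodes have rooted at least `target_root`.
cluster.check_min_slot_is_rooted(target_root, "test_min_root", SocketAddrSpace::Unspecified);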

pub fn check_for_new_roots(
&self,
num_new_roots: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
info!("{} looking for new roots on all nodes", test_name);
cluster_tests::check_for_new_roots(
num_new_roots,