17 changes: 9 additions & 8 deletions ledger-tool/src/args.rs
@@ -71,13 +71,13 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> {
.long("accounts-db-skip-shrink")
.help(
"Enables faster starting of ledger-tool by skipping shrink. This option is for \
use during testing.",
use during testing.",
),
Arg::with_name("accounts_db_verify_refcounts")
.long("accounts-db-verify-refcounts")
.help(
"Debug option to scan all AppendVecs and verify account index refcounts prior to \
clean",
clean",
)
.hidden(hidden_unless_forced()),
Arg::with_name("accounts_db_scan_filter_for_shrinking")
@@ -86,12 +86,13 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> {
.possible_values(&["all", "only-abnormal", "only-abnormal-with-verify"])
.help(
"Debug option to use different type of filtering for accounts index scan in \
shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is the default. \
\"only-abnormal\" will scan in-memory accounts index only for abnormal entries and \
skip scanning on-disk accounts index by assuming that on-disk accounts index contains \
only normal accounts index entry. \"only-abnormal-with-verify\" is similar to \
\"only-abnormal\", which will scan in-memory index for abnormal entries, but will also \
verify that on-disk account entries are indeed normal.",
shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is \
the default. \"only-abnormal\" will scan in-memory accounts index only for \
abnormal entries and skip scanning on-disk accounts index by assuming that \
on-disk accounts index contains only normal accounts index entry. \
\"only-abnormal-with-verify\" is similar to \"only-abnormal\", which will scan \
in-memory index for abnormal entries, but will also verify that on-disk account \
entries are indeed normal.",
)
.hidden(hidden_unless_forced()),
Arg::with_name("accounts_db_skip_initial_hash_calculation")
102 changes: 49 additions & 53 deletions ledger-tool/src/bigtable.rs
@@ -93,7 +93,7 @@ async fn upload(
Arc::new(AtomicBool::new(false)),
)
.await?;
info!("last slot checked: {}", last_slot_checked);
info!("last slot checked: {last_slot_checked}");
starting_slot = last_slot_checked.saturating_add(1);
}
info!("No more blocks to upload.");
@@ -220,17 +220,17 @@ fn get_shred_config_from_ledger(
let ending_epoch = epoch_schedule.get_epoch(ending_slot);
if starting_epoch != ending_epoch {
eprintln!(
"The specified --starting-slot and --ending-slot must be in the\
same epoch. --starting-slot {starting_slot} is in epoch {starting_epoch},\
but --ending-slot {ending_slot} is in epoch {ending_epoch}."
"The specified --starting-slot and --ending-slot must be in the same epoch. \
--starting-slot {starting_slot} is in epoch {starting_epoch}, but --ending-slot \
{ending_slot} is in epoch {ending_epoch}."
);
exit(1);
}
if starting_epoch != working_bank_epoch {
eprintln!(
"The range of slots between --starting-slot and --ending-slot are in a \
different epoch than the working bank. The specified range is in epoch \
{starting_epoch}, but the working bank is in {working_bank_epoch}."
"The range of slots between --starting-slot and --ending-slot are in a different \
epoch than the working bank. The specified range is in epoch {starting_epoch}, \
but the working bank is in {working_bank_epoch}."
);
exit(1);
}
@@ -676,7 +676,7 @@ impl CopyArgs {
async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
let from_slot = args.from_slot;
let to_slot = args.to_slot.unwrap_or(from_slot);
debug!("from_slot: {}, to_slot: {}", from_slot, to_slot);
debug!("from_slot: {from_slot}, to_slot: {to_slot}");

if from_slot > to_slot {
return Err("starting slot should be less than or equal to ending slot")?;
@@ -708,7 +708,7 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
}

let workers = min(to_slot - from_slot + 1, num_cpus::get().try_into().unwrap());
debug!("worker num: {}", workers);
debug!("worker num: {workers}");

let success_slots = Arc::new(Mutex::new(vec![]));
let skip_slots = Arc::new(Mutex::new(vec![]));
@@ -727,7 +727,7 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
let failed_slots_clone = Arc::clone(&failed_slots);
tokio::spawn(async move {
while let Ok(slot) = r.try_recv() {
debug!("worker {}: received slot {}", i, slot);
debug!("worker {i}: received slot {slot}");

if !args.force {
match destination_bigtable_clone
@@ -743,8 +743,7 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
Err(err) => {
error!(
"confirmed_block_exists() failed from the destination \
Bigtable, slot: {}, err: {}",
slot, err
Bigtable, slot: {slot}, err: {err}"
);
failed_slots_clone.lock().unwrap().push(slot);
continue;
@@ -756,72 +755,69 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
match source_bigtable_clone.confirmed_block_exists(slot).await {
Ok(exist) => {
if exist {
debug!("will write block: {}", slot);
debug!("will write block: {slot}");
success_slots_clone.lock().unwrap().push(slot);
} else {
debug!("block not found, slot: {}", slot);
debug!("block not found, slot: {slot}");
block_not_found_slots_clone.lock().unwrap().push(slot);
continue;
}
}
Err(err) => {
error!(
"failed to get a confirmed block from the source Bigtable, \
slot: {}, err: {}",
slot, err
slot: {slot}, err: {err}"
);
failed_slots_clone.lock().unwrap().push(slot);
continue;
}
};
} else {
let confirmed_block =
match source_bigtable_clone.get_confirmed_block(slot).await {
Ok(block) => match VersionedConfirmedBlock::try_from(block) {
Ok(block) => block,
Err(err) => {
error!(
"failed to convert confirmed block to versioned \
confirmed block, slot: {}, err: {}",
slot, err
);
failed_slots_clone.lock().unwrap().push(slot);
continue;
}
},
Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => {
debug!("block not found, slot: {}", slot);
block_not_found_slots_clone.lock().unwrap().push(slot);
continue;
}
let confirmed_block = match source_bigtable_clone
.get_confirmed_block(slot)
.await
{
Ok(block) => match VersionedConfirmedBlock::try_from(block) {
Ok(block) => block,
Err(err) => {
error!(
"failed to get confirmed block, slot: {}, err: {}",
slot, err
"failed to convert confirmed block to versioned confirmed \
block, slot: {slot}, err: {err}"
);
failed_slots_clone.lock().unwrap().push(slot);
continue;
}
};
},
Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => {
debug!("block not found, slot: {slot}");
block_not_found_slots_clone.lock().unwrap().push(slot);
continue;
}
Err(err) => {
error!("failed to get confirmed block, slot: {slot}, err: {err}");
failed_slots_clone.lock().unwrap().push(slot);
continue;
}
};

match destination_bigtable_clone
.upload_confirmed_block(slot, confirmed_block)
.await
{
Ok(()) => {
debug!("wrote block: {}", slot);
debug!("wrote block: {slot}");
success_slots_clone.lock().unwrap().push(slot);
}
Err(err) => {
error!("write failed, slot: {}, err: {}", slot, err);
error!("write failed, slot: {slot}, err: {err}");
failed_slots_clone.lock().unwrap().push(slot);
continue;
}
}
}
}

debug!("worker {}: exit", i);
debug!("worker {i}: exit");
})
})
.collect::<FuturesUnordered<_>>();
@@ -837,10 +833,10 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> {
let mut failed_slots = failed_slots.lock().unwrap();
failed_slots.sort();

debug!("success slots: {:?}", success_slots);
debug!("skip slots: {:?}", skip_slots);
debug!("blocks not found slots: {:?}", block_not_found_slots);
debug!("failed slots: {:?}", failed_slots);
debug!("success slots: {success_slots:?}");
debug!("skip slots: {skip_slots:?}");
debug!("blocks not found slots: {block_not_found_slots:?}");
debug!("failed slots: {failed_slots:?}");

println!(
"success: {}, skip: {}, block not found: {}, failed: {}",
@@ -1092,9 +1088,9 @@ impl BigTableSubCommand for App<'_, '_> {
.subcommand(
SubCommand::with_name("shreds")
.about(
"Get confirmed blocks from BigTable, reassemble the transactions \
and entries, shred the block and then insert the shredded blocks into \
the local Blockstore",
"Get confirmed blocks from BigTable, reassemble the transactions and \
entries, shred the block and then insert the shredded blocks into \
the local Blockstore",
)
.arg(load_genesis_arg())
.args(&snapshot_args())
@@ -1122,9 +1118,9 @@ impl BigTableSubCommand for App<'_, '_> {
.takes_value(false)
.help(
"For slots where PoH entries are unavailable, allow the \
generation of mock PoH entries. The mock PoH entries enable \
the shredded block(s) to be replayable if PoH verification is \
disabled.",
generation of mock PoH entries. The mock PoH entries enable \
the shredded block(s) to be replayable if PoH verification \
is disabled.",
),
)
.arg(
Expand All @@ -1135,7 +1131,7 @@ impl BigTableSubCommand for App<'_, '_> {
.conflicts_with("allow_mock_poh")
.help(
"The version to encode in created shreds. Specifying this \
value will avoid determining the value from a rebuilt Bank.",
value will avoid determining the value from a rebuilt Bank.",
),
),
)
@@ -1439,7 +1435,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
if starting_slot > ending_slot {
eprintln!(
"The specified --starting-slot {starting_slot} must be less than or equal to \
the specified --ending-slot {ending_slot}."
the specified --ending-slot {ending_slot}."
);
exit(1);
}
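A note on the logging change that recurs throughout the bigtable.rs hunks above: positional macro arguments such as debug!("wrote block: {}", slot) are replaced with inline captured identifiers such as debug!("wrote block: {slot}"), a form the log macros support via std format strings since Rust 1.58. The following is a minimal standalone sketch of that pattern, not code from this PR; the env_logger setup and the slot values are illustrative assumptions.

use log::{debug, info};

fn main() {
    // Assumed setup so the log macros emit output; any `log` backend would do.
    env_logger::init();

    let last_slot_checked: u64 = 12_345;
    let from_slot: u64 = 100;
    let to_slot: u64 = 200;
    let failed_slots: Vec<u64> = vec![7, 9];

    // Identifiers in scope are captured directly inside the format string,
    // so no trailing positional arguments are needed.
    info!("last slot checked: {last_slot_checked}");
    debug!("from_slot: {from_slot}, to_slot: {to_slot}");

    // The `:?` (Debug) specifier combines with inline capture the same way.
    debug!("failed slots: {failed_slots:?}");
}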