diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 0b17c0972f1c43..28c943f47ed04d 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -1698,7 +1698,7 @@ impl SplitAncientStorages {
                 i += 1;
                 if treat_as_ancient(storage) {
                     // even though the slot is in range of being an ancient append vec, if it isn't actually a large append vec,
-                    // then we are better off treating all these slots as normally cachable to reduce work in dedup.
+                    // then we are better off treating all these slots as normally cacheable to reduce work in dedup.
                     // Since this one is large, for the moment, this one becomes the highest slot where we want to individually cache files.
                     len_truncate = i;
                 }
@@ -5798,7 +5798,7 @@ impl AccountsDb {
     /// This should only be called after the `Bank::drop()` runs in bank.rs, See BANK_DROP_SAFETY
     /// comment below for more explanation.
-    /// * `is_serialized_with_abs` - indicates whehter this call runs sequentially with all other
+    /// * `is_serialized_with_abs` - indicates whether this call runs sequentially with all other
     ///   accounts_db relevant calls, such as shrinking, purging etc., in account background
     ///   service.
     pub fn purge_slot(&self, slot: Slot, bank_id: BankId, is_serialized_with_abs: bool) {
@@ -6182,7 +6182,7 @@ impl AccountsDb {
         // allocate a buffer on the stack that's big enough
         // to hold a token account or a stake account
         const META_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */;
-        const DATA_SIZE: usize = 200; // stake acounts are 200 B and token accounts are 165-182ish B
+        const DATA_SIZE: usize = 200; // stake accounts are 200 B and token accounts are 165-182ish B
         const BUFFER_SIZE: usize = META_SIZE + DATA_SIZE;
         let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new();
@@ -8950,7 +8950,7 @@ impl AccountsDb {
             // these write directly to disk, so the more threads, the better
             num_cpus::get()
         } else {
-            // seems to be a good hueristic given varying # cpus for in-mem disk index
+            // seems to be a good heuristic given varying # cpus for in-mem disk index
             8
         };
         let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot
diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
index 43ebba3c8d175f..6f94f4300de675 100644
--- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs
+++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
@@ -121,7 +121,7 @@ impl AccountsDb {
         // later entries in the same slot are more recent and override earlier accounts for the same pubkey
         // We can pass an incrementing number here for write_version in the future, if the storage does not have a write_version.
-        // As long as all accounts for this slot are in 1 append vec that can be itereated olest to newest.
+        // As long as all accounts for this slot are in 1 append vec that can be iterated oldest to newest.
         self.notify_filtered_accounts(
             slot,
             notified_accounts,
diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index aa700557350fc1..c03e5eb536023e 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -250,7 +250,7 @@ impl AncientSlotInfos {
     /// remove entries from 'all_infos' such that combining
     /// the remaining entries into storages of 'ideal_storage_size'
     /// will get us below 'max_storages'
-    /// The entires that are removed will be reconsidered the next time around.
+    /// The entries that are removed will be reconsidered the next time around.
     /// Combining too many storages costs i/o and cpu so the goal is to find the sweet spot so
     /// that we make progress in cleaning/shrinking/combining but that we don't cause unnecessary
     /// churn.
diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs
index bc7e19112e516f..fb90b68d124702 100644
--- a/accounts-db/src/bucket_map_holder.rs
+++ b/accounts-db/src/bucket_map_holder.rs
@@ -533,7 +533,7 @@ pub mod tests {
         (0..threads).into_par_iter().for_each(|_| {
             // This test used to be more strict with time, but in a parallel, multi test environment,
             // sometimes threads starve and this test intermittently fails. So, give it more time than it should require.
-            // This may be aggrevated by the strategy of only allowing thread 0 to advance the age.
+            // This may be aggravated by the strategy of only allowing thread 0 to advance the age.
             while now.elapsed().as_millis() < (time as u128) * 100 {
                 if test.maybe_advance_age() {
                     test.bucket_flushed_at_current_age(true);
diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs
index a5628866abb0c1..5298b386793d2a 100644
--- a/accounts-db/src/buffered_reader.rs
+++ b/accounts-db/src/buffered_reader.rs
@@ -5,7 +5,7 @@
 //! calling read(), advance_offset() and set_required_data_len(account_data_len) once the next account
 //! data length is known.
 //!
-//! Unlike BufRead/BufReader, this type guarnatees that on the next read() after calling
+//! Unlike BufRead/BufReader, this type guarantees that on the next read() after calling
 //! set_required_data_len(len), the whole account data is buffered _linearly_ in memory and available to
 //! be returned.
 use {