From cbee301b5c6cd27327101582fcd4e90c5667b648 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Mon, 12 Oct 2020 14:12:28 -0600 Subject: [PATCH 1/8] Bank: Manually implement Default --- runtime/src/bank.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5f0ad18007cb6a..965cc439894b68 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -558,7 +558,7 @@ pub struct RewardInfo { /// Manager for the state of all accounts and programs after processing its entries. /// AbiExample is needed even without Serialize/Deserialize; actual (de-)serialization /// are implemented elsewhere for versioning -#[derive(AbiExample, Default)] +#[derive(AbiExample)] pub struct Bank { /// References to accounts, parent and signature status pub rc: BankRc, @@ -695,6 +695,57 @@ impl Default for BlockhashQueue { } } +impl Default for Bank { + fn default() -> Self { + Self { + rc: BankRc::default(), + src: StatusCacheRc::default(), + blockhash_queue: RwLock::default(), + ancestors: Ancestors::default(), + hash: RwLock::default(), + parent_hash: Hash::default(), + parent_slot: Slot::default(), + hard_forks: Arc::default(), + transaction_count: AtomicU64::default(), + tick_height: AtomicU64::default(), + signature_count: AtomicU64::default(), + capitalization: AtomicU64::default(), + max_tick_height: u64::default(), + hashes_per_tick: Option::default(), + ticks_per_slot: u64::default(), + ns_per_slot: u128::default(), + genesis_creation_time: UnixTimestamp::default(), + slots_per_year: f64::default(), + unused: u64::default(), + slot: Slot::default(), + epoch: Epoch::default(), + block_height: u64::default(), + collector_id: Pubkey::default(), + collector_fees: AtomicU64::default(), + fee_calculator: FeeCalculator::default(), + fee_rate_governor: FeeRateGovernor::default(), + collected_rent: AtomicU64::default(), + rent_collector: RentCollector::default(), + epoch_schedule: 
EpochSchedule::default(), + inflation: Arc::default(), + stakes: RwLock::default(), + epoch_stakes: HashMap::default(), + is_delta: AtomicBool::default(), + message_processor: MessageProcessor::default(), + feature_builtins: builtins::get().feature_builtins.into(), + last_vote_sync: AtomicU64::default(), + rewards: RwLock::default(), + skip_drop: AtomicBool::default(), + cluster_type: Option::default(), + lazy_rent_collection: AtomicBool::default(), + rewards_pool_pubkeys: Arc::default(), + cached_executors: RwLock::default(), + transaction_debug_keys: Option::default(), + feature_set: Arc::default(), + } + } +} + impl Bank { pub fn new(genesis_config: &GenesisConfig) -> Self { Self::new_with_paths(&genesis_config, Vec::new(), &[], None, None) From df4d6344ce72aa11f5acc6da7ac41b599d31b0f3 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Thu, 8 Oct 2020 10:37:03 -0600 Subject: [PATCH 2/8] clippy: stable_sort_primitive (stable) --- ci/test-checks.sh | 3 +-- core/src/banking_stage.rs | 2 +- core/src/cluster_info.rs | 2 +- core/src/cluster_slots_service.rs | 4 ++-- core/src/rpc_subscriptions.rs | 2 +- core/tests/fork-selection.rs | 2 +- ledger/src/bigtable_upload.rs | 2 +- ledger/src/blockstore.rs | 6 +++--- ledger/src/blockstore_processor.rs | 2 +- ledger/src/leader_schedule_cache.rs | 2 +- ledger/tests/blockstore.rs | 2 +- runtime/src/accounts_db.rs | 12 ++++++------ runtime/src/bank.rs | 8 ++++---- runtime/src/bloom.rs | 4 ++-- sdk/src/hard_forks.rs | 2 +- 15 files changed, 27 insertions(+), 28 deletions(-) diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 04b67396a2a33f..bb98a82d79fc8b 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -54,8 +54,7 @@ _ cargo +"$rust_stable" fmt --all -- --check # -Z... 
is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ cargo +"$rust_nightly" clippy \ - -Zunstable-options --workspace --all-targets \ - -- --deny=warnings --allow=clippy::stable_sort_primitive + -Zunstable-options --workspace --all-targets -- --deny=warnings cargo_audit_ignores=( # failure is officially deprecated/unmaintained diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 8fdf5f9f8317d8..e16b8a71a3d6d1 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1961,7 +1961,7 @@ mod tests { assert_eq!(processed_transactions_count, 0,); - retryable_txs.sort(); + retryable_txs.sort_unstable(); let expected: Vec = (0..transactions.len()).collect(); assert_eq!(retryable_txs, expected); } diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 38d442e08bc972..1346c9dd9da09a 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -657,7 +657,7 @@ impl ClusterInfo { )) }) .collect(); - current_slots.sort(); + current_slots.sort_unstable(); let min_slot: Slot = current_slots .iter() .map(|((_, s), _)| *s) diff --git a/core/src/cluster_slots_service.rs b/core/src/cluster_slots_service.rs index 27daa9d3bf36b2..29c584ef494a81 100644 --- a/core/src/cluster_slots_service.rs +++ b/core/src/cluster_slots_service.rs @@ -125,7 +125,7 @@ impl ClusterSlotsService { while let Ok(mut more) = completed_slots_receiver.try_recv() { slots.append(&mut more); } - slots.sort(); + slots.sort_unstable(); if !slots.is_empty() { cluster_info.push_epoch_slots(&slots); } @@ -163,7 +163,7 @@ impl ClusterSlotsService { while let Ok(mut more) = completed_slots_receiver.try_recv() { slots.append(&mut more); } - slots.sort(); + slots.sort_unstable(); slots.dedup(); if !slots.is_empty() { cluster_info.push_epoch_slots(&slots); diff --git a/core/src/rpc_subscriptions.rs b/core/src/rpc_subscriptions.rs index 
d8896d51facbaf..8eb04d5375d152 100644 --- a/core/src/rpc_subscriptions.rs +++ b/core/src/rpc_subscriptions.rs @@ -737,7 +737,7 @@ impl RpcSubscriptions { } pub fn notify_roots(&self, mut rooted_slots: Vec) { - rooted_slots.sort(); + rooted_slots.sort_unstable(); rooted_slots.into_iter().for_each(|root| { self.enqueue_notification(NotificationEntry::Root(root)); }); diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 01acb7dd972ee4..8cf3d8b20dfe1f 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -190,7 +190,7 @@ impl Tower { .map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i)) .collect(); // highest score, latest vote first - best.sort(); + best.sort_unstable(); if self.parasite { best.reverse(); } diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index dd4397553141ed..2ebc29f59f9bdf 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -107,7 +107,7 @@ pub async fn upload_confirmed_blocks( .difference(&bigtable_slots) .cloned() .collect::>(); - blocks_to_upload.sort(); + blocks_to_upload.sort_unstable(); blocks_to_upload }; diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index c73ac9eb0e8fce..52697a643a1b0a 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1598,7 +1598,7 @@ impl Blockstore { .map(|(iter_slot, _)| iter_slot) .take(timestamp_sample_range) .collect(); - timestamp_slots.sort(); + timestamp_slots.sort_unstable(); get_slots.stop(); datapoint_info!( "blockstore-get-timestamp-slots", @@ -4299,9 +4299,9 @@ pub mod tests { all_shreds.shuffle(&mut thread_rng()); ledger.insert_shreds(all_shreds, None, false).unwrap(); let mut result = recvr.try_recv().unwrap(); - result.sort(); + result.sort_unstable(); slots.push(disconnected_slot); - slots.sort(); + slots.sort_unstable(); assert_eq!(result, slots); } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 
9cc52134ada6f0..ddd15f086f1d58 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -2835,7 +2835,7 @@ pub mod tests { fn frozen_bank_slots(bank_forks: &BankForks) -> Vec { let mut slots: Vec<_> = bank_forks.frozen_banks().keys().cloned().collect(); - slots.sort(); + slots.sort_unstable(); slots } diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index 104f890379e1c9..b452ae1e29b40b 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -322,7 +322,7 @@ mod tests { LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES); assert_eq!(cached_schedules.len(), MAX_SCHEDULES); let mut keys: Vec<_> = cached_schedules.keys().cloned().collect(); - keys.sort(); + keys.sort_unstable(); let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect(); let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect(); assert_eq!(expected, keys); diff --git a/ledger/tests/blockstore.rs b/ledger/tests/blockstore.rs index 50393832af0527..4840b57a16b81d 100644 --- a/ledger/tests/blockstore.rs +++ b/ledger/tests/blockstore.rs @@ -37,7 +37,7 @@ fn test_multiple_threads_insert_shred() { // Check slot 0 has the correct children let mut meta0 = blockstore.meta(0).unwrap().unwrap(); - meta0.next_slots.sort(); + meta0.next_slots.sort_unstable(); let expected_next_slots: Vec<_> = (1..num_threads + 1).collect(); assert_eq!(meta0.next_slots, expected_next_slots); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index e701b2c75b56ec..d3879ffb4ea818 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2370,7 +2370,7 @@ impl AccountsDB { let mut accounts_index = self.accounts_index.write().unwrap(); let storage = self.storage.read().unwrap(); let mut slots: Vec = storage.0.keys().cloned().collect(); - slots.sort(); + slots.sort_unstable(); let mut last_log_update = Instant::now(); for (index, slot) in 
slots.iter().enumerate() { @@ -2467,7 +2467,7 @@ impl AccountsDB { .iter() .cloned() .collect(); - roots.sort(); + roots.sort_unstable(); info!("{}: accounts_index roots: {:?}", label, roots,); for (pubkey, list) in &self.accounts_index.read().unwrap().account_maps { info!(" key: {}", pubkey); @@ -2478,13 +2478,13 @@ impl AccountsDB { fn print_count_and_status(&self, label: &'static str) { let storage = self.storage.read().unwrap(); let mut slots: Vec<_> = storage.0.keys().cloned().collect(); - slots.sort(); + slots.sort_unstable(); info!("{}: count_and status for {} slots:", label, slots.len()); for slot in &slots { let slot_stores = storage.0.get(slot).unwrap(); let mut ids: Vec<_> = slot_stores.keys().cloned().collect(); - ids.sort(); + ids.sort_unstable(); for id in &ids { let entry = slot_stores.get(id).unwrap(); info!( @@ -4692,7 +4692,7 @@ pub mod tests { accounts.reset_uncleaned_roots(); let mut actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone(); - actual_slots.sort(); + actual_slots.sort_unstable(); assert_eq!(actual_slots, vec![0, 1, 2]); accounts.accounts_index.write().unwrap().roots.clear(); @@ -4878,7 +4878,7 @@ pub mod tests { store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); AccountsDB::calc_delete_dependencies(&purges, &mut store_counts); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); - stores.sort(); + stores.sort_unstable(); for store in &stores { info!( "store: {:?} : {:?}", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 965cc439894b68..9996793312be70 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1084,7 +1084,7 @@ impl Bank { } let mut ancestors: Vec<_> = roots.into_iter().collect(); - ancestors.sort(); + ancestors.sort_unstable(); ancestors } @@ -4236,13 +4236,13 @@ mod tests { impl Bank { fn epoch_stake_keys(&self) -> Vec { let mut keys: Vec = self.epoch_stakes.keys().copied().collect(); - keys.sort(); + keys.sort_unstable(); keys } fn epoch_stake_key_info(&self) -> 
(Epoch, Epoch, usize) { let mut keys: Vec = self.epoch_stakes.keys().copied().collect(); - keys.sort(); + keys.sort_unstable(); (*keys.first().unwrap(), *keys.last().unwrap(), keys.len()) } } @@ -9027,7 +9027,7 @@ mod tests { let mut consumed_budgets = (0..3) .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account)) .collect::>(); - consumed_budgets.sort(); + consumed_budgets.sort_unstable(); // consumed_budgets represents the count of alive accounts in the three slots 0,1,2 assert_eq!(consumed_budgets, vec![0, 1, 9]); } diff --git a/runtime/src/bloom.rs b/runtime/src/bloom.rs index 680555f016fbff..6f3cadb693c714 100644 --- a/runtime/src/bloom.rs +++ b/runtime/src/bloom.rs @@ -218,8 +218,8 @@ mod test { fn test_random() { let mut b1: Bloom = Bloom::random(10, 0.1, 100); let mut b2: Bloom = Bloom::random(10, 0.1, 100); - b1.keys.sort(); - b2.keys.sort(); + b1.keys.sort_unstable(); + b2.keys.sort_unstable(); assert_ne!(b1.keys, b2.keys); } // Bloom filter math in python diff --git a/sdk/src/hard_forks.rs b/sdk/src/hard_forks.rs index 2b65419db196c3..fb8c22d4619e55 100644 --- a/sdk/src/hard_forks.rs +++ b/sdk/src/hard_forks.rs @@ -20,7 +20,7 @@ impl HardForks { } else { self.hard_forks.push((new_slot, 1)); } - self.hard_forks.sort(); + self.hard_forks.sort_unstable(); } // Returns a sorted-by-slot iterator over the registered hark forks From 9fe2fdd47007edfc25d8ebeaed06c88070e70a31 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Thu, 8 Oct 2020 12:48:11 -0600 Subject: [PATCH 3/8] clippy: unnecessary_lazy_evaluations (nightly) --- account-decoder/src/parse_account_data.rs | 2 +- core/src/cluster_info.rs | 2 +- core/src/crds_gossip_pull.rs | 7 ++----- core/src/crds_gossip_push.rs | 9 ++------- core/src/validator.rs | 2 +- ledger-tool/src/main.rs | 2 +- ledger/src/blockstore/blockstore_purge.rs | 2 +- programs/vote/src/vote_state/mod.rs | 2 +- ramp-tps/src/results.rs | 3 +-- runtime/src/status_cache.rs | 2 +- storage-bigtable/src/bigtable.rs | 2 
+- transaction-status/src/parse_instruction.rs | 2 +- 12 files changed, 14 insertions(+), 23 deletions(-) diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index a9f4aedf99ef1d..b1a467b4656e00 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -81,7 +81,7 @@ pub fn parse_account_data( ) -> Result { let program_name = PARSABLE_PROGRAM_IDS .get(program_id) - .ok_or_else(|| ParseAccountError::ProgramNotParsable)?; + .ok_or(ParseAccountError::ProgramNotParsable)?; let additional_data = additional_data.unwrap_or_default(); let parsed_json = match program_name { ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?, diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 1346c9dd9da09a..cfe4f53c51b977 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -1012,7 +1012,7 @@ impl ClusterInfo { self.get_lowest_slot_for_node(&x.id, None, |lowest_slot, _| { lowest_slot.lowest <= slot }) - .unwrap_or_else(|| /* fallback to legacy behavior */ true) + .unwrap_or(/* fallback to legacy behavior */ true) } }) .collect(); diff --git a/core/src/crds_gossip_pull.rs b/core/src/crds_gossip_pull.rs index f65090b160171f..b2d375ab590407 100644 --- a/core/src/crds_gossip_pull.rs +++ b/core/src/crds_gossip_pull.rs @@ -327,10 +327,7 @@ impl CrdsGossipPull { for r in responses { let owner = r.label().pubkey(); // Check if the crds value is older than the msg_timeout - if now - > r.wallclock() - .checked_add(self.msg_timeout) - .unwrap_or_else(|| 0) + if now > r.wallclock().checked_add(self.msg_timeout).unwrap_or(0) || now + self.msg_timeout < r.wallclock() { match &r.label() { @@ -340,7 +337,7 @@ impl CrdsGossipPull { let timeout = *timeouts .get(&owner) .unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap()); - if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0) + if now > 
r.wallclock().checked_add(timeout).unwrap_or(0) || now + timeout < r.wallclock() { stats.timeout_count += 1; diff --git a/core/src/crds_gossip_push.rs b/core/src/crds_gossip_push.rs index 07a9dc791af633..b7c3cdc6f1244d 100644 --- a/core/src/crds_gossip_push.rs +++ b/core/src/crds_gossip_push.rs @@ -172,12 +172,7 @@ impl CrdsGossipPush { now: u64, ) -> Result, CrdsGossipError> { self.num_total += 1; - if now - > value - .wallclock() - .checked_add(self.msg_timeout) - .unwrap_or_else(|| 0) - { + if now > value.wallclock().checked_add(self.msg_timeout).unwrap_or(0) { return Err(CrdsGossipError::PushMessageTimeout); } if now + self.msg_timeout < value.wallclock() { @@ -205,7 +200,7 @@ impl CrdsGossipPush { /// push pull responses pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) { for (label, value_hash, wc) in values { - if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) { + if now > wc.checked_add(self.msg_timeout).unwrap_or(0) { continue; } self.push_messages.insert(label, value_hash); diff --git a/core/src/validator.rs b/core/src/validator.rs index 6b751a2b6c93d2..1bcb337498a502 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1056,7 +1056,7 @@ fn report_target_features() { // Validator binaries built on a machine with AVX support will generate invalid opcodes // when run on machines without AVX causing a non-obvious process abort. Instead detect // the mismatch and error cleanly. 
- #[target_feature(enable = "avx")] + #[cfg(target_feature = "avx")] { if is_x86_feature_detected!("avx") { info!("AVX detected"); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 04a1c96a2516c5..1b0239933db039 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2155,7 +2155,7 @@ fn main() { println!("Ledger is empty"); } else { let first = slots.first().unwrap(); - let last = slots.last().unwrap_or_else(|| first); + let last = slots.last().unwrap_or(first); if first != last { println!("Ledger has data for slots {:?} to {:?}", first, last); if all { diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 5030858c8c110f..8b67ab95e7c4c5 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -91,7 +91,7 @@ impl Blockstore { .batch() .expect("Database Error: Failed to get write batch"); // delete range cf is not inclusive - let to_slot = to_slot.checked_add(1).unwrap_or_else(|| std::u64::MAX); + let to_slot = to_slot.checked_add(1).unwrap_or(std::u64::MAX); let mut delete_range_timer = Measure::start("delete_range"); let mut columns_purged = self diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index bf2cc92c9bee38..b1e88e9df98a2a 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -713,7 +713,7 @@ pub fn process_vote( vote.slots .iter() .max() - .ok_or_else(|| VoteError::EmptySlots) + .ok_or(VoteError::EmptySlots) .and_then(|slot| vote_state.process_timestamp(*slot, timestamp))?; } vote_account.set_state(&VoteStateVersions::Current(Box::new(vote_state))) diff --git a/ramp-tps/src/results.rs b/ramp-tps/src/results.rs index d201b4417d9249..42a1b69674a333 100644 --- a/ramp-tps/src/results.rs +++ b/ramp-tps/src/results.rs @@ -37,8 +37,7 @@ impl Results { ) -> Self { let mut results: BTreeMap> = BTreeMap::new(); previous_results.drain().for_each(|(key, 
value)| { - if key.starts_with(ROUND_KEY_PREFIX) { - let round_str = &key[ROUND_KEY_PREFIX.len()..]; + if let Some(round_str) = key.strip_prefix(ROUND_KEY_PREFIX) { dbg!(round_str); if let Ok(round) = u32::from_str(round_str) { if round < start_round { diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 80da95b86debb9..ee59283a7f14d2 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -226,7 +226,7 @@ impl StatusCache { ( *slot, self.roots.contains(slot), - self.slot_deltas.get(slot).unwrap_or_else(|| &empty).clone(), + self.slot_deltas.get(slot).unwrap_or(&empty).clone(), ) }) .collect() diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index dddbafdf05d526..9077a4beb823ea 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -445,7 +445,7 @@ impl BigTable { rows.into_iter() .next() .map(|r| r.1) - .ok_or_else(|| Error::RowNotFound) + .ok_or(Error::RowNotFound) } /// Store data for one or more `table` rows in the `family_name` Column family diff --git a/transaction-status/src/parse_instruction.rs b/transaction-status/src/parse_instruction.rs index ebaac0922a3524..c2447da1fe3d27 100644 --- a/transaction-status/src/parse_instruction.rs +++ b/transaction-status/src/parse_instruction.rs @@ -66,7 +66,7 @@ pub fn parse( ) -> Result { let program_name = PARSABLE_PROGRAM_IDS .get(program_id) - .ok_or_else(|| ParseInstructionError::ProgramNotParsable)?; + .ok_or(ParseInstructionError::ProgramNotParsable)?; let parsed_json = match program_name { ParsableProgram::SplMemo => parse_memo(instruction), ParsableProgram::SplToken => serde_json::to_value(parse_token(instruction, account_keys)?)?, From dc1f8a81516162d3340703c21e9a425f0d1c04e8 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Fri, 9 Oct 2020 19:24:57 -0600 Subject: [PATCH 4/8] clippy: useless_vec (nightly) --- core/benches/cluster_info.rs | 2 +- core/benches/retransmit_stage.rs | 4 +-- 
core/src/broadcast_stage.rs | 32 +++++++++---------- .../broadcast_fake_shreds_run.rs | 10 +++--- .../fail_entry_verification_broadcast_run.rs | 10 +++--- .../broadcast_stage/standard_broadcast_run.rs | 14 ++++---- core/src/retransmit_stage.rs | 6 ++-- core/src/tvu.rs | 2 +- runtime/src/bank.rs | 6 ++-- 9 files changed, 42 insertions(+), 44 deletions(-) diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs index 697db76e81b683..339cc46314ffca 100644 --- a/core/benches/cluster_info.rs +++ b/core/benches/cluster_info.rs @@ -38,7 +38,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { let stakes = Arc::new(stakes); let cluster_info = Arc::new(cluster_info); let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes)); - let shreds = Arc::new(shreds); + let shreds: Arc<[Shred]> = shreds.into(); let last_datapoint = Arc::new(AtomicU64::new(0)); bencher.iter(move || { let shreds = shreds.clone(); diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index 65bdc0d7ef638a..3557f7a1f180a9 100644 --- a/core/benches/retransmit_stage.rs +++ b/core/benches/retransmit_stage.rs @@ -56,7 +56,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { let (packet_sender, packet_receiver) = channel(); let packet_receiver = Arc::new(Mutex::new(packet_receiver)); const NUM_THREADS: usize = 2; - let sockets = (0..NUM_THREADS) + let sockets: Vec<_> = (0..NUM_THREADS) .map(|_| UdpSocket::bind("0.0.0.0:0").unwrap()) .collect(); @@ -73,7 +73,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { info!("batches: {}", batches.len()); let retransmitter_handles = retransmitter( - Arc::new(sockets), + sockets.into(), bank_forks, &leader_schedule_cache, cluster_info, diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 04970cd7022148..8f681ba988008f 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -43,7 +43,7 @@ mod standard_broadcast_run; pub(crate) const NUM_INSERT_THREADS: usize = 
2; pub(crate) type RetransmitSlotsSender = CrossbeamSender>>; pub(crate) type RetransmitSlotsReceiver = CrossbeamReceiver>>; -pub(crate) type RecordReceiver = Receiver<(Arc>, Option)>; +pub(crate) type RecordReceiver = Receiver<(Arc<[Shred]>, Option)>; pub(crate) type TransmitReceiver = Receiver<(TransmitShreds, Option)>; #[derive(Debug, PartialEq, Eq, Clone)] @@ -104,14 +104,14 @@ impl BroadcastStageType { } } -pub type TransmitShreds = (Option>>, Arc>); +pub type TransmitShreds = (Option>>, Arc<[Shred]>); trait BroadcastRun { fn run( &mut self, blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, ) -> Result<()>; fn transmit( &mut self, @@ -154,7 +154,7 @@ impl BroadcastStage { blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, mut broadcast_stage_run: impl BroadcastRun, ) -> BroadcastStageReturnType { loop { @@ -307,21 +307,19 @@ impl BroadcastStage { let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch); let stakes = stakes.map(Arc::new); - let data_shreds = Arc::new( - blockstore - .get_data_shreds_for_slot(bank.slot(), 0) - .expect("My own shreds must be reconstructable"), - ); + let data_shreds: Arc<[Shred]> = blockstore + .get_data_shreds_for_slot(bank.slot(), 0) + .expect("My own shreds must be reconstructable") + .into(); if !data_shreds.is_empty() { socket_sender.send(((stakes.clone(), data_shreds), None))?; } - let coding_shreds = Arc::new( - blockstore - .get_coding_shreds_for_slot(bank.slot(), 0) - .expect("My own shreds must be reconstructable"), - ); + let coding_shreds: Arc<[Shred]> = blockstore + .get_coding_shreds_for_slot(bank.slot(), 0) + .expect("My own shreds must be reconstructable") + .into(); if 
!coding_shreds.is_empty() { socket_sender.send(((stakes.clone(), coding_shreds), None))?; @@ -371,7 +369,7 @@ pub fn get_broadcast_peers( /// # Remarks pub fn broadcast_shreds( s: &UdpSocket, - shreds: &Arc>, + shreds: &Arc<[Shred]>, peers_and_stakes: &[(u64, usize)], peers: &[ContactInfo], last_datapoint_submit: &Arc, @@ -480,11 +478,11 @@ pub mod test { coding_shreds.clone(), data_shreds .into_iter() - .map(|s| (stakes.clone(), Arc::new(vec![s]))) + .map(|s| (stakes.clone(), vec![s].into())) .collect(), coding_shreds .into_iter() - .map(|s| (stakes.clone(), Arc::new(vec![s]))) + .map(|s| (stakes.clone(), vec![s].into())) .collect(), ) } diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index 46f001fe51265c..21ed8cf4e0867c 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -29,7 +29,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, ) -> Result<()> { // 1) Pull entries from banking stage let receive_results = broadcast_utils::recv_slot_entries(receiver)?; @@ -82,22 +82,22 @@ impl BroadcastRun for BroadcastFakeShredsRun { self.last_blockhash = Hash::default(); } - let data_shreds = Arc::new(data_shreds); + let data_shreds: Arc<[Shred]> = data_shreds.into(); blockstore_sender.send((data_shreds.clone(), None))?; // 3) Start broadcast step //some indicates fake shreds socket_sender.send(( - (Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)), + (Some(Arc::new(HashMap::new())), fake_data_shreds.into()), None, ))?; socket_sender.send(( - (Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)), + (Some(Arc::new(HashMap::new())), fake_coding_shreds.into()), None, ))?; //none indicates real shreds 
socket_sender.send(((None, data_shreds), None))?; - socket_sender.send(((None, Arc::new(coding_shreds)), None))?; + socket_sender.send(((None, coding_shreds.into()), None))?; Ok(()) } diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index a53596bd03587b..623c978c9afde9 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -34,7 +34,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, ) -> Result<()> { // 1) Pull entries from banking stage let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?; @@ -52,7 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { info!("Resolving bad shreds"); let mut shreds = vec![]; std::mem::swap(&mut shreds, &mut self.good_shreds); - blockstore_sender.send((Arc::new(shreds), None))?; + blockstore_sender.send((shreds.into(), None))?; } // 3) Convert entries to shreds + generate coding shreds. 
Set a garbage PoH on the last entry @@ -98,7 +98,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { (good_last_data_shred, bad_last_data_shred) }); - let data_shreds = Arc::new(data_shreds); + let data_shreds: Arc<[Shred]> = data_shreds.into(); blockstore_sender.send((data_shreds.clone(), None))?; // 4) Start broadcast step let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); @@ -108,8 +108,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds { // Stash away the good shred so we can rewrite them later self.good_shreds.extend(good_last_data_shred.clone()); - let good_last_data_shred = Arc::new(good_last_data_shred); - let bad_last_data_shred = Arc::new(bad_last_data_shred); + let good_last_data_shred: Arc<[Shred]> = good_last_data_shred.into(); + let bad_last_data_shred: Arc<[Shred]> = bad_last_data_shred.into(); // Store the good shred so that blockstore will signal ClusterSlots // that the slot is complete blockstore_sender.send((good_last_data_shred, None))?; diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 1228c385463126..e89a85648c6d92 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -154,7 +154,7 @@ impl StandardBroadcastRun { &mut self, blockstore: &Arc, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, receive_results: ReceiveResults, ) -> Result<()> { let mut receive_elapsed = receive_results.time_elapsed; @@ -219,7 +219,7 @@ impl StandardBroadcastRun { was interrupted", ), }); - let last_shred = Arc::new(vec![last_shred]); + let last_shred: Arc<[Shred]> = vec![last_shred].into(); socket_sender.send(((stakes.clone(), last_shred.clone()), batch_info.clone()))?; blockstore_sender.send((last_shred, batch_info))?; } 
@@ -242,11 +242,11 @@ impl StandardBroadcastRun { .expect("Start timestamp must exist for a slot if we're broadcasting the slot"), }); - let data_shreds = Arc::new(data_shreds); + let data_shreds: Arc<[Shred]> = data_shreds.into(); socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?; blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?; let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]); - let coding_shreds = Arc::new(coding_shreds); + let coding_shreds: Arc<[Shred]> = coding_shreds.into(); socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?; blockstore_sender.send((coding_shreds, batch_info))?; self.process_shreds_stats.update(&ProcessShredsStats { @@ -264,7 +264,7 @@ impl StandardBroadcastRun { fn insert( &mut self, blockstore: &Arc, - shreds: Arc>, + shreds: Arc<[Shred]>, broadcast_shred_batch_info: Option, ) -> Result<()> { // Insert shreds into blockstore @@ -301,7 +301,7 @@ impl StandardBroadcastRun { sock: &UdpSocket, cluster_info: &ClusterInfo, stakes: Option>>, - shreds: Arc>, + shreds: Arc<[Shred]>, broadcast_shred_batch_info: Option, ) -> Result<()> { const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000; @@ -385,7 +385,7 @@ impl BroadcastRun for StandardBroadcastRun { blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender<(TransmitShreds, Option)>, - blockstore_sender: &Sender<(Arc>, Option)>, + blockstore_sender: &Sender<(Arc<[Shred]>, Option)>, ) -> Result<()> { let receive_results = broadcast_utils::recv_slot_entries(receiver)?; self.process_receive_results( diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 3ef9575c1ec972..9a01c184fc1ff2 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -344,7 +344,7 @@ fn retransmit( /// * `cluster_info` - This structure needs to be updated and populated by the bank and via gossip. 
/// * `r` - Receive channel for shreds to be retransmitted to all the layer 1 nodes. pub fn retransmitter( - sockets: Arc<Vec<UdpSocket>>, + sockets: Arc<[UdpSocket]>, bank_forks: Arc<RwLock<BankForks>>, leader_schedule_cache: &Arc<LeaderScheduleCache>, cluster_info: Arc<ClusterInfo>, @@ -408,7 +408,7 @@ impl RetransmitStage { leader_schedule_cache: &Arc<LeaderScheduleCache>, blockstore: Arc<Blockstore>, cluster_info: &Arc<ClusterInfo>, - retransmit_sockets: Arc<Vec<UdpSocket>>, + retransmit_sockets: Arc<[UdpSocket]>, repair_socket: Arc<UdpSocket>, verified_receiver: Receiver<Vec<Packets>>, exit: &Arc<AtomicBool>, @@ -537,7 +537,7 @@ mod tests { let cluster_info = ClusterInfo::new_with_invalid_keypair(other); cluster_info.insert_info(me); - let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]); + let retransmit_socket: Arc<_> = vec![UdpSocket::bind("0.0.0.0:0").unwrap()].into(); let cluster_info = Arc::new(cluster_info); let (retransmit_sender, retransmit_receiver) = channel(); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 97e1413c08c8c8..04f40d4637ba32 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -150,7 +150,7 @@ impl Tvu { leader_schedule_cache, blockstore.clone(), &cluster_info, - Arc::new(retransmit_sockets), + retransmit_sockets.into(), repair_socket, verified_receiver, &exit, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9996793312be70..9084f2efa669f6 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -664,7 +664,7 @@ pub struct Bank { message_processor: MessageProcessor, /// Builtin programs activated dynamically by feature - feature_builtins: Arc<Vec<(Builtin, Pubkey)>>, + feature_builtins: Arc<[(Builtin, Pubkey)]>, /// Last time when the cluster info vote listener has synced with this bank pub last_vote_sync: AtomicU64, @@ -959,7 +959,7 @@ impl Bank { epoch_stakes: fields.epoch_stakes, is_delta: AtomicBool::new(fields.is_delta), message_processor: new(), - feature_builtins: new(), + feature_builtins: builtins::get().feature_builtins.into(), last_vote_sync: new(), rewards: new(), skip_drop: new(), @@ -3383,7 +3383,7 @@ impl Bank { for builtin in builtins.genesis_builtins {
self.add_builtin(&builtin.name, builtin.id, builtin.entrypoint); } - self.feature_builtins = Arc::new(builtins.feature_builtins); + self.feature_builtins = builtins.feature_builtins.into(); self.apply_feature_activations(true); } From 9d4232ac7f15c03afeea42176aa905faf8fad1f1 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Thu, 8 Oct 2020 14:47:50 -0600 Subject: [PATCH 5/8] Stable fmt --- sys-tuner/src/main.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/sys-tuner/src/main.rs b/sys-tuner/src/main.rs index 45f6f59afa33b0..5e9985ded7ae79 100644 --- a/sys-tuner/src/main.rs +++ b/sys-tuner/src/main.rs @@ -149,10 +149,6 @@ fn main() { for stream in listener.incoming() { if stream.is_ok() { info!("Tuning the system now"); - #[cfg(target_os = "linux")] - { - // tune_poh_service_priority(peer_uid); - } } } From 19597b8697c0b595ff5f8dceaf2bb9b96c9672c9 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Thu, 8 Oct 2020 16:12:23 -0600 Subject: [PATCH 6/8] Allow incomplete features --- core/src/lib.rs | 6 +++++- programs/stake/src/lib.rs | 6 +++++- programs/vote/src/lib.rs | 6 +++++- runtime/src/lib.rs | 6 +++++- sdk/src/lib.rs | 6 +++++- version/src/lib.rs | 6 +++++- 6 files changed, 30 insertions(+), 6 deletions(-) diff --git a/core/src/lib.rs b/core/src/lib.rs index acbc51854dd6c2..f31132f93f62df 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,4 +1,8 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] //! The `solana` library implements the Solana high-performance blockchain architecture. //! It includes a full Rust implementation of the architecture (see //! 
[Validator](server/struct.Validator.html)) as well as hooks to GPU implementations of its most diff --git a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index de15175b0a8ced..5598a139cbbf21 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -1,4 +1,8 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] use solana_sdk::genesis_config::GenesisConfig; pub mod config; diff --git a/programs/vote/src/lib.rs b/programs/vote/src/lib.rs index 3ab1cb58287642..1937569f55e9b4 100644 --- a/programs/vote/src/lib.rs +++ b/programs/vote/src/lib.rs @@ -1,4 +1,8 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] pub mod authorized_voters; pub mod vote_instruction; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 5b8eea61a1491c..c8cbdfbf5502ed 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1,4 +1,8 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] pub mod accounts; pub mod accounts_background_service; pub mod accounts_db; diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index c69001323e85bb..cfde7e1120a8af 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -1,4 +1,8 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] #![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] // Allows macro expansion of `use ::solana_sdk::*` to work within this crate diff --git a/version/src/lib.rs b/version/src/lib.rs index 37474804bae0ee..2fdc8ec39c33a3 100644 --- a/version/src/lib.rs +++ b/version/src/lib.rs @@ -1,4 +1,8 @@ 
-#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr( + RUSTC_WITH_SPECIALIZATION, + allow(incomplete_features), + feature(specialization) +)] extern crate serde_derive; use serde_derive::{Deserialize, Serialize}; From fbd4cec0098415d27edd15fcc9dc7211d95b2087 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Thu, 8 Oct 2020 10:36:35 -0600 Subject: [PATCH 7/8] Bump Rust to 1.47.0/nightly-2020-10-08 --- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index ef0893739e9e42..91b75b437c5d48 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.46.0 +FROM solanalabs/rust:1.47.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 708930700c0103..5b667a84dd3022 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.46.0 +FROM rust:1.47.0 # Add Google Protocol Buffers for Libra's metrics library. 
ENV PROTOC_VERSION 3.8.0 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 0d816d4bf62a21..fa6780efc18782 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.46.0 + stable_version=1.47.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2020-08-17 + nightly_version=2020-10-08 fi From 7b06cc4a7538e593c3b4ee3f653e343aa501f505 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Fri, 9 Oct 2020 10:26:19 -0600 Subject: [PATCH 8/8] REVERTME: Local crates --- Cargo.lock | 113 ++++++++++++++++++++++++++++++++++++- account-decoder/Cargo.toml | 2 +- ci/docker-run.sh | 7 ++- 3 files changed, 117 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4718ed985a2992..58b6afcaea0287 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3350,7 +3350,7 @@ dependencies = [ "solana-sdk 1.5.0", "solana-stake-program", "solana-vote-program", - "spl-token", + "spl-token 2.0.7", "thiserror", ] @@ -3732,7 +3732,7 @@ dependencies = [ "solana-version", "solana-vote-program", "solana-vote-signer", - "spl-token", + "spl-token 2.0.6", "systemstat", "tempfile", "thiserror", @@ -3768,6 +3768,29 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "solana-crate-features" +version = "1.3.17" +dependencies = [ + "backtrace", + "bytes 0.4.12", + "cc", + "curve25519-dalek 2.1.0", + "ed25519-dalek", + "either", + "failure", + "lazy_static", + "libc", + "rand_chacha 0.2.2", + "regex-syntax", + "reqwest", + "serde", + "syn 0.15.44", + "syn 1.0.27", + "tokio 0.1.22", + "winapi 0.3.8", +] + [[package]] name = "solana-crate-features" version = "1.5.0" @@ -4089,6 +4112,15 @@ dependencies = [ "log 0.4.8", ] +[[package]] +name = "solana-logger" +version = "1.3.17" +dependencies = [ + "env_logger", + "lazy_static", + "log 0.4.8", +] + [[package]] name = "solana-logger" version = "1.5.0" @@ -4377,6 +4409,46 @@ 
dependencies = [ "thiserror", ] +[[package]] +name = "solana-sdk" +version = "1.3.17" +dependencies = [ + "assert_matches", + "bincode", + "bs58", + "bv", + "byteorder", + "chrono", + "curve25519-dalek 2.1.0", + "digest 0.9.0", + "ed25519-dalek", + "generic-array 0.14.3", + "hex", + "hmac", + "itertools 0.9.0", + "libsecp256k1", + "log 0.4.8", + "memmap", + "num-derive", + "num-traits", + "pbkdf2", + "rand 0.7.3", + "rand_chacha 0.2.2", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "serde_json", + "sha2", + "sha3", + "solana-crate-features 1.3.17", + "solana-logger 1.3.17", + "solana-sdk-macro 1.3.17", + "solana-sdk-macro-frozen-abi 1.3.17", + "thiserror", +] + [[package]] name = "solana-sdk" version = "1.5.0" @@ -4431,6 +4503,17 @@ dependencies = [ "syn 1.0.27", ] +[[package]] +name = "solana-sdk-macro" +version = "1.3.17" +dependencies = [ + "bs58", + "proc-macro2 1.0.19", + "quote 1.0.6", + "rustversion", + "syn 1.0.27", +] + [[package]] name = "solana-sdk-macro" version = "1.5.0" @@ -4455,6 +4538,17 @@ dependencies = [ "syn 1.0.27", ] +[[package]] +name = "solana-sdk-macro-frozen-abi" +version = "1.3.17" +dependencies = [ + "lazy_static", + "proc-macro2 1.0.19", + "quote 1.0.6", + "rustc_version", + "syn 1.0.27", +] + [[package]] name = "solana-sdk-macro-frozen-abi" version = "1.5.0" @@ -4660,7 +4754,7 @@ dependencies = [ "solana-stake-program", "solana-vote-program", "spl-memo", - "spl-token", + "spl-token 2.0.6", "thiserror", ] @@ -4833,6 +4927,19 @@ dependencies = [ "thiserror", ] +[[package]] +name = "spl-token" +version = "2.0.7" +dependencies = [ + "arrayref", + "num-derive", + "num-traits", + "num_enum", + "remove_dir_all", + "solana-sdk 1.3.17", + "thiserror", +] + [[package]] name = "standback" version = "0.2.9" diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index f304c36196d235..69ae8682a78c99 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -22,7 +22,7 @@ 
solana-config-program = { path = "../programs/config", version = "1.5.0" } solana-sdk = { path = "../sdk", version = "1.5.0" } solana-stake-program = { path = "../programs/stake", version = "1.5.0" } solana-vote-program = { path = "../programs/vote", version = "1.5.0" } -spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] } +spl-token-v2-0 = { path = "../../solana-program-library/token/program", package = "spl-token", version = "=2.0.7" } thiserror = "1.0" [package.metadata.docs.rs] diff --git a/ci/docker-run.sh b/ci/docker-run.sh index dbe30b96bfdb13..280d53374c7e8d 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -47,7 +47,12 @@ if [[ -n $CI ]]; then else # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux # ~/.cargo - ARGS+=(--volume "$PWD:/home") + ARGS+=( + --volume "$PWD:/home" + --volume "/home/trent/code/solana/solana-2:/solana-2" + --volume "/home/trent/code/solana/solana-program-library:/solana-program-library" + ) + fi ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")