diff --git a/.cargo/config.toml b/.cargo/config.toml index 5355758f7a4fa..492f552bff8e6 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -28,4 +28,20 @@ rustflags = [ "-Aclippy::while_immutable_condition", # false positives "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives + + # opt into specific uncontroversial style category lints: + # (These lints have no downside) + "-Dclippy::field_reassign_with_default", + "-Dclippy::into_iter_on_ref", + "-Dclippy::iter_cloned_collect", + "-Dclippy::manual_saturating_arithmetic", + "-Dclippy::map_collect_result_unit", + "-Dclippy::match_ref_pats", + "-Dclippy::mem_replace_option_with_none", + "-Dclippy::mem_replace_with_default", + "-Dclippy::needless_borrow", + "-Dclippy::redundant_static_lifetimes", + "-Dclippy::single_match", + "-Dclippy::unwrap_or_else_default", + "-Dclippy::redundant_closure", # may alter side-effect timing. ] diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index c0f3b96e093cb..132680dafb1a9 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -198,7 +198,7 @@ fn block_production(c: &mut Criterion) { block_builder.push(extrinsic_set_time(1)).unwrap(); import_block(client, block_builder.build().unwrap()); - let (max_transfer_count, extrinsics) = prepare_benchmark(&client); + let (max_transfer_count, extrinsics) = prepare_benchmark(client); log::info!("Maximum transfer count: {}", max_transfer_count); let mut group = c.benchmark_group("Block production"); diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 750b4f7acc121..d5b4832873646 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -88,7 +88,7 @@ impl<'a> ExportImportRevertExecutor<'a> { // Setting base_path to be a temporary folder if we are importing blocks. 
// This allows us to make sure we are importing from scratch. let base_path = match sub_command { - SubCommand::ExportBlocks => &self.base_path.path(), + SubCommand::ExportBlocks => self.base_path.path(), SubCommand::ImportBlocks => { tmp = tempdir().unwrap(); tmp.path() diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index cdcb80a110b74..217ed6c606737 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -87,17 +87,14 @@ where pub fn read_from_db(db: &dyn Database, column: u32, prefix: &[u8]) -> Result { let mut storage = BTreeMap::new(); - match db.get(column, prefix) { - Some(leaves) => { - let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) { - Ok(vals) => vals, - Err(_) => return Err(Error::Backend("Error decoding leaves".into())), - }; - for (number, hashes) in vals.into_iter() { - storage.insert(Reverse(number), hashes); - } - }, - None => {}, + if let Some(leaves) = db.get(column, prefix) { + let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) { + Ok(vals) => vals, + Err(_) => return Err(Error::Backend("Error decoding leaves".into())), + }; + for (number, hashes) in vals.into_iter() { + storage.insert(Reverse(number), hashes); + } } Ok(Self { storage }) } diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 334b2638ca58c..342851354c8d6 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -95,7 +95,7 @@ fn cryptos_are_compatible() { libp2p::identity::Keypair::Ed25519(x) => x, _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), }; - sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() + sp_core::ed25519::Pair::from_seed_slice(libp2p_ed_secret.secret().as_ref()).unwrap() }; let sp_core_public = sp_core_secret.public(); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 
f5ccd9023a3db..70e122f644802 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -723,7 +723,7 @@ mod tests { let proposer = proposer_factory.init_with_now( &client.header(&block_id).unwrap().unwrap(), - Box::new(move || time::Instant::now()), + Box::new(time::Instant::now), ); let deadline = time::Duration::from_secs(9); @@ -792,7 +792,7 @@ mod tests { expected_pool_transactions| { let proposer = proposer_factory.init_with_now( &client.header(&BlockId::number(number)).unwrap().unwrap(), - Box::new(move || time::Instant::now()), + Box::new(time::Instant::now), ); // when @@ -955,7 +955,7 @@ mod tests { // add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources (0..MAX_SKIPPED_TRANSACTIONS * 2) .into_iter() - .map(|i| exhausts_resources_extrinsic_from(i)) + .map(exhausts_resources_extrinsic_from) // and some transactions that are okay. .chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _))) .collect(), @@ -1018,7 +1018,7 @@ mod tests { SOURCE, (0..MAX_SKIPPED_TRANSACTIONS + 2) .into_iter() - .map(|i| exhausts_resources_extrinsic_from(i)) + .map(exhausts_resources_extrinsic_from) // and some transactions that are okay. 
.chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _))) .collect(), diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 3be182ceb8f39..3934767888a63 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -197,7 +197,7 @@ mod tests { let (rpc, _) = setup_io_handler(); let request = r#"{"jsonrpc":"2.0","method":"beefy_getFinalizedHead","params":[],"id":1}"#; let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"BEEFY RPC endpoint not ready"},"id":1}"#.to_string(); - let (response, _) = rpc.raw_json_request(&request).await.unwrap(); + let (response, _) = rpc.raw_json_request(request).await.unwrap(); assert_eq!(expected_response, response.result); } diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 129484199de89..b3c3821db8920 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -165,28 +165,25 @@ where // Run inner block import. let inner_import_result = self.inner.import_block(block, new_cache).await?; - match (beefy_proof, &inner_import_result) { - (Some(proof), ImportResult::Imported(_)) => { - let status = self.backend.blockchain().info(); - if number <= status.finalized_number && - Some(hash) == - self.backend - .blockchain() - .hash(number) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - { - // The proof is valid and the block is imported and final, we can import. - self.import_beefy_justification_unchecked(number, proof); - } else { - error!( - target: "beefy", - "🥩 Cannot import justification: {:?} for, not yet final, block number {:?}", - proof, - number, - ); - } - }, - _ => (), + if let (Some(proof), ImportResult::Imported(_)) = (beefy_proof, &inner_import_result) { + let status = self.backend.blockchain().info(); + if number <= status.finalized_number && + Some(hash) == + self.backend + .blockchain() + .hash(number) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
+ { + // The proof is valid and the block is imported and final, we can import. + self.import_beefy_justification_unchecked(number, proof); + } else { + error!( + target: "beefy", + "🥩 Cannot import justification: {:?} for, not yet final, block number {:?}", + proof, + number, + ); + } } Ok(inner_import_result) diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index d9be18593dac7..d4e0d7f13b439 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -60,7 +60,7 @@ fn verify_with_validator_set( let message = signed_commitment.commitment.encode(); let valid_signatures = validator_set .validators() - .into_iter() + .iter() .zip(signed_commitment.signatures.iter()) .filter(|(id, signature)| { signature diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index f0257d179cb33..0a6056e14c5ba 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -64,7 +64,7 @@ use crate::{ keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, }; -pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; +pub(crate) const BEEFY_PROTOCOL_NAME: &str = "/beefy/1"; const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); @@ -502,12 +502,12 @@ fn finalize_block_and_wait_for_beefy( if expected_beefy.is_empty() { // run for quarter second then verify no new best beefy block available let timeout = Some(Duration::from_millis(250)); - streams_empty_after_timeout(best_blocks, &net, runtime, timeout); - streams_empty_after_timeout(versioned_finality_proof, &net, runtime, None); + streams_empty_after_timeout(best_blocks, net, runtime, timeout); + streams_empty_after_timeout(versioned_finality_proof, net, runtime, None); } else { // run until expected beefy blocks are received - wait_for_best_beefy_blocks(best_blocks, &net, runtime, expected_beefy); - 
wait_for_beefy_signed_commitments(versioned_finality_proof, &net, runtime, expected_beefy); + wait_for_best_beefy_blocks(best_blocks, net, runtime, expected_beefy); + wait_for_beefy_signed_commitments(versioned_finality_proof, net, runtime, expected_beefy); } } diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9f1938fa91c33..9601c303b2d28 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1147,7 +1147,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1); // keystore doesn't contain other keys than validators' assert_eq!(worker.verify_validator_set(&1, &validator_set), Ok(())); @@ -1170,7 +1170,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1); let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); @@ -1236,7 +1236,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1); assert!(worker.voting_oracle.sessions.is_empty()); @@ -1270,7 +1270,7 @@ pub(crate) mod tests { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 
1); + let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1); fn new_vote( block_number: NumberFor, diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 803e9c1e8bf26..8199f5fd8b697 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -344,7 +344,7 @@ mod tests { .unwrap(); assert!(backend - .storage(&sp_core::storage::well_known_keys::CODE) + .storage(sp_core::storage::well_known_keys::CODE) .unwrap_err() .contains("Database missing expected key"),); } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index 10739f63ef779..dcac6a5ae74e2 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -77,17 +77,14 @@ impl ImportResult { ) where B: BlockT, { - match self { - ImportResult::Imported(aux) => { - if aux.clear_justification_requests { - justification_sync_link.clear_justification_requests(); - } - - if aux.needs_justification { - justification_sync_link.request_justification(hash, number); - } - }, - _ => {}, + if let ImportResult::Imported(aux) = self { + if aux.clear_justification_requests { + justification_sync_link.clear_justification_requests(); + } + + if aux.needs_justification { + justification_sync_link.request_justification(hash, number); + } } } } diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index b9bb06551f818..d0cbd6d27ef17 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -142,7 +142,7 @@ impl ManualSealApiServer for ManualSeal { let (sender, receiver) = oneshot::channel(); let command = EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }; sink.send(command).await?; - receiver.await.map(|_| true).map_err(|e| JsonRpseeError::to_call_error(e)) + receiver.await.map(|_| true).map_err(JsonRpseeError::to_call_error) } } diff --git 
a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index c1d01500ffe47..2b7feff6b83c4 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -73,7 +73,7 @@ where // Get headers of this slot. let mut headers_with_sig = - load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])?.unwrap_or_else(Vec::new); + load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])?.unwrap_or_default(); // Get first slot saved. let slot_header_start = SLOT_HEADER_START.to_vec(); diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 78aed7858e342..e3bb6d0769f58 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -179,7 +179,7 @@ fn state_access_benchmarks(c: &mut Criterion) { || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), |state| { for key in keys.iter().cycle().take(keys.len() * multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + let _ = state.storage(key).expect("Doesn't fail").unwrap(); } }, BatchSize::SmallInput, @@ -217,7 +217,7 @@ fn state_access_benchmarks(c: &mut Criterion) { || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + let _ = state.storage(key).expect("Doesn't fail").unwrap(); } }, BatchSize::SmallInput, @@ -255,7 +255,7 @@ fn state_access_benchmarks(c: &mut Criterion) { || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); + let _ = state.storage_hash(key).expect("Doesn't fail").unwrap(); } }, BatchSize::SmallInput, diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 465db08fe3afc..b35016c4e2874 100644 --- a/client/db/src/lib.rs +++ 
b/client/db/src/lib.rs @@ -299,7 +299,7 @@ impl AsTrieBackend> for RefTrackingState { fn as_trie_backend( &self, ) -> &sp_state_machine::TrieBackend> { - &self.state.as_trie_backend() + self.state.as_trie_backend() } } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 7610c4c8f32e0..f115659fbee09 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -648,7 +648,7 @@ impl CodeExecutor for NativeElseWasmExecut ); used_native = true; - let res = with_externalities_safe(&mut **ext, move || (call)()) + let res = with_externalities_safe(&mut **ext, call) .and_then(|r| r.map(NativeOrEncoded::Native).map_err(Error::ApiError)); Ok(res) diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index 9126cb336bde6..192217343ce27 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -134,7 +134,7 @@ impl RuntimeBuilder { }, }; - RuntimeBlob::uncompress_if_needed(&wasm) + RuntimeBlob::uncompress_if_needed(wasm) .expect("failed to create a runtime blob out of test runtime") }; diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 85df72de77b54..328b71150043f 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -283,7 +283,7 @@ mod tests { let (rpc, _) = setup_io_handler(EmptyVoterState); let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":0}"#.to_string(); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; - let (response, _) = rpc.raw_json_request(&request).await.unwrap(); + let (response, _) = rpc.raw_json_request(request).await.unwrap(); assert_eq!(expected_response, response.result); } @@ -306,7 +306,7 @@ mod tests { },\"id\":0}".to_string(); let request = 
r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; - let (response, _) = rpc.raw_json_request(&request).await.unwrap(); + let (response, _) = rpc.raw_json_request(request).await.unwrap(); assert_eq!(expected_response, response.result); } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 5f94a4d1b65be..6a4cc284d5e66 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -2262,10 +2262,10 @@ mod tests { // add 60 peers, 30 authorities and 30 full nodes let mut authorities = Vec::new(); - authorities.resize_with(30, || PeerId::random()); + authorities.resize_with(30, PeerId::random); let mut full_nodes = Vec::new(); - full_nodes.resize_with(30, || PeerId::random()); + full_nodes.resize_with(30, PeerId::random); for i in 0..30 { val.inner.write().peers.new_peer(authorities[i], ObservedRole::Authority); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 3dd21d51b6a2d..d194dc21e05e8 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1509,8 +1509,8 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let environment = test_environment(&link, Some(keystore), network_service.clone(), ()); let round_state = || finality_grandpa::round::State::genesis(Default::default()); - let base = || Default::default(); - let historical_votes = || finality_grandpa::HistoricalVotes::new(); + let base = Default::default; + let historical_votes = finality_grandpa::HistoricalVotes::new; let get_current_round = |n| { let current_rounds = environment diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index b63e3f64a1256..4c914202a0b90 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -554,8 +554,7 @@ mod tests { spawn(gossip_engine); - let mut 
subscribers = - subscribers.into_iter().map(|s| block_on_stream(s)).collect::>(); + let mut subscribers = subscribers.into_iter().map(block_on_stream).collect::>(); // Expect each subscriber to receive both events. for message in messages { @@ -690,7 +689,7 @@ mod tests { // Send messages into the network event stream. for (i_notification, messages) in notifications.iter().enumerate() { let messages = messages - .into_iter() + .iter() .enumerate() .map(|(i_message, Message { topic })| { // Embed the topic in the first 256 bytes of the message to be extracted by @@ -751,13 +750,13 @@ mod tests { // Compare amount of expected messages with amount of received messages. for (expected_topic, expected_num) in expected_msgs_per_topic_all_chan.iter() { assert_eq!( - received_msgs_per_topic_all_chan.get(&expected_topic).unwrap_or(&0), + received_msgs_per_topic_all_chan.get(expected_topic).unwrap_or(&0), expected_num, ); } for (received_topic, received_num) in expected_msgs_per_topic_all_chan.iter() { assert_eq!( - expected_msgs_per_topic_all_chan.get(&received_topic).unwrap_or(&0), + expected_msgs_per_topic_all_chan.get(received_topic).unwrap_or(&0), received_num, ); } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 52fa28e76e207..bb2b7b676b742 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -668,7 +668,7 @@ mod tests { fn secret_bytes(kp: &Keypair) -> Vec { match kp { - Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), + Keypair::Ed25519(p) => p.secret().as_ref().to_vec(), Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), _ => panic!("Unexpected keypair."), } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab93662968dc2..5fd766429b87b 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -1077,49 +1077,38 @@ mod tests { let fut = futures::future::poll_fn(move |cx| { 'polling: loop { for swarm_n in 0..swarms.len() { - 
match swarms[swarm_n].0.poll_next_unpin(cx) { - Poll::Ready(Some(e)) => { - match e { - SwarmEvent::Behaviour(behavior) => { - match behavior { - DiscoveryOut::UnroutablePeer(other) | - DiscoveryOut::Discovered(other) => { - // Call `add_self_reported_address` to simulate identify - // happening. - let addr = swarms - .iter() - .find_map(|(s, a)| { - if s.behaviour().local_peer_id == other { - Some(a.clone()) - } else { - None - } - }) - .unwrap(); - swarms[swarm_n] - .0 - .behaviour_mut() - .add_self_reported_address( - &other, - [protocol_name_from_protocol_id(&protocol_id)] - .iter(), - addr, - ); - - to_discover[swarm_n].remove(&other); - }, - DiscoveryOut::RandomKademliaStarted(_) => {}, - e => { - panic!("Unexpected event: {:?}", e) - }, - } + if let Poll::Ready(Some(e)) = swarms[swarm_n].0.poll_next_unpin(cx) { + if let SwarmEvent::Behaviour(behavior) = e { + match behavior { + DiscoveryOut::UnroutablePeer(other) | + DiscoveryOut::Discovered(other) => { + // Call `add_self_reported_address` to simulate identify + // happening. 
+ let addr = swarms + .iter() + .find_map(|(s, a)| { + if s.behaviour().local_peer_id == other { + Some(a.clone()) + } else { + None + } + }) + .unwrap(); + swarms[swarm_n].0.behaviour_mut().add_self_reported_address( + &other, + [protocol_name_from_protocol_id(&protocol_id)].iter(), + addr, + ); + + to_discover[swarm_n].remove(&other); + }, + DiscoveryOut::RandomKademliaStarted(_) => {}, + e => { + panic!("Unexpected event: {:?}", e) }, - // ignore non Behaviour events - _ => {}, } - continue 'polling - }, - _ => {}, + } // else ignore non Behaviour events + continue 'polling } } break diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 9eab85a4c1ce1..b3ad60ada6132 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -1020,11 +1020,10 @@ mod tests { .spawn_obj({ async move { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - result.unwrap(); - }, - _ => {}, + if let SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) = + swarm.select_next_some().await + { + result.unwrap(); } } } @@ -1122,12 +1121,11 @@ mod tests { .spawn_obj({ async move { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - assert!(result.is_ok()); - break - }, - _ => {}, + if let SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) = + swarm.select_next_some().await + { + assert!(result.is_ok()); + break } } } @@ -1250,11 +1248,10 @@ mod tests { .spawn_obj( async move { loop { - match swarm_2.select_next_some().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - result.unwrap(); - }, - _ => {}, + if let SwarmEvent::Behaviour(Event::InboundRequest { result, .. 
}) = + swarm_2.select_next_some().await + { + result.unwrap(); } } } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index e2fe58423abfe..1c1261bec98e9 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -463,12 +463,10 @@ fn notifications_back_pressure() { async_std::task::block_on(async move { // Wait for the `NotificationStreamOpened`. - loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {}, - }; - } + while !matches!( + events_stream1.next().await.unwrap(), + Event::NotificationStreamOpened { .. } + ) {} // Sending! for num in 0..TOTAL_NOTIFS { @@ -528,14 +526,13 @@ fn fallback_name_working() { let receiver = async_std::task::spawn(async move { // Wait for the `NotificationStreamOpened`. loop { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { - assert_eq!(protocol, PROTOCOL_NAME); - assert_eq!(negotiated_fallback, None); - break - }, - _ => {}, - }; + if let Event::NotificationStreamOpened { protocol, negotiated_fallback, .. 
} = + events_stream2.next().await.unwrap() + { + assert_eq!(protocol, PROTOCOL_NAME); + assert_eq!(negotiated_fallback, None); + break + } } }); diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index aae5f4de353fe..d96d42707b572 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -2998,7 +2998,7 @@ mod test { let fork_blocks = { let mut client = Arc::new(TestClientBuilder::new().build()); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] - .into_iter() + .iter() .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) .cloned() .collect::>(); @@ -3129,7 +3129,7 @@ mod test { let fork_blocks = { let mut client = Arc::new(TestClientBuilder::new().build()); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] - .into_iter() + .iter() .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) .cloned() .collect::>(); diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index a2bd5276c31d6..a2aef1081f341 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -66,8 +66,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) fn import_single_good_block_works() { let (_, _hash, number, peer_id, block) = prepare_good_block(); - let mut expected_aux = ImportedAux::default(); - expected_aux.is_new_best = true; + let expected_aux = ImportedAux { is_new_best: true, ..Default::default() }; match block_on(import_single_block( &mut substrate_test_runtime_client::new(), diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c0778767b75af..7ad97aa923628 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1086,13 +1086,14 @@ fn syncs_state() { genesis_storage .children_default .insert(child3.child_info.storage_key().to_vec(), child3); - let mut config_one = 
FullPeerConfig::default(); - config_one.extra_storage = Some(genesis_storage.clone()); + let config_one = + FullPeerConfig { extra_storage: Some(genesis_storage.clone()), ..Default::default() }; net.add_full_peer_with_config(config_one); - let mut config_two = FullPeerConfig::default(); - config_two.extra_storage = Some(genesis_storage); - config_two.sync_mode = - SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; + let config_two = FullPeerConfig { + extra_storage: Some(genesis_storage), + sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }, + ..Default::default() + }; net.add_full_peer_with_config(config_two); net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 5eed142ff3871..afd9c554601b0 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -358,7 +358,7 @@ mod tests { extrinsic: ::Extrinsic, ) -> Result<(), ()> { let source = sc_transaction_pool_api::TransactionSource::Local; - futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) + futures::executor::block_on(self.0.submit_one(at, source, extrinsic)) .map(|_| ()) .map_err(|_| ()) } @@ -414,7 +414,7 @@ mod tests { let block = block_builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); - assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); + assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, key).unwrap()); let mut block_builder = client.new_block(Default::default()).unwrap(); block_builder @@ -426,6 +426,6 @@ mod tests { let block = block_builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); - assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).is_none()); + assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, key).is_none()); } } diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs 
index 7d0ffdc62e080..241e9e09791b7 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -102,7 +102,7 @@ where .await .map_err(|e| { e.into_pool_error() - .map(|e| Error::Pool(e)) + .map(Error::Pool) .unwrap_or_else(|e| Error::Verification(Box::new(e))) .into() }) @@ -179,7 +179,7 @@ where fn watch_extrinsic(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult { let best_block_hash = self.client.info().best_hash; - let dxt = match TransactionFor::

::decode(&mut &xt[..]).map_err(|e| Error::from(e)) { + let dxt = match TransactionFor::

::decode(&mut &xt[..]).map_err(Error::from) { Ok(dxt) => dxt, Err(e) => { let _ = sink.reject(JsonRpseeError::from(e)); diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index f969812e5b14c..0eee457f0d7f6 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -150,7 +150,7 @@ async fn author_should_watch_extrinsic() { #[tokio::test] async fn author_should_return_watch_validation_error() { - const METHOD: &'static str = "author_submitAndWatchExtrinsic"; + const METHOD: &str = "author_submitAndWatchExtrinsic"; let api = TestSetup::into_rpc(); let failed_sub = api @@ -179,7 +179,7 @@ async fn author_should_return_pending_extrinsics() { #[tokio::test] async fn author_should_remove_extrinsics() { - const METHOD: &'static str = "author_removeExtrinsic"; + const METHOD: &str = "author_removeExtrinsic"; let setup = TestSetup::default(); let api = setup.author().into_rpc(); diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index ea24524cd2ea9..08d7f15c885c7 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -107,19 +107,19 @@ impl SystemApiServer::Number> async fn system_health(&self) -> RpcResult { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Health(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_local_peer_id(&self) -> RpcResult { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_local_listen_addresses(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_peers( @@ -128,14 
+128,14 @@ impl SystemApiServer::Number> self.deny_unsafe.check_if_safe()?; let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Peers(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_network_state(&self) -> RpcResult { self.deny_unsafe.check_if_safe()?; let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_add_reserved_peer(&self, peer: String) -> RpcResult<()> { @@ -163,19 +163,19 @@ impl SystemApiServer::Number> async fn system_reserved_peers(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkReservedPeers(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_node_roles(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } async fn system_sync_state(&self) -> RpcResult::Number>> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::SyncState(tx)); - rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + rx.await.map_err(JsonRpseeError::to_call_error) } fn system_add_log_filter(&self, directives: String) -> RpcResult<()> { diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 2f91648008ff7..3a8b4c04aa8d7 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -340,9 +340,9 @@ async fn system_network_reserved_peers() { #[test] fn test_add_reset_log_filter() { - const EXPECTED_BEFORE_ADD: &'static str = "EXPECTED_BEFORE_ADD"; - const EXPECTED_AFTER_ADD: &'static str = "EXPECTED_AFTER_ADD"; - 
const EXPECTED_WITH_TRACE: &'static str = "EXPECTED_WITH_TRACE"; + const EXPECTED_BEFORE_ADD: &str = "EXPECTED_BEFORE_ADD"; + const EXPECTED_AFTER_ADD: &str = "EXPECTED_AFTER_ADD"; + const EXPECTED_WITH_TRACE: &str = "EXPECTED_WITH_TRACE"; // Enter log generation / filter reload if std::env::var("TEST_LOG_FILTER").is_ok() { diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index de851ac848919..a34286c48f770 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -226,7 +226,7 @@ where Some(recorder) => { let trie_state = state.as_trie_backend(); - let backend = sp_state_machine::TrieBackendBuilder::wrap(&trie_state) + let backend = sp_state_machine::TrieBackendBuilder::wrap(trie_state) .with_recorder(recorder.clone()) .build(); diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 919c90400e94c..1f713f7a65c6e 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -451,10 +451,8 @@ pub fn sync( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg)), - // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise - // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + (0..NUM_FULL_NODES).map(|_| &full_builder), + iter::empty::<(String, Fb)>(), 30500, ); info!("Checking block sync"); diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs index fc347c1cc2eb3..8f205f1f42152 100644 --- a/client/sysinfo/src/sysinfo.rs +++ b/client/sysinfo/src/sysinfo.rs @@ -338,7 +338,7 @@ pub fn benchmark_disk_random_writes( // Also the chunk's size is deliberately exactly half of a modern disk's // sector size to trigger an RMW cycle. 
let chunk = &buffer[position..position + 2048]; - fp.write_all(&chunk) + fp.write_all(chunk) .map_err(|error| format!("failed to write to the test file: {}", error))?; } @@ -381,7 +381,7 @@ pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> f64 { let run = || -> Result<(), String> { for (sig, msg) in sigs.iter().zip(msgs.iter()) { - let mut ok = sr25519_verify(&sig, &msg[..], &pair.public()); + let mut ok = sr25519_verify(sig, &msg[..], &pair.public()); clobber_value(&mut ok); } Ok(()) diff --git a/client/sysinfo/src/sysinfo_linux.rs b/client/sysinfo/src/sysinfo_linux.rs index 41ab6014cbef0..37b0d786db940 100644 --- a/client/sysinfo/src/sysinfo_linux.rs +++ b/client/sysinfo/src/sysinfo_linux.rs @@ -36,7 +36,7 @@ where { Regex::new(regex) .expect("regex is correct; qed") - .captures(&data)? + .captures(data)? .get(1)? .as_str() .parse() diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 58941617bfb6a..f393760443194 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -340,8 +340,8 @@ mod tests { }; use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; - const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; - const EXPECTED_NODE_NAME: &'static str = "THE_NODE"; + const EXPECTED_LOG_MESSAGE: &str = "yeah logging works as expected"; + const EXPECTED_NODE_NAME: &str = "THE_NODE"; fn init_logger(directives: &str) { let _ = LoggerBuilder::new(directives).init().unwrap(); @@ -371,7 +371,7 @@ mod tests { run_test_in_another_process("test_logger_filters", || { let test_directives = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; - init_logger(&test_directives); + init_logger(test_directives); tracing::dispatcher::get_default(|dispatcher| { let test_filter = |target, level| { @@ -431,7 +431,7 @@ mod tests { fn log_something_with_dash_target_name() { if env::var("ENABLE_LOGGING").is_ok() { let test_directives = 
"test-target=info"; - let _guard = init_logger(&test_directives); + let _guard = init_logger(test_directives); log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 19acbddbe7843..2cea3a85e4ac8 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -995,8 +995,7 @@ mod tests { // given let (ready, is_ready) = std::sync::mpsc::sync_channel(0); let (tx, rx) = std::sync::mpsc::sync_channel(1); - let mut api = TestApi::default(); - api.delay = Arc::new(Mutex::new(rx.into())); + let api = TestApi { delay: Arc::new(Mutex::new(rx.into())), ..Default::default() }; let pool = Arc::new(Pool::new(Default::default(), true.into(), api.into())); // when diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 0f4d43505e3f9..b758509ea46e4 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -898,7 +898,7 @@ impl, I: 'static> Pallet { /// Check if an account has a given role. pub fn is_member_of(who: &T::AccountId, role: MemberRole) -> bool { - Members::::get(role).contains(&who) + Members::::get(role).contains(who) } /// Check if an account is a founder. diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 0fd4dd3281516..a332bd307a411 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -163,7 +163,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut ext: sp_io::TestExternalities = storage.into(); // Clear thread local vars for https://github.com/paritytech/substrate/issues/10479. 
- ext.execute_with(|| take_hooks()); + ext.execute_with(take_hooks); ext.execute_with(|| System::set_block_number(1)); ext } diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 8ddccfd9cf939..9fc81bf892d8b 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -275,20 +275,15 @@ pub mod pallet { Default::default(); for uncle in uncles { - match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { - Ok(_) => { - let hash = uncle.hash(); - new_uncles.push(uncle); - existing_hashes.push(hash); - - if new_uncles.len() == MAX_UNCLES { - break - } - }, - Err(_) => { - // skip this uncle - }, - } + if Self::verify_uncle(&uncle, &existing_hashes, &mut acc).is_ok() { + let hash = uncle.hash(); + new_uncles.push(uncle); + existing_hashes.push(hash); + + if new_uncles.len() == MAX_UNCLES { + break + } + } // else skip this uncle } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index c2ba3c2be06d8..e43b72a21034a 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -344,7 +344,7 @@ pub fn make_vrf_output( let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot, 0); let vrf_inout = pair.vrf_sign(transcript); let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = - vrf_inout.0.make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); + vrf_inout.0.make_bytes::<[u8; 32]>(sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); let vrf_output = VRFOutput(vrf_inout.0.to_output()); let vrf_proof = VRFProof(vrf_inout.1); diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index ece0883387709..8edaff175ac55 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -194,7 +194,7 @@ fn no_author_vrf_output_for_secondary_plain() { fn authority_index() { new_test_ext(4).execute_with(|| { assert_eq!( - Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), + Babe::find_author([(BABE_ENGINE_ID, &[][..])].iter().cloned()), None, "Trivially 
invalid authorities are ignored" ) @@ -837,7 +837,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // generate an equivocation proof. let equivocation_proof = - generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::::get()); + generate_equivocation_proof(0, offending_authority_pair, CurrentSlot::::get()); // create the key ownership proof. let key_owner_proof = diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 5163a579c6f43..264c7f71eef6f 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -275,7 +275,7 @@ impl, I: 'static> Pallet { new_score: T::Score, ) -> Result, ListError> { // If no voter at that node, don't do anything. the caller just wasted the fee to call this. - let node = list::Node::::get(&account).ok_or(ListError::NodeNotFound)?; + let node = list::Node::::get(account).ok_or(ListError::NodeNotFound)?; let maybe_movement = List::update_position_for(node, new_score); if let Some((from, to)) = maybe_movement { Self::deposit_event(Event::::Rebagged { who: account.clone(), from, to }); diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index b4f852685842d..65064ac880c6b 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -278,7 +278,7 @@ impl, I: 'static> List { let thresholds = T::BagThresholds::get(); let idx = thresholds.partition_point(|&threshold| start_node_upper > threshold); let leftover_bags = thresholds - .into_iter() + .iter() .take(idx) .copied() .rev() @@ -439,21 +439,21 @@ impl, I: 'static> List { lighter_id: &T::AccountId, heavier_id: &T::AccountId, ) -> Result<(), ListError> { - let lighter_node = Node::::get(&lighter_id).ok_or(ListError::NodeNotFound)?; - let heavier_node = Node::::get(&heavier_id).ok_or(ListError::NodeNotFound)?; + let lighter_node = Node::::get(lighter_id).ok_or(ListError::NodeNotFound)?; + let heavier_node = Node::::get(heavier_id).ok_or(ListError::NodeNotFound)?; 
ensure!(lighter_node.bag_upper == heavier_node.bag_upper, ListError::NotInSameBag); // this is the most expensive check, so we do it last. ensure!( - T::ScoreProvider::score(&heavier_id) > T::ScoreProvider::score(&lighter_id), + T::ScoreProvider::score(heavier_id) > T::ScoreProvider::score(lighter_id), ListError::NotHeavier ); // remove the heavier node from this list. Note that this removes the node from storage and // decrements the node counter. let _ = - Self::remove(&heavier_id).defensive_proof("both nodes have been checked to exist; qed"); + Self::remove(heavier_id).defensive_proof("both nodes have been checked to exist; qed"); // re-fetch `lighter_node` from storage since it may have been updated when `heavier_node` // was removed. diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs index 9bdd54289fd88..7d22fd3bd0030 100644 --- a/frame/bags-list/src/list/tests.rs +++ b/frame/bags-list/src/list/tests.rs @@ -132,8 +132,7 @@ fn migrate_works() { assert_eq!(old_thresholds, vec![10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]); // when the new thresholds adds `15` and removes `2_000` - const NEW_THRESHOLDS: &'static [VoteWeight] = - &[10, 15, 20, 30, 40, 50, 60, 1_000, 10_000]; + const NEW_THRESHOLDS: &[VoteWeight] = &[10, 15, 20, 30, 40, 50, 60, 1_000, 10_000]; BagThresholds::set(NEW_THRESHOLDS); // and we call List::::migrate(old_thresholds); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 0cb32a4e3ecd6..407a9e02f228f 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -1557,7 +1557,7 @@ where } for attempt in 0..2 { - match Self::try_mutate_account( + if let Ok((imbalance, not_slashed)) = Self::try_mutate_account( who, |account, _is_new| @@ -1597,14 +1597,11 @@ where } }, ) { - Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed { - who: who.clone(), - amount: value.saturating_sub(not_slashed), - }); - return (imbalance, not_slashed) - }, - Err(_) => (), + 
Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(not_slashed), + }); + return (imbalance, not_slashed) } } @@ -1775,7 +1772,7 @@ where account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; account.reserved = account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - Self::ensure_can_withdraw(&who, value, WithdrawReasons::RESERVE, account.free) + Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, account.free) })?; Self::deposit_event(Event::Reserved { who: who.clone(), amount: value }); @@ -1833,7 +1830,7 @@ where // account is attempted to be illegally destroyed. for attempt in 0..2 { - match Self::mutate_account(who, |account| { + if let Ok((imbalance, not_slashed)) = Self::mutate_account(who, |account| { let best_value = match attempt { 0 => value, // If acting as a critical provider (i.e. first attempt failed), then ensure @@ -1850,14 +1847,11 @@ where // underflow should never happen, but it if does, there's nothing to be done here. (NegativeImbalance::new(actual), value - actual) }) { - Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed { - who: who.clone(), - amount: value.saturating_sub(not_slashed), - }); - return (imbalance, not_slashed) - }, - Err(_) => (), + Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(not_slashed), + }); + return (imbalance, not_slashed) } } // Should never get here as we ensure that ED is left in the second attempt. 
diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 456d6e77aa8eb..395cfcc60ce1d 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -215,7 +215,7 @@ where let id = validator_set.id(); let beefy_addresses = validator_set .validators() - .into_iter() + .iter() .cloned() .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 65c13bb1fc607..9b5b3c404a5ac 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -214,26 +214,26 @@ impl HostFn { let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), - _ => Err(err(span, &msg)), + _ => Err(err(span, msg)), }?; match *ret_ty { syn::Type::Path(tp) => { - let result = &tp.path.segments.last().ok_or(err(span, &msg))?; + let result = &tp.path.segments.last().ok_or(err(span, msg))?; let (id, span) = (result.ident.to_string(), result.ident.span()); - id.eq(&"Result".to_string()).then_some(()).ok_or(err(span, &msg))?; + id.eq(&"Result".to_string()).then_some(()).ok_or(err(span, msg))?; match &result.arguments { syn::PathArguments::AngleBracketed(group) => { if group.args.len() != 2 { - return Err(err(span, &msg)) + return Err(err(span, msg)) }; - let arg2 = group.args.last().ok_or(err(span, &msg))?; + let arg2 = group.args.last().ok_or(err(span, msg))?; let err_ty = match arg2 { syn::GenericArgument::Type(ty) => Ok(ty.clone()), - _ => Err(err(arg2.span(), &msg)), + _ => Err(err(arg2.span(), msg)), }?; match err_ty { @@ -241,49 +241,49 @@ impl HostFn { .path .segments .first() - .ok_or(err(arg2.span(), &msg))? + .ok_or(err(arg2.span(), msg))? .ident .to_string()), - _ => Err(err(tp.span(), &msg)), + _ => Err(err(tp.span(), msg)), }? 
.eq("TrapReason") .then_some(()) - .ok_or(err(span, &msg))?; + .ok_or(err(span, msg))?; - let arg1 = group.args.first().ok_or(err(span, &msg))?; + let arg1 = group.args.first().ok_or(err(span, msg))?; let ok_ty = match arg1 { syn::GenericArgument::Type(ty) => Ok(ty.clone()), - _ => Err(err(arg1.span(), &msg)), + _ => Err(err(arg1.span(), msg)), }?; let ok_ty_str = match ok_ty { syn::Type::Path(tp) => Ok(tp .path .segments .first() - .ok_or(err(arg1.span(), &msg))? + .ok_or(err(arg1.span(), msg))? .ident .to_string()), syn::Type::Tuple(tt) => { if !tt.elems.is_empty() { - return Err(err(arg1.span(), &msg)) + return Err(err(arg1.span(), msg)) }; Ok("()".to_string()) }, - _ => Err(err(ok_ty.span(), &msg)), + _ => Err(err(ok_ty.span(), msg)), }?; let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), "ReturnCode" => Ok(HostFnReturn::ReturnCode), - _ => Err(err(arg1.span(), &msg)), + _ => Err(err(arg1.span(), msg)), }?; Ok(Self { item, module, name, returns }) }, - _ => Err(err(span, &msg)), + _ => Err(err(span, msg)), } }, - _ => Err(err(span, &msg)), + _ => Err(err(span, msg)), } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 204908cc4a989..9a91e530915a6 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -85,7 +85,7 @@ pub mod test_utils { }); let trie_id = Storage::::generate_trie_id(address, nonce); set_balance(address, ::Currency::minimum_balance() * 10); - let contract = Storage::::new_contract(&address, trie_id, code_hash).unwrap(); + let contract = Storage::::new_contract(address, trie_id, code_hash).unwrap(); >::insert(address, contract); } pub fn set_balance(who: &AccountIdOf, amount: u64) { diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 02a360fe86b45..2b31dcd19f5ce 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -1156,7 +1156,7 @@ mod tests { #[test] fn 
contract_call_limited_gas() { let mut mock_ext = MockExt::default(); - assert_ok!(execute(&CODE_TRANSFER_LIMITED_GAS, vec![], &mut mock_ext)); + assert_ok!(execute(CODE_TRANSFER_LIMITED_GAS, vec![], &mut mock_ext)); assert_eq!( &mock_ext.calls, @@ -1203,7 +1203,7 @@ mod tests { #[test] fn contract_ecdsa_recover() { let mut mock_ext = MockExt::default(); - assert_ok!(execute(&CODE_ECDSA_RECOVER, vec![], &mut mock_ext)); + assert_ok!(execute(CODE_ECDSA_RECOVER, vec![], &mut mock_ext)); assert_eq!(mock_ext.ecdsa_recover.into_inner(), [([1; 65], [1; 32])]); } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index cd16fccd6661d..4387609c264bf 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -582,7 +582,7 @@ impl, I: 'static> Pallet { fn try_undelegate(who: T::AccountId, class: ClassOf) -> Result { let votes = VotingFor::::try_mutate(&who, &class, |voting| -> Result { - match sp_std::mem::replace(voting, Voting::default()) { + match sp_std::mem::take(voting) { Voting::Delegating(Delegating { balance, target, diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index de25355f0ca5b..309aaab908bb5 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -1227,7 +1227,7 @@ mod tests { // first, ensure that a successful execution releases the lock let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK); + let guard = StorageValueRef::persistent(OFFCHAIN_LOCK); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); roll_to(25); @@ -1284,7 +1284,7 @@ mod tests { // we must clear the offchain storage to ensure the offchain execution check doesn't get // in the way. 
- let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + let mut storage = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); MultiPhase::offchain_worker(24); assert!(pool.read().transactions.len().is_zero()); @@ -1375,7 +1375,7 @@ mod tests { // we must clear the offchain storage to ensure the offchain execution check doesn't get // in the way. - let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + let mut storage = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); MultiPhase::offchain_worker(block_plus(-1)); assert!(pool.read().transactions.len().is_zero()); @@ -1413,7 +1413,7 @@ mod tests { // we must clear the offchain storage to ensure the offchain execution check doesn't get // in the way. - let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + let mut storage = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); MultiPhase::offchain_worker(block_plus(-1)); assert!(pool.read().transactions.len().is_zero()); @@ -1429,7 +1429,7 @@ mod tests { // remove the cached submitted tx // this ensures that when the resubmit window rolls around, we're ready to regenerate // from scratch if necessary - let mut call_cache = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); + let mut call_cache = StorageValueRef::persistent(OFFCHAIN_CACHED_CALL); assert!(matches!(call_cache.get::>(), Ok(Some(_call)))); call_cache.clear(); diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 44e684c1bdcac..44f90f4e97d09 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -48,7 +48,7 @@ where let leaves_and_position_data = proof .leaf_indices .into_iter() - .map(|index| mmr_lib::leaf_index_to_pos(index)) + .map(mmr_lib::leaf_index_to_pos) .zip(leaves.into_iter()) .collect(); @@ -109,7 +109,7 @@ where let leaves_positions_and_data = proof .leaf_indices .into_iter() - .map(|index| mmr_lib::leaf_index_to_pos(index)) + 
.map(mmr_lib::leaf_index_to_pos) .zip(leaves.into_iter().map(|leaf| Node::Data(leaf))) .collect(); let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 566a051823d5e..30ccfd6dc69d0 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -374,7 +374,7 @@ fn should_verify_batch_proofs() { // verify that up to n=10, valid proofs are generated for all possible leaf combinations for n in 0..10 { - ext.execute_with(|| new_block()); + ext.execute_with(new_block); ext.persist_offchain_overlay(); // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n @@ -389,7 +389,7 @@ fn should_verify_batch_proofs() { // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations for n in 10..15 { // (MMR Leafs) - ext.execute_with(|| new_block()); + ext.execute_with(new_block); ext.persist_offchain_overlay(); // generate all possible 2-leaf combinations for mmr size n @@ -424,7 +424,7 @@ fn verification_should_be_stateless() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let root = ext.execute_with(crate::Pallet::::mmr_root_hash); // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); @@ -451,16 +451,13 @@ fn should_verify_batch_proof_statelessly() { // when crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let root = ext.execute_with(crate::Pallet::::mmr_root_hash); // Verify proof without relying on any on-chain data. 
assert_eq!( crate::verify_leaves_proof::<::Hashing, _>( root, - leaves - .into_iter() - .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) - .collect(), + leaves.into_iter().map(crate::primitives::DataOrHash::Data).collect(), proof ), Ok(()) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 62d0b3ddd55cb..e98ac9e264c8d 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -292,7 +292,7 @@ use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; /// The log target of this pallet. -pub const LOG_TARGET: &'static str = "runtime::nomination-pools"; +pub const LOG_TARGET: &str = "runtime::nomination-pools"; // syntactic sugar for logging. #[macro_export] @@ -876,7 +876,7 @@ impl BondedPool { // Cache the value let bonded_account = self.bonded_account(); T::Currency::transfer( - &who, + who, &bonded_account, amount, match ty { @@ -1786,10 +1786,10 @@ pub mod pallet { .iter() .fold(BalanceOf::::zero(), |accumulator, (era, unlocked_points)| { sum_unlocked_points = sum_unlocked_points.saturating_add(*unlocked_points); - if let Some(era_pool) = sub_pools.with_era.get_mut(&era) { + if let Some(era_pool) = sub_pools.with_era.get_mut(era) { let balance_to_unbond = era_pool.dissolve(*unlocked_points); if era_pool.points.is_zero() { - sub_pools.with_era.remove(&era); + sub_pools.with_era.remove(era); } accumulator.saturating_add(balance_to_unbond) } else { @@ -2267,8 +2267,8 @@ impl Pallet { current_points: BalanceOf, new_funds: BalanceOf, ) -> BalanceOf { - let u256 = |x| T::BalanceToU256::convert(x); - let balance = |x| T::U256ToBalance::convert(x); + let to_u256 = T::BalanceToU256::convert; + let to_balance = T::U256ToBalance::convert; match (current_balance.is_zero(), current_points.is_zero()) { (_, true) => new_funds.saturating_mul(POINTS_TO_BALANCE_INIT_RATIO.into()), (true, false) => { @@ -2278,11 +2278,11 @@ 
impl Pallet { }, (false, false) => { // Equivalent to (current_points / current_balance) * new_funds - balance( - u256(current_points) - .saturating_mul(u256(new_funds)) + to_balance( + to_u256(current_points) + .saturating_mul(to_u256(new_funds)) // We check for zero above - .div(u256(current_balance)), + .div(to_u256(current_balance)), ) }, } @@ -2295,15 +2295,15 @@ impl Pallet { current_points: BalanceOf, points: BalanceOf, ) -> BalanceOf { - let u256 = |x| T::BalanceToU256::convert(x); - let balance = |x| T::U256ToBalance::convert(x); + let to_u256 = T::BalanceToU256::convert; + let to_balance = T::U256ToBalance::convert; if current_balance.is_zero() || current_points.is_zero() || points.is_zero() { // There is nothing to unbond return Zero::zero() } // Equivalent of (current_balance / current_points) * points - balance(u256(current_balance).saturating_mul(u256(points))) + to_balance(to_u256(current_balance).saturating_mul(to_u256(points))) // We check for zero above .div(current_points) } @@ -2337,7 +2337,7 @@ impl Pallet { // Transfer payout to the member. 
T::Currency::transfer( &bonded_pool.reward_account(), - &member_account, + member_account, pending_rewards, ExistenceRequirement::AllowDeath, )?; diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index 243e5489b5445..25c2f6752d01f 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -234,7 +234,7 @@ pub mod v2 { let mut sum_paid_out = BalanceOf::::zero(); members - .into_iter() + .iter() .filter_map(|(who, points)| { let bonded_pool = match BondedPool::::get(id) { Some(x) => x, @@ -267,7 +267,7 @@ pub mod v2 { .for_each(|(who, last_claim)| { let outcome = T::Currency::transfer( &reward_account, - &who, + who, last_claim, ExistenceRequirement::KeepAlive, ); diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 45260b577f700..2a79f35eb5a30 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -707,6 +707,6 @@ impl Pallet { /// Check that a user is a friend in the friends list. fn is_friend(friends: &Vec, friend: &T::AccountId) -> bool { - friends.binary_search(&friend).is_ok() + friends.binary_search(friend).is_ok() } } diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index e9e14e1d4a96a..f5f2ce381bf0e 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -793,10 +793,10 @@ impl, I: 'static> Pallet { ) -> Option<(ReferendumIndex, ReferendumStatusOf)> { loop { let (index, _) = track_queue.pop()?; - match Self::ensure_ongoing(index) { - Ok(s) => return Some((index, s)), - Err(_) => {}, // referendum already timedout or was cancelled. + if let Ok(s) = Self::ensure_ongoing(index) { + return Some((index, s)) } + // else: referendum already timedout or was cancelled. 
} } diff --git a/frame/support/procedural/src/pallet_error.rs b/frame/support/procedural/src/pallet_error.rs index 216168131e43d..a7cb1b5a3b564 100644 --- a/frame/support/procedural/src/pallet_error.rs +++ b/frame/support/procedural/src/pallet_error.rs @@ -39,7 +39,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. }) => { let maybe_field_tys = fields .iter() - .map(|f| generate_field_types(f, &frame_support)) + .map(|f| generate_field_types(f, frame_support)) .collect::>>(); let field_tys = match maybe_field_tys { Ok(tys) => tys.into_iter().flatten(), @@ -59,7 +59,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS syn::Data::Enum(syn::DataEnum { variants, .. }) => { let field_tys = variants .iter() - .map(|variant| generate_variant_field_types(variant, &frame_support)) + .map(|variant| generate_variant_field_types(variant, frame_support)) .collect::>>, syn::Error>>(); let field_tys = match field_tys { diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index c4027acfe7232..c01a183a27bc0 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -177,7 +177,7 @@ where F: FnOnce(&mut QueryKind::Query) -> Result, { Self::try_mutate_exists(key, |option_value_ref| { - let option_value = core::mem::replace(option_value_ref, None); + let option_value = option_value_ref.take(); let mut query = ::Map::from_optional_value_to_query(option_value); let res = f(&mut query); let option_value = ::Map::from_query_to_optional_value(query); diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs index 90aadb6d8daa6..27425ab6210be 100644 --- a/frame/support/src/traits/tokens/fungible.rs +++ b/frame/support/src/traits/tokens/fungible.rs @@ -89,7 +89,7 @@ pub trait Mutate: Inspect { dest: 
&AccountId, amount: Self::Balance, ) -> Result { - let extra = Self::can_withdraw(&source, amount).into_result()?; + let extra = Self::can_withdraw(source, amount).into_result()?; // As we first burn and then mint, we don't need to check if `mint` fits into the supply. // If we can withdraw/burn it, we can also mint it again. Self::can_deposit(dest, amount.saturating_add(extra), false).into_result()?; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index dab50d56962f6..01ccda52c5523 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -142,7 +142,7 @@ pub trait Mutate: Inspect { dest: &AccountId, amount: Self::Balance, ) -> Result { - let extra = Self::can_withdraw(asset, &source, amount).into_result()?; + let extra = Self::can_withdraw(asset, source, amount).into_result()?; // As we first burn and then mint, we don't need to check if `mint` fits into the supply. // If we can withdraw/burn it, we can also mint it again. Self::can_deposit(asset, dest, amount.saturating_add(extra), false).into_result()?; diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index 6076414ba6bcb..678ce85d9bc98 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -231,14 +231,14 @@ impl BlockWeights { error_assert!( (max_for_class > self.base_block && max_for_class > base_for_class) || max_for_class == 0, - &mut error, + error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. 
error_assert!( weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), - &mut error, + error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, weights.max_extrinsic, @@ -247,14 +247,14 @@ impl BlockWeights { // Max extrinsic should not be 0 error_assert!( weights.max_extrinsic.unwrap_or_else(Weight::max_value) > 0, - &mut error, + error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( reserved > base_for_class || reserved == 0, - &mut error, + error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, reserved, @@ -263,7 +263,7 @@ impl BlockWeights { // Make sure max block is greater than max_total if it's set. error_assert!( self.max_block >= weights.max_total.unwrap_or(0), - &mut error, + error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, self.max_block, @@ -272,7 +272,7 @@ impl BlockWeights { // Make sure we can fit at least one extrinsic. 
error_assert!( self.max_block > base_for_class + self.base_block, - &mut error, + error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, self.max_block, diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index bcaa99285d2e7..aa7632aefda70 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -522,8 +522,7 @@ fn test_migration_v4() { (pallet_tips::Tips::::hashed_key_for(hash1), tip.encode().to_vec()), ]; - let mut s = Storage::default(); - s.top = data.into_iter().collect(); + let s = Storage { top: data.into_iter().collect(), ..Default::default() }; sp_io::TestExternalities::new(s).execute_with(|| { use frame_support::traits::PalletInfoAccess; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index aac4491720c34..1d3c8c2bc62b8 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -167,7 +167,7 @@ fn generate_versioned_api_traits( // Add the methods from the current version and all previous one. Versions are sorted so // it's safe to stop early. for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { - versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); + versioned_trait.items.extend(m.iter().cloned().map(TraitItem::Method)); } result.push(versioned_trait); @@ -231,8 +231,8 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { // Process the items in the declaration. 
The filter_map function below does a lot of stuff // because the method attributes are stripped at this point - decl.items.iter_mut().for_each(|i| match i { - TraitItem::Method(ref mut method) => { + decl.items.iter_mut().for_each(|i| { + if let TraitItem::Method(ref mut method) = i { let method_attrs = remove_supported_attributes(&mut method.attrs); let mut method_version = trait_api_version; // validate the api version for the method (if any) and generate default @@ -281,8 +281,7 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { // partition methods by api version methods_by_version.entry(method_version).or_default().push(method.clone()); } - }, - _ => (), + } }); let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version); @@ -431,7 +430,7 @@ impl<'a> ToClientSideDecl<'a> { for (_, a) in found_attributes.iter().filter(|a| a.0 == &RENAMED_ATTRIBUTE) { match parse_renamed_attribute(a) { Ok((old_name, version)) => { - renames.push((version, prefix_function_with_trait(&self.trait_, &old_name))); + renames.push((version, prefix_function_with_trait(self.trait_, &old_name))); }, Err(e) => self.errors.push(e.to_compile_error()), } @@ -449,7 +448,7 @@ impl<'a> ToClientSideDecl<'a> { // Generate the function name before we may rename it below to // `function_name_before_version_{}`. - let function_name = prefix_function_with_trait(&self.trait_, &method.sig.ident); + let function_name = prefix_function_with_trait(self.trait_, &method.sig.ident); // If the method has a `changed_in` attribute, we need to alter the method name to // `method_before_version_VERSION`. 
diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index 71f3a80b27a64..d7c134b4a3795 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -30,7 +30,7 @@ pub trait Chain { impl, B: Block> Chain for Arc { fn block_status(&self, id: &BlockId) -> Result> { - (&**self).block_status(id) + (**self).block_status(id) } } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index d56f65fd289e7..d823a2900aa2c 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -435,7 +435,7 @@ impl TraitPair for Pair { SecretKey::from_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; #[cfg(feature = "std")] - let context = SECP256K1; + let context = *SECP256K1; #[cfg(not(feature = "std"))] let context = Secp256k1::signing_only(); diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index 3f0eed87a3a64..42b6cdbf0e783 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -436,23 +436,23 @@ pub trait Externalities: Send { impl Externalities for Box { fn is_validator(&self) -> bool { - (&**self).is_validator() + (**self).is_validator() } fn network_state(&self) -> Result { - (&**self).network_state() + (**self).network_state() } fn timestamp(&mut self) -> Timestamp { - (&mut **self).timestamp() + (**self).timestamp() } fn sleep_until(&mut self, deadline: Timestamp) { - (&mut **self).sleep_until(deadline) + (**self).sleep_until(deadline) } fn random_seed(&mut self) -> [u8; 32] { - (&mut **self).random_seed() + (**self).random_seed() } fn http_request_start( @@ -461,7 +461,7 @@ impl Externalities for Box { uri: &str, meta: &[u8], ) -> Result { - (&mut **self).http_request_start(method, uri, meta) + (**self).http_request_start(method, uri, meta) } fn http_request_add_header( @@ -470,7 +470,7 @@ impl Externalities for Box 
{ name: &str, value: &str, ) -> Result<(), ()> { - (&mut **self).http_request_add_header(request_id, name, value) + (**self).http_request_add_header(request_id, name, value) } fn http_request_write_body( @@ -479,7 +479,7 @@ impl Externalities for Box { chunk: &[u8], deadline: Option, ) -> Result<(), HttpError> { - (&mut **self).http_request_write_body(request_id, chunk, deadline) + (**self).http_request_write_body(request_id, chunk, deadline) } fn http_response_wait( @@ -487,11 +487,11 @@ impl Externalities for Box { ids: &[HttpRequestId], deadline: Option, ) -> Vec { - (&mut **self).http_response_wait(ids, deadline) + (**self).http_response_wait(ids, deadline) } fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - (&mut **self).http_response_headers(request_id) + (**self).http_response_headers(request_id) } fn http_response_read_body( @@ -500,11 +500,11 @@ impl Externalities for Box { buffer: &mut [u8], deadline: Option, ) -> Result { - (&mut **self).http_response_read_body(request_id, buffer, deadline) + (**self).http_response_read_body(request_id, buffer, deadline) } fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { - (&mut **self).set_authorized_nodes(nodes, authorized_only) + (**self).set_authorized_nodes(nodes, authorized_only) } } @@ -671,11 +671,11 @@ pub trait DbExternalities: Send { impl DbExternalities for Box { fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - (&mut **self).local_storage_set(kind, key, value) + (**self).local_storage_set(kind, key, value) } fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - (&mut **self).local_storage_clear(kind, key) + (**self).local_storage_clear(kind, key) } fn local_storage_compare_and_set( @@ -685,11 +685,11 @@ impl DbExternalities for Box { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { - (&mut **self).local_storage_compare_and_set(kind, key, old_value, new_value) + 
(**self).local_storage_compare_and_set(kind, key, old_value, new_value) } fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - (&mut **self).local_storage_get(kind, key) + (**self).local_storage_get(kind, key) } } diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 602467a343884..d698841ec2b40 100644 --- a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -131,7 +131,7 @@ fn reduce_and_compare(assignment: &Vec>, winners: &V num_changed, ); - assert_assignments_equal(&assignment, &altered_assignment); + assert_assignments_equal(assignment, &altered_assignment); } fn assignment_len(assignments: &[StakedAssignment]) -> u32 { diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 5a06e3f3c88ca..e2404a19b7300 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -182,7 +182,7 @@ pub(crate) fn equalize_float( for _i in 0..iterations { let mut max_diff = 0.0; for (voter, assignment) in assignments.iter_mut() { - let voter_budget = stake_of(&voter); + let voter_budget = stake_of(voter); let diff = do_equalize_float(voter, voter_budget, assignment, supports, tolerance); if diff > max_diff { max_diff = diff; @@ -226,10 +226,10 @@ where if backing_backed_stake.len() > 0 { let max_stake = backing_backed_stake .iter() - .max_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) + .max_by(|x, y| x.partial_cmp(y).unwrap_or(sp_std::cmp::Ordering::Equal)) .expect("vector with positive length will have a max; qed"); let min_stake = backed_stakes_iter - .min_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) + .min_by(|x, y| x.partial_cmp(y).unwrap_or(sp_std::cmp::Ordering::Equal)) .expect("iterator with positive length will have a min; qed"); difference = max_stake - min_stake; @@ -265,7 +265,7 @@ where let stake_mul = stake * 
(idx as f64); let stake_sub = stake_mul - cumulative_stake; if stake_sub > budget { - last_index = idx.checked_sub(1).unwrap_or(0); + last_index = idx.saturating_sub(1); return } cumulative_stake = cumulative_stake + stake; diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index e801931c306cf..78f9ea4c718a6 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -497,7 +497,7 @@ macro_rules! for_u128_i128 { type Owned = (); fn into_ffi_value(&self) -> WrappedFFIValue { - unsafe { (mem::transmute::<&Self, *const u8>(self) as u32).into() } + (self as *const $type as *const u8 as u32).into() } } diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index aed1a156ad699..b3ccee2f60021 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -229,7 +229,7 @@ where impl<'a, T: Ord, Bound: Get> Ord for BoundedSlice<'a, T, Bound> { fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { - self.0.cmp(&other.0) + self.0.cmp(other.0) } } @@ -839,7 +839,7 @@ where BoundRhs: Get, { fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { - (&*self.0).partial_cmp(other.0) + (*self.0).partial_cmp(other.0) } } diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs index a447e7285f906..06ac53a25073f 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -424,7 +424,7 @@ where BoundRhs: Get, { fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { - (&*self.0).partial_cmp(other.0) + (*self.0).partial_cmp(other.0) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index cd43038522914..1c6317f228f3d 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -1066,7 
+1066,7 @@ mod tests { let msg = &b"test-message"[..]; let (pair, _) = ecdsa::Pair::generate(); - let signature = pair.sign(&msg); + let signature = pair.sign(msg); assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); let multi_sig = MultiSignature::from(signature); diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index a82ae1d62f56a..7dac74df981ae 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -2198,7 +2198,7 @@ mod tests { let msg = &b"test-message"[..]; let (pair, _) = ecdsa::Pair::generate(); - let signature = pair.sign(&msg); + let signature = pair.sign(msg); assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); assert!(signature.verify(msg, &pair.public())); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 236a515a2412d..c0352ba497f17 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -245,7 +245,7 @@ impl Externalities for BasicExternalities { } fn storage_append(&mut self, key: Vec, value: Vec) { - let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); + let current_value = self.overlay.value_mut_or_insert_with(&key, Default::default); crate::ext::StorageAppend::new(current_value).append(value); } @@ -283,7 +283,7 @@ impl Externalities for BasicExternalities { let delta = data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::>() - .child_storage_root(&child_info, delta, state_version) + .child_storage_root(child_info, delta, state_version) .0 } else { empty_child_trie_root::>() diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 447c276a6049c..31e9b764a0b3d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -771,7 +771,7 @@ pub mod tests { } } - let proving = 
TrieBackendBuilder::wrap(&trie) + let proving = TrieBackendBuilder::wrap(trie) .with_recorder(Recorder::default()) .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) .build(); @@ -816,7 +816,7 @@ pub mod tests { assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = TrieBackendBuilder::wrap(&trie) + let proving = TrieBackendBuilder::wrap(trie) .with_recorder(Recorder::default()) .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) .build(); @@ -888,7 +888,7 @@ pub mod tests { assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = TrieBackendBuilder::wrap(&trie) + let proving = TrieBackendBuilder::wrap(trie) .with_recorder(Recorder::default()) .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) .build(); @@ -905,7 +905,7 @@ pub mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let proving = TrieBackendBuilder::wrap(&trie) + let proving = TrieBackendBuilder::wrap(trie) .with_recorder(Recorder::default()) .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) .build(); @@ -1019,7 +1019,7 @@ pub mod tests { { // Record the access - let proving = TrieBackendBuilder::wrap(&trie) + let proving = TrieBackendBuilder::wrap(trie) .with_recorder(Recorder::default()) .with_cache(cache.local_cache()) .build(); diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 67348919250cd..9e23b62ca1d4b 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -497,7 +497,7 @@ mod tests { .build(); for (key, value) in TEST_DATA { - assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + assert_eq!(*value, trie.get(key).unwrap().unwrap()); } } @@ -508,7 +508,7 @@ mod tests { let trie = TrieDBBuilder::::new(&memory_db, &root).build(); for (key, 
value) in TEST_DATA { - assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + assert_eq!(*value, trie.get(key).unwrap().unwrap()); } } } diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs index abac8c9f946ca..b33281a051f2f 100644 --- a/primitives/trie/src/cache/shared_cache.rs +++ b/primitives/trie/src/cache/shared_cache.rs @@ -259,7 +259,7 @@ impl<'a, H> ValueCacheKey<'a, H> { /// Returns the stored storage key. pub fn storage_key(&self) -> Option<&[u8]> { match self { - Self::Ref { storage_key, .. } => Some(&storage_key), + Self::Ref { storage_key, .. } => Some(storage_key), Self::Value { storage_key, .. } => Some(storage_key), Self::Hash { .. } => None, } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fafa2a2891ce4..efc645824744f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -375,7 +375,7 @@ where DB: hash_db::HashDBRef, { let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) + TrieDBBuilder::::new(&db, root) .with_optional_recorder(recorder) .with_optional_cache(cache) .build() @@ -396,7 +396,7 @@ where DB: hash_db::HashDBRef, { let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) + TrieDBBuilder::::new(&db, root) .with_optional_recorder(recorder) .with_optional_cache(cache) .build() @@ -610,11 +610,8 @@ mod tests { let mut empty = TrieDBMutBuilder::::new(&mut db, &mut root).build(); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()) - .as_ref() - .iter() - .cloned() - .collect(); + let root2: Vec = + LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()).as_ref().to_vec(); assert_eq!(root1, root2); } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 0bd62f0bac5aa..ca351d1f49fdc 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -378,14 +378,14 @@ impl, Block: BlockT> 
GetRuntimeVersionAt for std::sync::Arc { fn runtime_version(&self, at: &BlockId) -> Result { - (&**self).runtime_version(at) + (**self).runtime_version(at) } } #[cfg(feature = "std")] impl GetNativeVersion for std::sync::Arc { fn native_version(&self) -> &NativeVersion { - (&**self).native_version() + (**self).native_version() } } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 76c28d910f943..76afaba8149e0 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -1522,7 +1522,7 @@ mod test { assert_eq!(path, [0, 1, 0, 0, 0]); // Post order traversal requirement for `import` - let res = tree.import(&"Z", 100, (), &is_descendent_of_for_post_order); + let res = tree.import("Z", 100, (), &is_descendent_of_for_post_order); assert_eq!(res, Ok(false)); assert_eq!( tree.iter().map(|node| *node.0).collect::>(), diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index e48a7e8b3c6f5..743bed89d355d 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -37,7 +37,7 @@ use thousands::Separable; use crate::shared::{StatSelect, Stats}; /// Log target for printing block weight info. -const LOG_TARGET: &'static str = "benchmark::block::weight"; +const LOG_TARGET: &str = "benchmark::block::weight"; /// Parameters for modifying the benchmark behaviour. #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] @@ -114,7 +114,7 @@ where let start = Instant::now(); runtime_api - .execute_block(&parent_num, block) + .execute_block(parent_num, block) .map_err(|e| Error::Client(RuntimeApiError(e)))?; record.push(start.elapsed().as_nanos() as NanoSeconds); @@ -136,7 +136,7 @@ where let mut raw_weight = &self .client - .storage(&block, &key)? + .storage(block, &key)? .ok_or(format!("Could not find System::BlockWeight for block: {}", block))? 
.0[..]; diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index 5f27c71983905..becd3d4de8996 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -119,7 +119,7 @@ impl MachineCmd { info!("Running machine benchmarks..."); let mut results = Vec::new(); for requirement in &requirements.0 { - let result = self.run_benchmark(requirement, &dir)?; + let result = self.run_benchmark(requirement, dir)?; results.push(result); } self.print_summary(requirements, results) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 0fc7cc4d783f7..1b641664cba17 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -102,7 +102,7 @@ fn combine_batches( } /// Explains possible reasons why the metadata for the benchmarking could not be found. -const ERROR_METADATA_NOT_FOUND: &'static str = "Did not find the benchmarking metadata. \ +const ERROR_METADATA_NOT_FOUND: &str = "Did not find the benchmarking metadata. 
\ This could mean that you either did not build the node correctly with the \ `--features runtime-benchmarks` flag, or the chain spec that you are using was \ not created by a node that was compiled with the flag"; diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index 42a237fcf3ce3..a8a9af689e8b4 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -138,7 +138,7 @@ fn map_results( let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); let benchmark_data = - get_benchmark_data(batch, storage_info, &component_ranges, analysis_choice); + get_benchmark_data(batch, storage_info, component_ranges, analysis_choice); let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); pallet_benchmarks.push(benchmark_data); } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index de5e189b40db0..5f5f04e1c3f12 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -189,7 +189,7 @@ impl StorageCmd { info!("Warmup round {}/{}", i + 1, self.params.warmups); for key in keys.as_slice() { let _ = client - .storage(&block, &key) + .storage(&block, key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty"); } diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index cba318f87ea98..6961e245f7534 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -67,7 +67,7 @@ impl StorageCmd { // regular key let start = Instant::now(); let v = client - .storage(&block, &key) + .storage(&block, key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; 
record.append(v.0.len(), start.elapsed())?;