From edf2710a8aae8e4ac92fece8ad4130915e0b170f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 13 Nov 2021 19:45:26 +0100 Subject: [PATCH 01/10] authority-discovery: Support multiple authority ids per peer id An peer id can be mapped to multiple authority ids, because an authority id is a session key that could be changed every session. Before this pr the internal authority discovery cache assumed that each authority id can only be mapped to one peer id. However, this isn't true since we changed the default implementation of the authority discovery to combine the current and next session authorities. --- Cargo.lock | 18 +- client/authority-discovery/Cargo.toml | 1 + client/authority-discovery/src/lib.rs | 9 +- client/authority-discovery/src/service.rs | 9 +- client/authority-discovery/src/tests.rs | 6 +- client/authority-discovery/src/worker.rs | 11 +- .../src/worker/addr_cache.rs | 256 ++++++++++-------- .../authority-discovery/src/worker/tests.rs | 5 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- primitives/blockchain/Cargo.toml | 2 +- 11 files changed, 186 insertions(+), 135 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6325304bfceec..3c3cac7143dd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3644,7 +3644,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "lru", + "lru 0.6.6", "minicbor", "rand 0.7.3", "smallvec 1.7.0", @@ -3978,6 +3978,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "lru" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c748cfe47cb8da225c37595b3108bea1c198c84aaae8ea0ba76d01dda9fc803" +dependencies = [ + "hashbrown 0.11.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -7415,6 +7424,7 @@ dependencies = [ "ip_network", "libp2p", "log 0.4.14", + "lru 0.7.0", "parity-scale-codec", "prost", "prost-build", @@ -8077,7 +8087,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log 
0.4.14", - "lru", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.8", @@ -8120,7 +8130,7 @@ dependencies = [ "futures-timer 3.0.2", "libp2p", "log 0.4.14", - "lru", + "lru 0.7.0", "quickcheck", "sc-network", "sp-runtime", @@ -9113,7 +9123,7 @@ version = "4.0.0-dev" dependencies = [ "futures 0.3.16", "log 0.4.14", - "lru", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.1", "sp-api", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index cee35a43df2f6..b26fa18143d06 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -36,6 +36,7 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +lru = "0.7.0" [dev-dependencies] quickcheck = "1.0.3" diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 800f683aa0aef..263179ffa915d 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -18,6 +18,7 @@ #![warn(missing_docs)] #![recursion_limit = "1024"] + //! Substrate authority discovery. //! //! This crate enables Substrate authorities to discover and directly connect to @@ -31,7 +32,7 @@ pub use crate::{ worker::{NetworkProvider, Role, Worker}, }; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use futures::{ channel::{mpsc, oneshot}, @@ -58,11 +59,13 @@ pub struct WorkerConfig { /// /// By default this is set to 1 hour. pub max_publish_interval: Duration, + /// Interval at which the keystore is queried. If the keys have changed, unconditionally /// re-publish its addresses on the DHT. /// /// By default this is set to 1 minute. 
pub keystore_refresh_interval: Duration, + /// The maximum interval in which the node will query the DHT for new entries. /// /// By default this is set to 10 minutes. @@ -156,7 +159,7 @@ where /// Message send from the [`Service`] to the [`Worker`]. pub(crate) enum ServicetoWorkerMsg { /// See [`Service::get_addresses_by_authority_id`]. - GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), + GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), /// See [`Service::get_authority_id_by_peer_id`]. - GetAuthorityIdByPeerId(PeerId, oneshot::Sender>), + GetAuthorityIdByPeerId(PeerId, oneshot::Sender>>), } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 2e5ae66e4dd4a..93fea47008486 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fmt::Debug; +use std::{collections::HashSet, fmt::Debug}; use crate::ServicetoWorkerMsg; @@ -62,7 +62,7 @@ impl Service { pub async fn get_addresses_by_authority_id( &mut self, authority: AuthorityId, - ) -> Option> { + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker @@ -78,7 +78,10 @@ impl Service { /// /// Returns `None` if no entry was present or connection to the /// [`crate::Worker`] failed. 
- pub async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option { + pub async fn get_authority_id_by_peer_id( + &mut self, + peer_id: PeerId, + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 3784b4c834266..1ba00934a0a09 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -29,7 +29,7 @@ use libp2p::core::{ multiaddr::{Multiaddr, Protocol}, PeerId, }; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use sp_authority_discovery::AuthorityId; use sp_core::crypto::key_types; @@ -73,11 +73,11 @@ fn get_addresses_and_authority_id() { pool.run_until(async { assert_eq!( - Some(vec![remote_addr]), + Some(HashSet::from([remote_addr])), service.get_addresses_by_authority_id(remote_authority_id.clone()).await, ); assert_eq!( - Some(remote_authority_id), + Some(HashSet::from([remote_authority_id])), service.get_authority_id_by_peer_id(remote_peer_id).await, ); }); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index a689d0bafd262..2897af50e1048 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -261,7 +261,7 @@ where }, ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { let _ = sender - .send(self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone)); + .send(self.addr_cache.get_authority_ids_by_peer_id(&peer_id).map(Clone::clone)); }, } } @@ -374,9 +374,11 @@ where .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) - .collect(); + .collect::>(); - self.addr_cache.retain_ids(&authorities); + // Inform the address cache about the new maximum number of + // addresses to cache. 
+ self.addr_cache.set_max_authority_ids(authorities.len()); authorities.shuffle(&mut thread_rng()); self.pending_lookups = authorities; @@ -548,7 +550,7 @@ where if let Some(metrics) = &self.metrics { metrics .known_authorities_count - .set(self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX)); + .set(self.addr_cache.num_authority_ids().try_into().unwrap_or(std::u64::MAX)); } } Ok(()) @@ -695,6 +697,7 @@ impl Metrics { #[cfg(test)] impl Worker { pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec) { + self.addr_cache.set_max_authority_ids(self.addr_cache.num_authority_ids() + 1); self.addr_cache.insert(authority, addresses); } } diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index e770297f6f3be..c2f6dd4c9f4dc 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -17,79 +17,89 @@ // along with this program. If not, see . use libp2p::core::multiaddr::{Multiaddr, Protocol}; -use std::collections::HashMap; +use lru::LruCache; use sc_network::PeerId; use sp_authority_discovery::AuthorityId; +use std::collections::{hash_map::Entry, HashMap, HashSet}; /// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { - // The addresses found in `authority_id_to_addresses` are guaranteed to always match - // the peerids found in `peer_id_to_authority_id`. In other words, these two hashmaps - // are similar to a bi-directional map. - authority_id_to_addresses: HashMap>, - peer_id_to_authority_id: HashMap, + /// The addresses found in `authority_id_to_addresses` are guaranteed to always match + /// the peerids found in `peer_id_to_authority_id`. In other words, these two maps + /// are similar to a bi-directional map. 
As both are lru maps, we ensure that only inserts + /// update the position in the lru and any other access doesn't update the position of a + /// requested item. + authority_id_to_addresses: LruCache>, + peer_id_to_authority_id: HashMap>, + /// The maximum number of authority ids that should be cached. + max_authority_ids: usize, } impl AddrCache { pub fn new() -> Self { AddrCache { - authority_id_to_addresses: HashMap::new(), - peer_id_to_authority_id: HashMap::new(), + authority_id_to_addresses: LruCache::unbounded(), + peer_id_to_authority_id: HashMap::default(), + max_authority_ids: 0, } } /// Inserts the given [`AuthorityId`] and [`Vec`] pair for future lookups by /// [`AuthorityId`] or [`PeerId`]. - pub fn insert(&mut self, authority_id: AuthorityId, mut addresses: Vec) { - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - - // Insert into `self.peer_id_to_authority_id`. - let peer_ids = addresses - .iter() - .map(|a| peer_id_from_multiaddr(a)) - .filter_map(|peer_id| peer_id); - for peer_id in peer_ids.clone() { - let former_auth = - match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { - Some(a) if a != authority_id => a, - _ => continue, - }; - - // PeerId was associated to a different authority id before. - // Remove corresponding authority from `self.authority_id_to_addresses`. 
- let former_auth_addrs = match self.authority_id_to_addresses.get_mut(&former_auth) { - Some(a) => a, - None => { - debug_assert!(false); - continue + pub fn insert(&mut self, authority_id: AuthorityId, addresses: Vec) { + let addresses = addresses.into_iter().collect::>(); + let peer_ids = addresses_to_peer_ids(&addresses); + + let old_addresses = self.authority_id_to_addresses.put(authority_id.clone(), addresses); + let old_peer_ids = addresses_to_peer_ids(&old_addresses.unwrap_or_default()); + + // Add the new peer ids + peer_ids.difference(&old_peer_ids).for_each(|new_peer_id| { + self.peer_id_to_authority_id + .entry(*new_peer_id) + .or_default() + .insert(authority_id.clone()); + }); + + // Remove the old peer ids + self.remove_authority_id_from_peer_ids(&authority_id, old_peer_ids.difference(&peer_ids)); + + while self.authority_id_to_addresses.len() > self.max_authority_ids { + match self.authority_id_to_addresses.pop_lru() { + Some((authority_id, addresses)) => { + self.remove_authority_id_from_peer_ids( + &authority_id, + addresses_to_peer_ids(&addresses).iter(), + ); }, - }; - former_auth_addrs.retain(|a| peer_id_from_multiaddr(a).map_or(true, |p| p != peer_id)); + None => break, + } } + } - // Insert into `self.authority_id_to_addresses`. - for former_addr in self - .authority_id_to_addresses - .insert(authority_id.clone(), addresses.clone()) - .unwrap_or_default() - { - // Must remove from `self.peer_id_to_authority_id` any PeerId formerly associated - // to that authority but that can't be found in its new addresses. - - let peer_id = match peer_id_from_multiaddr(&former_addr) { - Some(p) => p, - None => continue, - }; - - if !peer_ids.clone().any(|p| p == peer_id) { - self.peer_id_to_authority_id.remove(&peer_id); + /// Remove the given `authority_id` from the `peer_id` to `authority_ids` mapping. + /// + /// If a `peer_id` doesn't have any `authority_id` assigned anymore, it is removed. 
+ fn remove_authority_id_from_peer_ids<'a>( + &mut self, + authority_id: &AuthorityId, + peer_ids: impl Iterator, + ) { + peer_ids.for_each(|peer_id| { + if let Entry::Occupied(mut e) = self.peer_id_to_authority_id.entry(*peer_id) { + e.get_mut().remove(authority_id); + + // If there are no more entries, remove the peer id. + if e.get().is_empty() { + e.remove(); + } } - } + }) } /// Returns the number of authority IDs in the cache. - pub fn num_ids(&self) -> usize { + pub fn num_authority_ids(&self) -> usize { self.authority_id_to_addresses.len() } @@ -97,43 +107,23 @@ impl AddrCache { pub fn get_addresses_by_authority_id( &self, authority_id: &AuthorityId, - ) -> Option<&Vec> { - self.authority_id_to_addresses.get(&authority_id) + ) -> Option<&HashSet> { + self.authority_id_to_addresses.peek(authority_id) } - /// Returns the [`AuthorityId`] for the given [`PeerId`]. - pub fn get_authority_id_by_peer_id(&self, peer_id: &PeerId) -> Option<&AuthorityId> { + /// Returns the [`AuthorityId`]s for the given [`PeerId`]. + /// + /// As the authority id can change between sessions, one [`PeerId`] can be mapped to + /// multiple authority ids. + pub fn get_authority_ids_by_peer_id(&self, peer_id: &PeerId) -> Option<&HashSet> { self.peer_id_to_authority_id.get(peer_id) } - /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given - /// [`AuthorityId`]s. - pub fn retain_ids(&mut self, authority_ids: &Vec) { - // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. - let authority_ids_to_remove = self - .authority_id_to_addresses - .iter() - .filter(|(id, _addresses)| !authority_ids.contains(id)) - .map(|entry| entry.0) - .cloned() - .collect::>(); - - for authority_id_to_remove in authority_ids_to_remove { - // Remove other entries from `self.authority_id_to_addresses`. 
- let addresses = self.authority_id_to_addresses.remove(&authority_id_to_remove); - - // Remove other entries from `self.peer_id_to_authority_id`. - let peer_ids = addresses - .iter() - .flatten() - .map(|a| peer_id_from_multiaddr(a)) - .filter_map(|peer_id| peer_id); - for peer_id in peer_ids { - if let Some(id) = self.peer_id_to_authority_id.remove(&peer_id) { - debug_assert_eq!(authority_id_to_remove, id); - } - } - } + /// Set the maximum number of [`AuthorityId`]s to cache. + /// + /// The new maximum will be taken in account the next time [`Self::insert`] is called. + pub fn set_max_authority_ids(&mut self, max: usize) { + self.max_authority_ids = max; } } @@ -147,6 +137,13 @@ fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { }) } +fn addresses_to_peer_ids(addresses: &HashSet) -> HashSet { + addresses + .iter() + .filter_map(|a| peer_id_from_multiaddr(a)) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; @@ -209,7 +206,7 @@ mod tests { } #[test] - fn retains_only_entries_of_provided_authority_ids() { + fn last_inserted_authority_id_is_purged() { fn property( first: (TestAuthorityId, TestMultiaddr), second: (TestAuthorityId, TestMultiaddr), @@ -221,32 +218,32 @@ mod tests { let mut cache = AddrCache::new(); + cache.set_max_authority_ids(2); cache.insert(first.0.clone(), vec![first.1.clone()]); cache.insert(second.0.clone(), vec![second.1.clone()]); - cache.insert(third.0.clone(), vec![third.1.clone()]); assert_eq!( - Some(&vec![third.1.clone()]), - cache.get_addresses_by_authority_id(&third.0), - "Expect `get_addresses_by_authority_id` to return addresses of third authority." + Some(&HashSet::from([first.1.clone()])), + cache.get_addresses_by_authority_id(&first.0), + "Expect `get_addresses_by_authority_id` to return addresses of first authority." ); assert_eq!( - Some(&third.0), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of third authority." 
+ Some(&HashSet::from([first.0.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&first.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of first authority." ); - cache.retain_ids(&vec![first.0, second.0]); + cache.insert(third.0.clone(), vec![third.1.clone()]); assert_eq!( None, - cache.get_addresses_by_authority_id(&third.0), - "Expect `get_addresses_by_authority_id` to not return `None` for third authority." + cache.get_addresses_by_authority_id(&first.0), + "Expect `get_addresses_by_authority_id` to not return `None` for first authority." ); assert_eq!( None, - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `None` for third authority." + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&first.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `None` for first authority." ); TestResult::passed() @@ -258,7 +255,7 @@ mod tests { } #[test] - fn keeps_consistency_between_authority_id_and_peer_id() { + fn purges_authority_id_cache_properly() { fn property( authority1: TestAuthorityId, authority2: TestAuthorityId, @@ -273,6 +270,8 @@ mod tests { let TestMultiaddrsSamePeerCombo(multiaddr3, multiaddr4) = multiaddr3; let mut cache = AddrCache::new(); + // Only allow one authority id to be stored. 
+ cache.set_max_authority_ids(2); cache.insert(authority1.clone(), vec![multiaddr1.clone()]); cache.insert( @@ -282,44 +281,47 @@ mod tests { assert_eq!( None, - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) ); cache.insert(authority2.clone(), vec![multiaddr2.clone()]); assert_eq!( - Some(&authority2), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); - assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 2); + assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 3); cache.insert(authority2.clone(), vec![multiaddr2.clone(), multiaddr3.clone()]); assert_eq!( - Some(&authority2), - 
cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); assert_eq!( - Some(&authority2), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + &HashSet::from([multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()]), + cache.get_addresses_by_authority_id(&authority1).unwrap(), ); - assert!(cache.get_addresses_by_authority_id(&authority1).unwrap().is_empty()); TestResult::passed() } @@ -328,4 +330,32 @@ mod tests { .max_tests(10) .quickcheck(property as fn(_, _, _, _, _) -> TestResult) } + + /// As the runtime gives us the current + next authority ids, it can happen that some + /// authority changed its session keys. Changing the sessions keys leads to having two + /// authority ids that map to the same `PeerId` & addresses. 
+ #[test] + fn adding_two_authority_ids_for_the_same_peer_id() { + let mut addr_cache = AddrCache::new(); + addr_cache.set_max_authority_ids(10); + + let peer_id = PeerId::random(); + let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); + + let authority_id0 = AuthorityPair::generate().0.public(); + let authority_id1 = AuthorityPair::generate().0.public(); + + addr_cache.insert(authority_id0.clone(), vec![addr.clone()]); + addr_cache.insert(authority_id1.clone(), vec![addr.clone()]); + + assert_eq!(2, addr_cache.num_authority_ids()); + assert_eq!( + &HashSet::from([addr.clone()]), + addr_cache.get_addresses_by_authority_id(&authority_id0).unwrap() + ); + assert_eq!( + &HashSet::from([addr]), + addr_cache.get_addresses_by_authority_id(&authority_id1).unwrap() + ); + } } diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 3c1610256f5bc..130aea71fdfb0 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -19,6 +19,7 @@ use crate::worker::schema; use std::{ + collections::HashSet, sync::{Arc, Mutex}, task::Poll, }; @@ -469,7 +470,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { .send(ServicetoWorkerMsg::GetAddressesByAuthorityId(remote_public_key, sender)) .await .expect("Channel has capacity of 1."); - assert_eq!(Some(vec![remote_multiaddr]), addresses.await.unwrap()); + assert_eq!(Some(HashSet::from([remote_multiaddr])), addresses.await.unwrap()); }); } @@ -562,7 +563,7 @@ fn do_not_cache_addresses_without_peer_id() { local_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); assert_eq!( - Some(&vec![multiaddr_with_peer_id]), + Some(&HashSet::from([multiaddr_with_peer_id])), local_worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()), "Expect worker to only cache `Multiaddr`s with `PeerId`s.", ); diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 
b4907ade834aa..eab097dad0184 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.9" futures-timer = "3.0.1" libp2p = { version = "0.39.1", default-features = false } log = "0.4.8" -lru = "0.6.6" +lru = "0.7.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index d6d054504369b..e977016adb37f 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -36,7 +36,7 @@ hex = "0.4.0" ip_network = "0.4.0" linked-hash-map = "0.5.4" linked_hash_set = "0.1.3" -lru = "0.6.6" +lru = "0.7.0" log = "0.4.8" parking_lot = "0.11.1" pin-project = "1.0.8" diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 93daef5fa1a27..469fc4db8bf4a 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -lru = "0.6.6" +lru = "0.7.0" parking_lot = "0.11.1" thiserror = "1.0.21" futures = "0.3.9" From 45b388a509af7909bcb1d2a0e75284b843ffac2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 13 Nov 2021 23:54:18 +0100 Subject: [PATCH 02/10] Review feedback --- Cargo.lock | 1 - client/authority-discovery/Cargo.toml | 1 - client/authority-discovery/src/worker.rs | 5 +- .../src/worker/addr_cache.rs | 103 +++++++++--------- 4 files changed, 53 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72de57551b9e6..e41a6c867177f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7585,7 +7585,6 @@ dependencies = [ "ip_network", "libp2p", "log 0.4.14", - "lru 0.7.0", "parity-scale-codec", "prost 0.8.0", "prost-build 0.9.0", diff --git a/client/authority-discovery/Cargo.toml 
b/client/authority-discovery/Cargo.toml index b52f4c98a900c..b1d9d4ebd3935 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -36,7 +36,6 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -lru = "0.7.0" [dev-dependencies] quickcheck = "1.0.3" diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 2897af50e1048..e2db3f50b29ba 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -376,9 +376,7 @@ where .filter(|id| !local_keys.contains(id.as_ref())) .collect::>(); - // Inform the address cache about the new maximum number of - // addresses to cache. - self.addr_cache.set_max_authority_ids(authorities.len()); + self.addr_cache.retain_ids(&authorities); authorities.shuffle(&mut thread_rng()); self.pending_lookups = authorities; @@ -697,7 +695,6 @@ impl Metrics { #[cfg(test)] impl Worker { pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec) { - self.addr_cache.set_max_authority_ids(self.addr_cache.num_authority_ids() + 1); self.addr_cache.insert(authority, addresses); } } diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index c2f6dd4c9f4dc..417bde210ea95 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -18,7 +18,6 @@ use libp2p::core::multiaddr::{Multiaddr, Protocol}; -use lru::LruCache; use sc_network::PeerId; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; @@ -26,22 +25,17 @@ use std::collections::{hash_map::Entry, HashMap, HashSet}; /// Cache for [`AuthorityId`] -> 
[`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { /// The addresses found in `authority_id_to_addresses` are guaranteed to always match - /// the peerids found in `peer_id_to_authority_id`. In other words, these two maps - /// are similar to a bi-directional map. As both are lru maps, we ensure that only inserts - /// update the position in the lru and any other access doesn't update the position of a - /// requested item. - authority_id_to_addresses: LruCache>, - peer_id_to_authority_id: HashMap>, - /// The maximum number of authority ids that should be cached. - max_authority_ids: usize, + /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps + /// are similar to a bi-directional map. + authority_id_to_addresses: HashMap>, + peer_id_to_authority_ids: HashMap>, } impl AddrCache { pub fn new() -> Self { AddrCache { - authority_id_to_addresses: LruCache::unbounded(), - peer_id_to_authority_id: HashMap::default(), - max_authority_ids: 0, + authority_id_to_addresses: HashMap::new(), + peer_id_to_authority_ids: HashMap::new(), } } @@ -51,12 +45,12 @@ impl AddrCache { let addresses = addresses.into_iter().collect::>(); let peer_ids = addresses_to_peer_ids(&addresses); - let old_addresses = self.authority_id_to_addresses.put(authority_id.clone(), addresses); + let old_addresses = self.authority_id_to_addresses.insert(authority_id.clone(), addresses); let old_peer_ids = addresses_to_peer_ids(&old_addresses.unwrap_or_default()); // Add the new peer ids peer_ids.difference(&old_peer_ids).for_each(|new_peer_id| { - self.peer_id_to_authority_id + self.peer_id_to_authority_ids .entry(*new_peer_id) .or_default() .insert(authority_id.clone()); @@ -64,18 +58,6 @@ impl AddrCache { // Remove the old peer ids self.remove_authority_id_from_peer_ids(&authority_id, old_peer_ids.difference(&peer_ids)); - - while self.authority_id_to_addresses.len() > self.max_authority_ids { - match 
self.authority_id_to_addresses.pop_lru() { - Some((authority_id, addresses)) => { - self.remove_authority_id_from_peer_ids( - &authority_id, - addresses_to_peer_ids(&addresses).iter(), - ); - }, - None => break, - } - } } /// Remove the given `authority_id` from the `peer_id` to `authority_ids` mapping. @@ -87,7 +69,7 @@ impl AddrCache { peer_ids: impl Iterator, ) { peer_ids.for_each(|peer_id| { - if let Entry::Occupied(mut e) = self.peer_id_to_authority_id.entry(*peer_id) { + if let Entry::Occupied(mut e) = self.peer_id_to_authority_ids.entry(*peer_id) { e.get_mut().remove(authority_id); // If there are no more entries, remove the peer id. @@ -108,7 +90,7 @@ impl AddrCache { &self, authority_id: &AuthorityId, ) -> Option<&HashSet> { - self.authority_id_to_addresses.peek(authority_id) + self.authority_id_to_addresses.get(authority_id) } /// Returns the [`AuthorityId`]s for the given [`PeerId`]. @@ -116,14 +98,36 @@ impl AddrCache { /// As the authority id can change between sessions, one [`PeerId`] can be mapped to /// multiple authority ids. pub fn get_authority_ids_by_peer_id(&self, peer_id: &PeerId) -> Option<&HashSet> { - self.peer_id_to_authority_id.get(peer_id) + self.peer_id_to_authority_ids.get(peer_id) } - /// Set the maximum number of [`AuthorityId`]s to cache. - /// - /// The new maximum will be taken in account the next time [`Self::insert`] is called. - pub fn set_max_authority_ids(&mut self, max: usize) { - self.max_authority_ids = max; + /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given + /// [`AuthorityId`]s. + pub fn retain_ids(&mut self, authority_ids: &[AuthorityId]) { + // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. 
+ let authority_ids_to_remove = self + .authority_id_to_addresses + .iter() + .filter(|(id, _addresses)| !authority_ids.contains(id)) + .map(|entry| entry.0) + .cloned() + .collect::>(); + + for authority_id_to_remove in authority_ids_to_remove { + // Remove other entries from `self.authority_id_to_addresses`. + let addresses = if let Some(addresses) = + self.authority_id_to_addresses.remove(&authority_id_to_remove) + { + addresses + } else { + continue + }; + + self.remove_authority_id_from_peer_ids( + &authority_id_to_remove, + addresses_to_peer_ids(&addresses).iter(), + ); + } } } @@ -206,7 +210,7 @@ mod tests { } #[test] - fn last_inserted_authority_id_is_purged() { + fn retains_only_entries_of_provided_authority_ids() { fn property( first: (TestAuthorityId, TestMultiaddr), second: (TestAuthorityId, TestMultiaddr), @@ -218,32 +222,32 @@ mod tests { let mut cache = AddrCache::new(); - cache.set_max_authority_ids(2); cache.insert(first.0.clone(), vec![first.1.clone()]); cache.insert(second.0.clone(), vec![second.1.clone()]); + cache.insert(third.0.clone(), vec![third.1.clone()]); assert_eq!( - Some(&HashSet::from([first.1.clone()])), - cache.get_addresses_by_authority_id(&first.0), - "Expect `get_addresses_by_authority_id` to return addresses of first authority." + Some(&HashSet::from([third.1.clone()])), + cache.get_addresses_by_authority_id(&third.0), + "Expect `get_addresses_by_authority_id` to return addresses of third authority.", ); assert_eq!( - Some(&HashSet::from([first.0.clone()])), - cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&first.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of first authority." 
+ Some(&HashSet::from([third.0.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of third authority.", ); - cache.insert(third.0.clone(), vec![third.1.clone()]); + cache.retain_ids(&vec![first.0.clone(), second.0]); assert_eq!( None, - cache.get_addresses_by_authority_id(&first.0), - "Expect `get_addresses_by_authority_id` to not return `None` for first authority." + cache.get_addresses_by_authority_id(&third.0), + "Expect `get_addresses_by_authority_id` to not return `None` for third authority.", ); assert_eq!( None, - cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&first.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `None` for first authority." + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `None` for third authority.", ); TestResult::passed() @@ -255,7 +259,7 @@ mod tests { } #[test] - fn purges_authority_id_cache_properly() { + fn keeps_consistency_between_authority_id_and_peer_id() { fn property( authority1: TestAuthorityId, authority2: TestAuthorityId, @@ -270,8 +274,6 @@ mod tests { let TestMultiaddrsSamePeerCombo(multiaddr3, multiaddr4) = multiaddr3; let mut cache = AddrCache::new(); - // Only allow one authority id to be stored. 
- cache.set_max_authority_ids(2); cache.insert(authority1.clone(), vec![multiaddr1.clone()]); cache.insert( @@ -337,7 +339,6 @@ mod tests { #[test] fn adding_two_authority_ids_for_the_same_peer_id() { let mut addr_cache = AddrCache::new(); - addr_cache.set_max_authority_ids(10); let peer_id = PeerId::random(); let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); From 75c4b4443851f0a1ffc92e15396d5ddb4d8aef86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 14 Nov 2021 14:31:18 +0100 Subject: [PATCH 03/10] Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Andronik Ordian --- client/authority-discovery/src/worker/addr_cache.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 417bde210ea95..30058784605a3 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -27,6 +27,11 @@ pub(super) struct AddrCache { /// The addresses found in `authority_id_to_addresses` are guaranteed to always match /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps /// are similar to a bi-directional map. + /// + /// NB: since we may store the mapping across several sessions, a single + /// `PeerId` might correspond to multiple `AuthorityId`s. However, + /// it's not expected that a single `AuthorityId` can have multiple `PeerId`s. + /// This is ensured in [`AddrCache::insert`]. 
authority_id_to_addresses: HashMap>, peer_id_to_authority_ids: HashMap>, } From 960e2ffdb42017520a3e5c5a569778e014ce1fa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 14 Nov 2021 14:36:42 +0100 Subject: [PATCH 04/10] Early return on no peer ids --- client/authority-discovery/src/worker/addr_cache.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 30058784605a3..aeefb47ced9d0 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -29,7 +29,7 @@ pub(super) struct AddrCache { /// are similar to a bi-directional map. /// /// NB: since we may store the mapping across several sessions, a single - /// `PeerId` might correspond to multiple `AuthorityId`s. However, + /// `PeerId` might correspond to multiple `AuthorityId`s. However, /// it's not expected that a single `AuthorityId` can have multiple `PeerId`s. /// This is ensured in [`AddrCache::insert`]. authority_id_to_addresses: HashMap>, @@ -50,6 +50,17 @@ impl AddrCache { let addresses = addresses.into_iter().collect::>(); let peer_ids = addresses_to_peer_ids(&addresses); + if peer_ids.is_empty() { + log::debug!( + target: super::LOG_TARGET, + "Authority({:?}) provides no addresses or addresses without peer ids. 
Addresses: {:?}",
+ authority_id,
+ addresses,
+ );
+
+ return
+ }
+
 let old_addresses = self.authority_id_to_addresses.insert(authority_id.clone(), addresses);
 let old_peer_ids = addresses_to_peer_ids(&old_addresses.unwrap_or_default());

From 358bd85d05b3b652529b23409868890be3b780aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Nov 2021 12:54:43 +0100 Subject: [PATCH 05/10] Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Pierre Krieger --- client/authority-discovery/src/worker/addr_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index aeefb47ced9d0..76409c1fef2e7 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -28,7 +28,7 @@ pub(super) struct AddrCache { /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps /// are similar to a bi-directional map. /// - /// NB: since we may store the mapping across several sessions, a single + /// Since we may store the mapping across several sessions, a single /// `PeerId` might correspond to multiple `AuthorityId`s. However, /// it's not expected that a single `AuthorityId` can have multiple `PeerId`s. /// This is ensured in [`AddrCache::insert`].
From e4d76f703136dab41552095e2752ee464265a2be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Nov 2021 12:55:54 +0100 Subject: [PATCH 06/10] Update types in comment --- client/authority-discovery/src/worker/addr_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 76409c1fef2e7..c7bde2e0ebfed 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -22,7 +22,7 @@ use sc_network::PeerId; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; -/// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. +/// Cache for [`AuthorityId`] -> [`HashSet`] and [`PeerId`] -> [`HashSet`] mappings. pub(super) struct AddrCache { /// The addresses found in `authority_id_to_addresses` are guaranteed to always match /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps From 1e876902d0375d15123c05c6395029fab57fe303 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Nov 2021 21:39:36 +0100 Subject: [PATCH 07/10] FMT --- client/authority-discovery/src/worker/addr_cache.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index c7bde2e0ebfed..90f4f525d4ebc 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -22,7 +22,8 @@ use sc_network::PeerId; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; -/// Cache for [`AuthorityId`] -> [`HashSet`] and [`PeerId`] -> [`HashSet`] mappings. +/// Cache for [`AuthorityId`] -> [`HashSet`] and [`PeerId`] -> [`HashSet`] +/// mappings. 
pub(super) struct AddrCache { /// The addresses found in `authority_id_to_addresses` are guaranteed to always match /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps From 24bb824eabeb1b63214a93ad09f05e11e88239fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Nov 2021 21:45:40 +0100 Subject: [PATCH 08/10] Add warning --- client/authority-discovery/src/worker/addr_cache.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 90f4f525d4ebc..3ba5ed1900e3d 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -60,6 +60,13 @@ impl AddrCache { ); return + } else if peer_ids.len() > 1 { + log::warn!( + target: super::LOG_TARGET, + "Authority({:?}) can be reached through multiple peer ids: {:?}", + authority_id, + peer_ids + ); } let old_addresses = self.authority_id_to_addresses.insert(authority_id.clone(), addresses); From 7feed9190fe4af0f76d7e8a16b819d761cf04b35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 16 Nov 2021 11:23:39 +0100 Subject: [PATCH 09/10] Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Andronik Ordian --- client/authority-discovery/src/worker/addr_cache.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 3ba5ed1900e3d..d4ba156d5fa19 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -32,7 +32,6 @@ pub(super) struct AddrCache { /// Since we may store the mapping across several sessions, a single /// `PeerId` might correspond to multiple `AuthorityId`s. However, /// it's not expected that a single `AuthorityId` can have multiple `PeerId`s. 
- /// This is ensured in [`AddrCache::insert`]. authority_id_to_addresses: HashMap>, peer_id_to_authority_ids: HashMap>, } From bd9423a1d1aeaa92dadc4387dfedd705928a0080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 16 Nov 2021 11:28:22 +0100 Subject: [PATCH 10/10] Feedback --- client/authority-discovery/src/lib.rs | 4 ++-- client/authority-discovery/src/service.rs | 4 ++-- client/authority-discovery/src/tests.rs | 2 +- client/authority-discovery/src/worker.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 263179ffa915d..1bbb9f38796c2 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -160,6 +160,6 @@ where pub(crate) enum ServicetoWorkerMsg { /// See [`Service::get_addresses_by_authority_id`]. GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), - /// See [`Service::get_authority_id_by_peer_id`]. - GetAuthorityIdByPeerId(PeerId, oneshot::Sender>>), + /// See [`Service::get_authority_ids_by_peer_id`]. + GetAuthorityIdsByPeerId(PeerId, oneshot::Sender>>), } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 93fea47008486..9b59a4ec8647f 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -78,14 +78,14 @@ impl Service { /// /// Returns `None` if no entry was present or connection to the /// [`crate::Worker`] failed. 
- pub async fn get_authority_id_by_peer_id( + pub async fn get_authority_ids_by_peer_id( &mut self, peer_id: PeerId, ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker - .send(ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, tx)) + .send(ServicetoWorkerMsg::GetAuthorityIdsByPeerId(peer_id, tx)) .await .ok()?; diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 1ba00934a0a09..cef91445064ca 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -78,7 +78,7 @@ fn get_addresses_and_authority_id() { ); assert_eq!( Some(HashSet::from([remote_authority_id])), - service.get_authority_id_by_peer_id(remote_peer_id).await, + service.get_authority_ids_by_peer_id(remote_peer_id).await, ); }); } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index e2db3f50b29ba..00021ecbdcb83 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -259,7 +259,7 @@ where self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone), ); }, - ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { + ServicetoWorkerMsg::GetAuthorityIdsByPeerId(peer_id, sender) => { let _ = sender .send(self.addr_cache.get_authority_ids_by_peer_id(&peer_id).map(Clone::clone)); },