From 431b91040f4d1dd4d51c4b51fe4e28f0131bcea7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 18 Nov 2019 17:32:45 +0100 Subject: [PATCH 01/15] Refactor gossiping system --- Cargo.lock | 94 ++ Cargo.toml | 1 + client/finality-grandpa/Cargo.toml | 1 + .../src/communication/gossip.rs | 2 +- .../finality-grandpa/src/communication/mod.rs | 135 +-- .../src/communication/tests.rs | 3 +- client/finality-grandpa/src/environment.rs | 22 +- client/finality-grandpa/src/lib.rs | 35 +- client/finality-grandpa/src/observer.rs | 20 +- client/network-gossip/Cargo.toml | 19 + client/network-gossip/src/bridge.rs | 244 +++++ client/network-gossip/src/lib.rs | 84 ++ client/network-gossip/src/state_machine.rs | 920 ++++++++++++++++++ client/network/Cargo.toml | 1 + client/network/src/behaviour.rs | 47 +- client/network/src/config.rs | 11 +- .../{legacy_proto/mod.rs => generic_proto.rs} | 2 +- .../behaviour.rs | 147 ++- client/network/src/generic_proto/handler.rs | 22 + .../src/generic_proto/handler/group.rs | 425 ++++++++ .../handler/legacy.rs} | 71 +- .../src/generic_proto/handler/notif_in.rs | 277 ++++++ .../src/generic_proto/handler/notif_out.rs | 323 ++++++ .../{legacy_proto => generic_proto}/tests.rs | 32 +- client/network/src/generic_proto/upgrade.rs | 35 + .../src/generic_proto/upgrade/collec.rs | 99 ++ .../upgrade/legacy.rs} | 0 .../generic_proto/upgrade/notifications.rs | 266 +++++ .../src/generic_proto/upgrade/select.rs | 120 +++ client/network/src/lib.rs | 4 +- client/network/src/protocol.rs | 181 ++-- client/network/src/protocol/event.rs | 30 + client/network/src/service.rs | 159 ++- client/service/test/src/lib.rs | 1 + 34 files changed, 3423 insertions(+), 410 deletions(-) create mode 100644 client/network-gossip/Cargo.toml create mode 100644 client/network-gossip/src/bridge.rs create mode 100644 client/network-gossip/src/lib.rs create mode 100644 client/network-gossip/src/state_machine.rs rename client/network/src/{legacy_proto/mod.rs => generic_proto.rs} (93%) rename client/network/src/{legacy_proto => generic_proto}/behaviour.rs (89%) create mode 100644 client/network/src/generic_proto/handler.rs create mode 100644 client/network/src/generic_proto/handler/group.rs rename client/network/src/{legacy_proto/handler.rs => generic_proto/handler/legacy.rs} (91%) create mode 100644 client/network/src/generic_proto/handler/notif_in.rs create mode 100644 client/network/src/generic_proto/handler/notif_out.rs rename client/network/src/{legacy_proto => generic_proto}/tests.rs (91%) create mode 100644 client/network/src/generic_proto/upgrade.rs create mode 100644 client/network/src/generic_proto/upgrade/collec.rs rename client/network/src/{legacy_proto/upgrade.rs => generic_proto/upgrade/legacy.rs} (100%) create mode 100644 client/network/src/generic_proto/upgrade/notifications.rs create mode 100644 client/network/src/generic_proto/upgrade/select.rs diff --git a/Cargo.lock b/Cargo.lock index 5fe1e3003edc5..8eb8604725d45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,6 +138,48 @@ name = "assert_matches" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "async-macros" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core-preview 0.3.0-alpha.19 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "async-std" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "async-macros 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "async-task 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-timer 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "kv-log-macro 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "once_cell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "async-task" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "atty" version = "0.2.13" @@ -1354,6 +1396,15 @@ dependencies = [ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "futures-timer" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core-preview 0.3.0-alpha.19 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "futures-util" version = "0.3.1" @@ -1946,6 +1997,14 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "kv-log-macro" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "kvdb" version = "0.1.0" @@ -3057,6 +3116,11 @@ name = "once_cell" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "once_cell" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "opaque-debug" version = "0.2.3" @@ -4084,6 +4148,11 @@ dependencies = [ "fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "pin-project-lite" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pin-utils" version = "0.1.0-alpha.4" @@ -5766,6 +5835,7 @@ dependencies = [ "substrate-keyring 2.0.0", "substrate-keystore 2.0.0", "substrate-network 2.0.0", + "substrate-network-gossip 2.0.0", "substrate-primitives 2.0.0", "substrate-state-machine 2.0.0", "substrate-telemetry 2.0.0", @@ -5838,6 +5908,7 @@ dependencies = [ name = 
"substrate-network" version = "2.0.0" dependencies = [ + "async-std 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5886,6 +5957,22 @@ dependencies = [ "zeroize 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "substrate-network-gossip" +version = "2.0.0" +dependencies = [ + "async-std 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-preview 0.3.0-alpha.19 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-timer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lru-cache 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 2.0.0", + "substrate-network 2.0.0", +] + [[package]] name = "substrate-offchain" version = "2.0.0" @@ -7616,6 +7703,9 @@ dependencies = [ "checksum asn1_der 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" "checksum asn1_der_derive 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" "checksum assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +"checksum async-macros 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e421d59b24c1feea2496e409b3e0a8de23e5fc130a2ddc0b012e551f3b272bba" +"checksum async-std 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6ee33a6cfdd96bfde032d14b29905244a70868bd8dda1f3b13504d6cbc3b7bc" +"checksum async-task 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de6bd58f7b9cc49032559422595c81cbfcf04db2f2133592f70af19e258a1ced" "checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)" = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea" @@ -7751,6 +7841,7 @@ dependencies = [ "checksum futures-sink-preview 0.3.0-alpha.19 (registry+https://github.com/rust-lang/crates.io-index)" = "86f148ef6b69f75bb610d4f9a2336d4fc88c4b5b67129d1a340dd0fd362efeec" "checksum futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bae52d6b29cf440e298856fec3965ee6fa71b06aa7495178615953fd669e5f9" "checksum futures-timer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "878f1d2fc31355fa02ed2372e741b0c17e58373341e6a122569b4623a14a7d33" +"checksum futures-timer 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7946248e9429ff093345d3e8fdf4eb0f9b2d79091611c9c14f744971a6f8be45" "checksum futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d66274fb76985d3c62c886d1da7ac4c0903a8c9f754e8fe0f35a6a6cc39e76" 
"checksum futures-util-preview 0.3.0-alpha.19 (registry+https://github.com/rust-lang/crates.io-index)" = "5ce968633c17e5f97936bd2797b6e38fb56cf16a7422319f7ec2e30d3c470e8d" "checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" @@ -7811,6 +7902,7 @@ dependencies = [ "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum kv-log-macro 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c54d9f465d530a752e6ebdc217e081a7a614b48cb200f6f0aee21ba6bc9aabb" "checksum kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)" = "" "checksum kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)" = "" "checksum kvdb-rocksdb 0.1.4 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)" = "" @@ -7882,6 +7974,7 @@ dependencies = [ "checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c" "checksum once_cell 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" "checksum once_cell 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d584f08c2d717d5c23a6414fc2822b71c651560713e54fa7eace675f758a355e" +"checksum once_cell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "891f486f630e5c5a4916c7e16c4b24a53e78c860b646e9f8e005e4f16847bfed" "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" "checksum openssl 0.10.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2f372b2b53ce10fb823a337aaa674e3a7d072b957c6264d0f4ff0bd86e657449" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" @@ -7914,6 +8007,7 @@ dependencies = [ "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" +"checksum pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f0af6cbca0e6e3ce8692ee19fb8d734b641899e07b68eb73e9bbbd32f1703991" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" "checksum plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" diff --git a/Cargo.toml b/Cargo.toml index f35cab80144e0..aa7c0f6762112 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "client/header-metadata", "client/keystore", "client/network", + "client/network-gossip", "client/offchain", "client/rpc-servers", "client/rpc", diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index d8320327a9fb0..9b5daf55ad850 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -25,6 +25,7 @@ client = { package = "substrate-client", path = "../" } header-metadata = { package = "substrate-header-metadata", path = "../header-metadata" } inherents = { package = "substrate-inherents", path = "../../primitives/inherents" } network = { package = "substrate-network", path = "../network" } +network-gossip = { package = "substrate-network-gossip", path = "../network-gossip" } paint-finality-tracker = { path = "../../paint/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "../../primitives/finality-grandpa" } grandpa = { package = "finality-grandpa", version = "0.9.0", features = ["derive-codec"] } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 7758de6afa70d..9cf6234fec4b3 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -83,7 +83,7 @@ //! We only send polite messages to peers, use sr_primitives::traits::{NumberFor, Block as BlockT, Zero}; -use network::consensus_gossip::{self as network_gossip, MessageIntent, ValidatorContext}; +use network_gossip::{self, MessageIntent, ValidatorContext}; use network::{config::Roles, PeerId}; use codec::{Encode, Decode}; use fg_primitives::AuthorityId; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 247f9efd2df18..5e8556f689659 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -30,13 +30,12 @@ use std::sync::Arc; use futures::prelude::*; -use futures::sync::{oneshot, mpsc}; -use futures03::stream::{StreamExt, TryStreamExt}; +use futures::sync::mpsc; +use futures03::{compat::Compat, stream::{StreamExt, TryStreamExt}}; use grandpa::Message::{Prevote, Precommit, PrimaryPropose}; use grandpa::{voter, voter_set::VoterSet}; use log::{debug, trace}; -use network::{consensus_gossip as network_gossip, NetworkService}; -use network_gossip::ConsensusMessage; +use network_gossip::{GossipEngine, Network as AbstractNetwork}; use codec::{Encode, Decode}; use primitives::Pair; use sr_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; @@ -149,141 +148,62 @@ pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } -impl Network for Arc> where +impl Network for GossipEngine where B: BlockT, - S: network::specialization::NetworkSpecialization, - H: network::ExHashT, { - type In = NetworkStream< - Box + Send + 'static>, - >; + type In = Box + Send + 'static>; fn messages_for(&self, topic: B::Hash) -> Self::In { - // Given that one can only communicate with the Substrate network via the `NetworkService` via message-passing, - // and given that methods on the network consensus gossip are not exposed but only reachable by passing a - // closure into `with_gossip` on 
the `NetworkService` this function needs to make use of the `NetworkStream` - // construction. - // - // We create a oneshot channel and pass the sender within a closure to the network. At some point in the future - // the network passes the message channel back through the oneshot channel. But the consumer of this function - // expects a stream, not a stream within a oneshot. This complexity is abstracted within `NetworkStream`, - // waiting for the oneshot to resolve and from there on acting like a normal message channel. - let (tx, rx) = oneshot::channel(); - self.with_gossip(move |gossip, _| { - let inner_rx: Box + Send> = Box::new(gossip - .messages_for(GRANDPA_ENGINE_ID, topic) - .map(|x| Ok(x)) - .compat() - ); - let _ = tx.send(inner_rx); - }); - NetworkStream::PollingOneshot(rx) + let stream = self.messages_for(GRANDPA_ENGINE_ID, topic) + .map(|x| Ok(x)) + .compat(); + Box::new(stream) } fn register_validator(&self, validator: Arc>) { - self.with_gossip( - move |gossip, context| gossip.register_validator(context, GRANDPA_ENGINE_ID, validator) - ) + unimplemented!() } fn gossip_message(&self, topic: B::Hash, data: Vec, force: bool) { - let msg = ConsensusMessage { - engine_id: GRANDPA_ENGINE_ID, - data, - }; - - self.with_gossip( - move |gossip, ctx| gossip.multicast(ctx, topic, msg, force) - ) + self.multicast(topic, GRANDPA_ENGINE_ID, data, force) } fn register_gossip_message(&self, topic: B::Hash, data: Vec) { - let msg = ConsensusMessage { - engine_id: GRANDPA_ENGINE_ID, - data, - }; - - self.with_gossip(move |gossip, _| gossip.register_message(topic, msg)) + self.register_message(topic, GRANDPA_ENGINE_ID, data) } fn send_message(&self, who: Vec, data: Vec) { - let msg = ConsensusMessage { - engine_id: GRANDPA_ENGINE_ID, - data, - }; - - self.with_gossip(move |gossip, ctx| for who in &who { - gossip.send_message(ctx, who, msg.clone()) - }) + for who in &who { + self.send_message(who, GRANDPA_ENGINE_ID, data.clone()) + } } fn report(&self, who: network::PeerId, cost_benefit: i32) { - self.report_peer(who, cost_benefit) + // TODO: self.report_peer(who, cost_benefit) } fn announce(&self, block: B::Hash, associated_data: Vec) { - self.announce_block(block, associated_data) + // TODO: self.announce_block(block, associated_data) } fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - NetworkService::set_sync_fork_request(self, peers, hash, number) - } -} - -/// A stream used by NetworkBridge in its implementation of Network. Given a oneshot that eventually returns a channel -/// which eventually returns messages, instead of: -/// -/// 1. polling the oneshot until it returns a message channel -/// -/// 2. polling the message channel for messages -/// -/// `NetworkStream` combines the two steps into one, requiring a consumer to only poll `NetworkStream` to retrieve -/// messages directly. 
-pub enum NetworkStream { - PollingOneshot(oneshot::Receiver), - PollingTopicNotifications(R), -} - -impl Stream for NetworkStream -where - R: Stream, -{ - type Item = R::Item; - type Error = (); - - fn poll(&mut self) -> Poll, Self::Error> { - match self { - NetworkStream::PollingOneshot(oneshot) => { - match oneshot.poll() { - Ok(futures::Async::Ready(mut stream)) => { - let poll_result = stream.poll(); - *self = NetworkStream::PollingTopicNotifications(stream); - poll_result - }, - Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), - Err(_) => Err(()) - } - }, - NetworkStream::PollingTopicNotifications(stream) => { - stream.poll() - }, - } + // TODO: NetworkService::set_sync_fork_request(self, peers, hash, number) } } /// Bridge between the underlying network service, gossiping consensus messages and Grandpa -pub(crate) struct NetworkBridge> { - service: N, +pub(crate) struct NetworkBridge { + service: GossipEngine, validator: Arc>, neighbor_sender: periodic::NeighborPacketSender, } -impl> NetworkBridge { +impl NetworkBridge { /// Create a new NetworkBridge to the given NetworkService. Returns the service /// handle and a future that must be polled to completion to finish startup. /// On creation it will register previous rounds' votes with the gossip /// service taken from the VoterSetState. - pub(crate) fn new( + pub(crate) fn new( service: N, config: crate::Config, set_state: crate::environment::SharedVoterSetState, @@ -299,7 +219,7 @@ impl> NetworkBridge { ); let validator = Arc::new(validator); - service.register_validator(validator.clone()); + let service = GossipEngine::new(service, &b"/sub/grandpa"[..], GRANDPA_ENGINE_ID, validator.clone()); { // register all previous votes with the gossip service so that they're @@ -407,7 +327,8 @@ impl> NetworkBridge { }); let topic = round_topic::(round.0, set_id.0); - let incoming = self.service.messages_for(topic) + let incoming = Compat::new(self.service.messages_for(GRANDPA_ENGINE_ID, topic) + .map(|item| Ok::<_, ()>(item))) .filter_map(|notification| { let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); if let Err(ref e) = decoded { @@ -460,7 +381,7 @@ impl> NetworkBridge { .map_err(|()| Error::Network(format!("Failed to receive message on unbounded stream"))); let (tx, out_rx) = mpsc::unbounded(); - let outgoing = OutgoingMessages:: { + let outgoing = OutgoingMessages::> { round: round.0, set_id: set_id.0, network: self.service.clone(), @@ -507,7 +428,7 @@ impl> NetworkBridge { self.neighbor_sender.clone(), ); - let outgoing = CommitsOut::::new( + let outgoing = CommitsOut::>::new( self.service.clone(), set_id.0, is_voter, @@ -665,7 +586,7 @@ fn incoming_global>( .map_err(|()| Error::Network(format!("Failed to receive message on unbounded stream"))) } -impl> Clone for NetworkBridge { +impl Clone for NetworkBridge { fn clone(&self) -> Self { NetworkBridge { service: self.service.clone(), diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index e8b399aef39b6..59389b2b35251 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -18,7 +18,6 @@ use futures::sync::mpsc; use futures::prelude::*; -use network::consensus_gossip as network_gossip; use network::test::{Block, Hash}; use network_gossip::Validator; use tokio::runtime::current_thread; @@ -111,7 +110,7 @@ impl network_gossip::ValidatorContext for TestNetwork { } struct Tester { - net_handle: super::NetworkBridge, + net_handle: 
super::NetworkBridge, gossip_validator: Arc>, events: mpsc::UnboundedReceiver, } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 883dc0e35c0d6..89c8bae384e93 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -48,8 +48,8 @@ use sr_primitives::traits::{ use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use crate::{ - CommandOrError, Commit, Config, Error, Network, Precommit, Prevote, - PrimaryPropose, SignedMessage, NewAuthoritySet, VoterCommand, + CommandOrError, Commit, Config, Error, Precommit, Prevote, PrimaryPropose, + SignedMessage, NewAuthoritySet, VoterCommand, }; use consensus_common::SelectChain; @@ -375,20 +375,20 @@ impl SharedVoterSetState { } /// The environment we run GRANDPA in. -pub(crate) struct Environment, RA, SC, VR> { +pub(crate) struct Environment { pub(crate) client: Arc>, pub(crate) select_chain: SC, pub(crate) voters: Arc>, pub(crate) config: Config, pub(crate) authority_set: SharedAuthoritySet>, pub(crate) consensus_changes: SharedConsensusChanges>, - pub(crate) network: crate::communication::NetworkBridge, + pub(crate) network: crate::communication::NetworkBridge, pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, pub(crate) voting_rule: VR, } -impl, RA, SC, VR> Environment { +impl Environment { /// Updates the voter set state using the given closure. The write lock is /// held during evaluation of the closure and the environment's voter set /// state is set to its result if successful. @@ -404,15 +404,13 @@ impl, RA, SC, VR> Environment, B, E, N, RA, SC, VR> +impl, B, E, RA, SC, VR> grandpa::Chain> -for Environment +for Environment where Block: 'static, B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: Network + 'static, - N::In: 'static, SC: SelectChain + 'static, VR: VotingRule>, RA: Send + Sync, @@ -545,15 +543,13 @@ pub(crate) fn ancestry, E, RA>( Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl, N, RA, SC, VR> +impl, RA, SC, VR> voter::Environment> -for Environment +for Environment where Block: 'static, B: Backend + 'static, E: CallExecutor + 'static + Send + Sync, - N: Network + 'static + Send, - N::In: 'static + Send, RA: 'static + Send + Sync, SC: SelectChain + 'static, VR: VotingRule>, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 80d2350982481..b27cdca300647 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -92,7 +92,7 @@ mod observer; mod until_imported; mod voting_rule; -pub use communication::Network; +pub use network_gossip::{GossipEngine, Network as AbstractNetwork}; pub use finality_proof::FinalityProofProvider; pub use justification::GrandpaJustification; pub use light_import::light_block_import; @@ -278,9 +278,8 @@ pub(crate) trait BlockSyncRequester { fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); } -impl BlockSyncRequester for NetworkBridge where +impl BlockSyncRequester for NetworkBridge where Block: BlockT, - N: communication::Network, { fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor) { NetworkBridge::set_sync_fork_request(self, peers, hash, number) @@ -449,11 +448,11 @@ where )) } -fn global_communication, B, E, N, RA>( +fn global_communication, B, E, RA>( set_id: SetId, voters: &Arc>, client: &Arc>, - network: &NetworkBridge, + network: &NetworkBridge, keystore: &Option, ) -> ( impl Stream< @@ 
-467,7 +466,6 @@ fn global_communication, B, E, N, RA>( ) where B: Backend, E: CallExecutor + Send + Sync, - N: Network, RA: Send + Sync, NumberFor: BlockNumberOps, { @@ -550,8 +548,7 @@ pub fn run_grandpa_voter, N, RA, SC, VR, X>( Block::Hash: Ord, B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: Network + Send + Sync + 'static, - N::In: Send + 'static, + N: AbstractNetwork + Send + Clone + 'static, SC: SelectChain + 'static, VR: VotingRule> + Clone + 'static, NumberFor: BlockNumberOps, @@ -641,17 +638,15 @@ pub fn run_grandpa_voter, N, RA, SC, VR, X>( /// Future that powers the voter. #[must_use] -struct VoterWork, RA, SC, VR> { +struct VoterWork { voter: Box>> + Send>, - env: Arc>, + env: Arc>, voter_commands_rx: mpsc::UnboundedReceiver>>, } -impl VoterWork +impl VoterWork where Block: BlockT, - N: Network + Sync, - N::In: Send + 'static, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, @@ -662,7 +657,7 @@ where fn new( client: Arc>, config: Config, - network: NetworkBridge, + network: NetworkBridge, select_chain: SC, voting_rule: VR, persistent_data: PersistentData, @@ -822,11 +817,9 @@ where } } -impl Future for VoterWork +impl Future for VoterWork where Block: BlockT, - N: Network + Sync, - N::In: Send + 'static, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, @@ -883,8 +876,7 @@ pub fn run_grandpa, N, RA, SC, VR, X>( Block::Hash: Ord, B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: Network + Send + Sync + 'static, - N::In: Send + 'static, + N: AbstractNetwork + Send + Clone + 'static, SC: SelectChain + 'static, NumberFor: BlockNumberOps, DigestFor: Encode, @@ -909,15 +901,14 @@ pub fn setup_disabled_grandpa, RA, N>( B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, RA: Send + Sync + 'static, - N: Network + Send + Sync + 'static, - N::In: Send + 'static, + N: AbstractNetwork + Send + Sync + 'static, { register_finality_tracker_inherent_data_provider( client, inherent_data_providers, )?; - network.register_validator(Arc::new(network::consensus_gossip::DiscardAll)); + //network.register_validator(Arc::new(network_gossip::DiscardAll)); Ok(()) } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index b182eaaaa5e61..3d33aa426a514 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -23,6 +23,7 @@ use grandpa::{ BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet }; use log::{debug, info, warn}; +use network_gossip::Network as AbstractNetwork; use consensus_common::SelectChain; use client_api::{CallExecutor, backend::Backend}; @@ -32,7 +33,7 @@ use primitives::{H256, Blake2Hasher}; use crate::{ global_communication, CommandOrError, CommunicationIn, Config, environment, - LinkHalf, Network, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, + LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, }; use crate::authorities::SharedAuthoritySet; use crate::communication::NetworkBridge; @@ -159,8 +160,7 @@ pub fn run_grandpa_observer, N, RA, SC>( ) -> ::client_api::error::Result + Send + 'static> where B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: Network + Send + Sync + 'static, - N::In: Send + 'static, + N: AbstractNetwork + Send + Clone + 'static, SC: SelectChain + 'static, NumberFor: BlockNumberOps, RA: Send + Sync + 'static, @@ -200,20 +200,18 @@ pub fn run_grandpa_observer, N, RA, SC>( 
/// Future that powers the observer. #[must_use] -struct ObserverWork, N: Network, E, Backend, RA> { +struct ObserverWork, E, Backend, RA> { observer: Box>> + Send>, client: Arc>, - network: NetworkBridge, + network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: mpsc::UnboundedReceiver>>, } -impl ObserverWork +impl ObserverWork where B: BlockT, - N: Network, - N::In: Send + 'static, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, @@ -221,7 +219,7 @@ where { fn new( client: Arc>, - network: NetworkBridge, + network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: mpsc::UnboundedReceiver>>, @@ -325,11 +323,9 @@ where } } -impl Future for ObserverWork +impl Future for ObserverWork where B: BlockT, - N: Network, - N::In: Send + 'static, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml new file mode 100644 index 0000000000000..590439958d6ca --- /dev/null +++ b/client/network-gossip/Cargo.toml @@ -0,0 +1,19 @@ +[package] +description = "Gossiping for the Substrate network protocol" +name = "substrate-network-gossip" +version = "2.0.0" +license = "GPL-3.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +async-std = "1.0" +log = "0.4.8" +futures01 = { package = "futures", version = "0.1.29" } +futures-preview = { version = "0.3.0-alpha.19", features = ["compat"] } +futures-timer = "0.4.0" +lru-cache = "0.1.2" +libp2p = { version = "0.13.0", default-features = false, features = ["libp2p-websocket"] } +network = { package = "substrate-network", path = "../network" } +parking_lot = "0.9.0" +sr-primitives = { path = "../../primitives/sr-primitives" } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs new file mode 100644 index 0000000000000..9f88b8bac172b --- /dev/null +++ b/client/network-gossip/src/bridge.rs @@ -0,0 +1,244 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::Network; +use crate::state_machine::{ConsensusGossip, Validator, TopicNotification}; + +use network::Context; +use network::message::generic::ConsensusMessage; +use network::{Event, config::Roles}; + +use futures::{prelude::*, channel::mpsc, compat::Compat01As03}; +use libp2p::PeerId; +use parking_lot::Mutex; +use sr_primitives::{traits::Block as BlockT, ConsensusEngineId}; +use std::{borrow::Cow, sync::Arc, time::Duration}; + +pub struct GossipEngine { + inner: Arc>>, +} + +struct GossipEngineInner { + state_machine: ConsensusGossip, + context: Box + Send>, +} + +impl GossipEngine { + /// Create a new instance. 
+	pub fn new(
+		network: N,
+		proto_name: impl Into<Cow<'static, [u8]>>,
+		engine_id: ConsensusEngineId,
+		validator: Arc<dyn Validator<B>>,
+	) -> Self where B: 'static {
+		let proto_name = proto_name.into();
+
+		let mut state_machine = ConsensusGossip::new();
+		let mut context = Box::new(ContextOverService {
+			network: network.clone(),
+			proto_name: proto_name.clone(),
+		});
+
+		network.register_notif_protocol(proto_name.clone(), engine_id, Vec::new());
+		state_machine.register_validator(&mut *context, engine_id, validator);
+
+		let inner = Arc::new(Mutex::new(GossipEngineInner {
+			state_machine,
+			context,
+		}));
+
+		let gossip_engine = GossipEngine {
+			inner: inner.clone(),
+		};
+
+		async_std::task::spawn({
+			let inner = Arc::downgrade(&inner);
+			async move {
+				loop {
+					async_std::task::sleep(Duration::from_millis(1100)).await;
+					if let Some(inner) = inner.upgrade() {
+						let mut inner = inner.lock();
+						let inner = &mut *inner;
+						inner.state_machine.tick(&mut *inner.context);
+					} else {
+						break;
+					}
+				}
+			}
+		});
+
+		async_std::task::spawn(async move {
+			let mut stream = Compat01As03::new(network.events_stream());
+			while let Some(Ok(event)) = stream.next().await {
+				match event {
+					Event::NotifOpened { remote, proto_name: msg_proto_name } => {
+						if msg_proto_name != proto_name {
+							continue;
+						}
+						let mut inner = inner.lock();
+						let inner = &mut *inner;
+						// TODO: for now we hard-code the roles to FULL; fix that
+						inner.state_machine.new_peer(&mut *inner.context, remote, Roles::FULL);
+					}
+					Event::NotifClosed { remote, proto_name: msg_proto_name } => {
+						if msg_proto_name != proto_name {
+							continue;
+						}
+						let mut inner = inner.lock();
+						let inner = &mut *inner;
+						inner.state_machine.peer_disconnected(&mut *inner.context, remote);
+					},
+					Event::NotifMessages { remote, messages } => {
+						let mut inner = inner.lock();
+						let inner = &mut *inner;
+						inner.state_machine.on_incoming(
+							&mut *inner.context,
+							remote,
+							messages.into_iter()
+								.filter_map(|(proto, data)| if proto == proto_name {
+									Some(ConsensusMessage { engine_id, data })
+								} else { None })
+								.collect()
+						);
+					},
+					Event::Dht(_) => {}
+				}
+			}
+		});
+
+		gossip_engine
+	}
+
+	/// Closes all notification streams.
+	pub fn abort(&self) {
+		self.inner.lock().state_machine.abort();
+	}
+
+	/// Registers a message without propagating it to any peers. The message
+	/// becomes available to new peers or when the service is asked to gossip
+	/// the message's topic. No validation is performed on the message; if the
+	/// message is already expired it should be dropped on the next garbage
+	/// collection.
+	pub fn register_message(
+		&self,
+		topic: B::Hash,
+		engine_id: ConsensusEngineId,
+		message: Vec<u8>,
+	) {
+		let message = ConsensusMessage {
+			engine_id,
+			data: message,
+		};
+
+		self.inner.lock().state_machine.register_message(topic, message);
+	}
+
+	/// Broadcast all messages with given topic.
+	pub fn broadcast_topic(&self, topic: B::Hash, force: bool) {
+		let mut inner = self.inner.lock();
+		let inner = &mut *inner;
+		inner.state_machine.broadcast_topic(&mut *inner.context, topic, force);
+	}
+
+	/// Get data of valid, incoming messages for a topic (but might have expired meanwhile)
+	pub fn messages_for(&self, engine_id: ConsensusEngineId, topic: B::Hash)
+		-> mpsc::UnboundedReceiver<TopicNotification>
+	{
+		self.inner.lock().state_machine.messages_for(engine_id, topic)
+	}
+
+	/// Send all messages with given topic to a peer.
+ pub fn send_topic( + &self, + who: &PeerId, + topic: B::Hash, + engine_id: ConsensusEngineId, + force: bool + ) { + let mut inner = self.inner.lock(); + let inner = &mut *inner; + inner.state_machine.send_topic(&mut *inner.context, who, topic, engine_id, force) + } + + /// Multicast a message to all peers. + pub fn multicast( + &self, + topic: B::Hash, + engine_id: ConsensusEngineId, + message: Vec, + force: bool, + ) { + let message = ConsensusMessage { + engine_id, + data: message, + }; + + let mut inner = self.inner.lock(); + let inner = &mut *inner; + inner.state_machine.multicast(&mut *inner.context, topic, message, force) + } + + /// Send addressed message to a peer. The message is not kept or multicast + /// later on. + pub fn send_message( + &self, + who: &PeerId, + engine_id: ConsensusEngineId, + message: Vec, + ) { + let mut inner = self.inner.lock(); + let inner = &mut *inner; + inner.state_machine.send_message(&mut *inner.context, who, ConsensusMessage { + engine_id, + data: message, + }); + } +} + +impl Clone for GossipEngine { + fn clone(&self) -> Self { + GossipEngine { + inner: self.inner.clone() + } + } +} + +struct ContextOverService { + network: N, + proto_name: Cow<'static, [u8]>, +} + +impl Context for ContextOverService { + fn report_peer(&mut self, who: PeerId, reputation: i32) { + self.network.report_peer(who, reputation); + } + + fn disconnect_peer(&mut self, who: PeerId) { + self.network.disconnect_peer(who) + } + + fn send_consensus(&mut self, who: PeerId, messages: Vec) { + // TODO: send batch + for message in messages { + self.network.write_notif(who.clone(), self.proto_name.clone(), message.engine_id, message.data); + } + } + + fn send_chain_specific(&mut self, who: PeerId, message: Vec) { + unreachable!() // TODO: handle that + } +} + diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs new file mode 100644 index 0000000000000..0b42f1aa1e71f --- /dev/null +++ b/client/network-gossip/src/lib.rs @@ -0,0 +1,84 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +pub use self::bridge::GossipEngine; +// TODO: remove +pub use self::state_machine::*; + +use network::{specialization::NetworkSpecialization, Event, ExHashT, NetworkService, PeerId}; +use network::message::generic::ConsensusMessage; +use sr_primitives::{traits::Block as BlockT, ConsensusEngineId}; +use std::{borrow::Cow, sync::Arc}; + +mod bridge; +mod state_machine; + +/// Abstraction over a network. +pub trait Network { + /// Returns a stream of events representing what happens on the network. + fn events_stream(&self) -> Box + Send>; + + /// Adjust the reputation of a node. + fn report_peer(&self, peer_id: PeerId, reputation: i32); + + /// Force-disconnect a peer. + fn disconnect_peer(&mut self, who: PeerId); + + /// Send a notification to a peer. 
+ fn write_notif(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec); + + /// Registers a notifications protocol. + /// + /// See the documentation of [`NetworkService:register_notif_protocol`] for more information. + fn register_notif_protocol( + &self, + proto_name: impl Into>, + engine_id: ConsensusEngineId, + handshake: impl Into> + ); +} + +impl, H: ExHashT> Network for Arc> { + fn events_stream(&self) -> Box + Send> { + Box::new(NetworkService::events_stream(self)) + } + + fn report_peer(&self, peer_id: PeerId, reputation: i32) { + NetworkService::report_peer(self, peer_id, reputation); + } + + fn disconnect_peer(&mut self, who: PeerId) { + unimplemented!() // TODO: + } + + fn write_notif(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec) { + let message = ConsensusMessage { + engine_id, + data: message, + }; + + NetworkService::write_notif(self, who, proto_name, message) + } + + fn register_notif_protocol( + &self, + proto_name: impl Into>, + engine_id: ConsensusEngineId, + handshake: impl Into> + ) { + NetworkService::register_notif_protocol(self, proto_name, engine_id, handshake) + } +} diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs new file mode 100644 index 0000000000000..6ca1cb93f2339 --- /dev/null +++ b/client/network-gossip/src/state_machine.rs @@ -0,0 +1,920 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Utility for gossip of network messages between nodes. +//! Handles chain-specific and standard BFT messages. +//! +//! Gossip messages are separated by two categories: "topics" and consensus engine ID. +//! The consensus engine ID is sent over the wire with the message, while the topic is not, +//! with the expectation that the topic can be derived implicitly from the content of the +//! message, assuming it is valid. +//! +//! Topics are a single 32-byte tag associated with a message, used to group those messages +//! in an opaque way. Consensus code can invoke `broadcast_topic` to attempt to send all messages +//! under a single topic to all peers who don't have them yet, and `send_topic` to +//! send all messages under a single topic to a specific peer. +//! +//! Each consensus engine ID must have an associated, +//! registered `Validator` for all gossip messages. The primary role of this `Validator` is +//! to process incoming messages from peers, and decide whether to discard them or process +//! them. It also decides whether to re-broadcast the message. +//! +//! The secondary role of the `Validator` is to check if a message is allowed to be sent to a given +//! peer. All messages, before being sent, will be checked against this filter. +//! This enables the validator to use information it's aware of about connected peers to decide +//! 
whether to send messages to them at any given moment in time - In particular, to wait until +//! peers can accept and process the message before sending it. +//! +//! Lastly, the fact that gossip validators can decide not to rebroadcast messages +//! opens the door for neighbor status packets to be baked into the gossip protocol. +//! These status packets will typically contain light pieces of information +//! used to inform peers of a current view of protocol state. + +use std::collections::{HashMap, HashSet, hash_map::Entry}; +use std::sync::Arc; +use std::iter; +use std::time; +use log::{trace, debug}; +use futures::channel::mpsc; +use lru_cache::LruCache; +use libp2p::PeerId; +use sr_primitives::traits::{Block as BlockT, Hash, HashFor}; +use sr_primitives::ConsensusEngineId; +pub use network::message::generic::{Message, ConsensusMessage}; +use network::Context; +use network::config::Roles; + +// FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 +const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096; + +const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); +/// Reputation change when a peer sends us a gossip message that we didn't know about. +const GOSSIP_SUCCESS_REPUTATION_CHANGE: i32 = 1 << 4; +/// Reputation change when a peer sends us a gossip message that we already knew about. +const DUPLICATE_GOSSIP_REPUTATION_CHANGE: i32 = -(1 << 2); +/// Reputation change when a peer sends us a gossip message for an unknown engine, whatever that +/// means. +const UNKNOWN_GOSSIP_REPUTATION_CHANGE: i32 = -(1 << 6); +/// Reputation change when a peer sends a message from a topic it isn't registered on. +const UNREGISTERED_TOPIC_REPUTATION_CHANGE: i32 = -(1 << 10); + +struct PeerConsensus { + known_messages: HashSet, + filtered_messages: HashMap, + roles: Roles, +} + +/// Topic stream message with sender. +#[derive(Debug, Eq, PartialEq)] +pub struct TopicNotification { + /// Message data. + pub message: Vec, + /// Sender if available. + pub sender: Option, +} + +struct MessageEntry { + message_hash: B::Hash, + topic: B::Hash, + message: ConsensusMessage, + sender: Option, +} + +/// Consensus message destination. +pub enum MessageRecipient { + /// Send to all peers. + BroadcastToAll, + /// Send to peers that don't have that message already. + BroadcastNew, + /// Send to specific peer. + Peer(PeerId), +} + +/// The reason for sending out the message. +#[derive(Eq, PartialEq, Copy, Clone)] +#[cfg_attr(test, derive(Debug))] +pub enum MessageIntent { + /// Requested broadcast. + Broadcast { + /// How many times this message was previously filtered by the gossip + /// validator when trying to propagate to a given peer. + previous_attempts: usize + }, + /// Requested broadcast to all peers. + ForcedBroadcast, + /// Periodic rebroadcast of all messages to all peers. + PeriodicRebroadcast, +} + +/// Message validation result. +pub enum ValidationResult { + /// Message should be stored and propagated under given topic. + ProcessAndKeep(H), + /// Message should be processed, but not propagated. + ProcessAndDiscard(H), + /// Message should be ignored. + Discard, +} + +impl MessageIntent { + fn broadcast() -> MessageIntent { + MessageIntent::Broadcast { previous_attempts: 0 } + } +} + +/// Validation context. Allows reacting to incoming messages by sending out further messages. +pub trait ValidatorContext { + /// Broadcast all messages with given topic to peers that do not have it yet. 
+ fn broadcast_topic(&mut self, topic: B::Hash, force: bool); + /// Broadcast a message to all peers that have not received it previously. + fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool); + /// Send addressed message to a peer. + fn send_message(&mut self, who: &PeerId, message: Vec); + /// Send all messages with given topic to a peer. + fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool); +} + +struct NetworkContext<'g, 'p, B: BlockT> { + gossip: &'g mut ConsensusGossip, + protocol: &'p mut dyn Context, + engine_id: ConsensusEngineId, +} + +impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { + /// Broadcast all messages with given topic to peers that do not have it yet. + fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { + self.gossip.broadcast_topic(self.protocol, topic, force); + } + + /// Broadcast a message to all peers that have not received it previously. + fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { + self.gossip.multicast( + self.protocol, + topic, + ConsensusMessage{ data: message, engine_id: self.engine_id.clone() }, + force, + ); + } + + /// Send addressed message to a peer. + fn send_message(&mut self, who: &PeerId, message: Vec) { + self.protocol.send_consensus(who.clone(), vec![ConsensusMessage { + engine_id: self.engine_id, + data: message, + }]); + } + + /// Send all messages with given topic to a peer. + fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { + self.gossip.send_topic(self.protocol, who, topic, self.engine_id, force); + } +} + +fn propagate<'a, B: BlockT, I>( + protocol: &mut dyn Context, + messages: I, + intent: MessageIntent, + peers: &mut HashMap>, + validators: &HashMap>>, +) + where I: Clone + IntoIterator, // (msg_hash, topic, message) +{ + let mut check_fns = HashMap::new(); + let mut message_allowed = move |who: &PeerId, intent: MessageIntent, topic: &B::Hash, message: &ConsensusMessage| { + let engine_id = message.engine_id; + let check_fn = match check_fns.entry(engine_id) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(vacant) => match validators.get(&engine_id) { + None => return false, // treat all messages with no validator as not allowed + Some(validator) => vacant.insert(validator.message_allowed()), + } + }; + + (check_fn)(who, intent, topic, &message.data) + }; + + for (id, ref mut peer) in peers.iter_mut() { + let mut batch = Vec::new(); + for (message_hash, topic, message) in messages.clone() { + let previous_attempts = peer.filtered_messages + .get(&message_hash) + .cloned() + .unwrap_or(0); + + let intent = match intent { + MessageIntent::Broadcast { .. } => + if peer.known_messages.contains(&message_hash) { + continue; + } else { + MessageIntent::Broadcast { previous_attempts } + }, + MessageIntent::PeriodicRebroadcast => + if peer.known_messages.contains(&message_hash) { + MessageIntent::PeriodicRebroadcast + } else { + // peer doesn't know message, so the logic should treat it as an + // initial broadcast. 
+ MessageIntent::Broadcast { previous_attempts } + }, + other => other, + }; + + if !message_allowed(id, intent, &topic, &message) { + let count = peer.filtered_messages + .entry(message_hash.clone()) + .or_insert(0); + + *count += 1; + + continue; + } + + peer.filtered_messages.remove(message_hash); + peer.known_messages.insert(message_hash.clone()); + + trace!(target: "gossip", "Propagating to {}: {:?}", id, message); + batch.push(message.clone()) + } + protocol.send_consensus(id.clone(), batch); + } +} + +/// Validates consensus messages. +pub trait Validator: Send + Sync { + /// New peer is connected. + fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _roles: Roles) { + } + + /// New connection is dropped. + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) { + } + + /// Validate consensus message. + fn validate( + &self, + context: &mut dyn ValidatorContext, + sender: &PeerId, + data: &[u8] + ) -> ValidationResult; + + /// Produce a closure for validating messages on a given topic. + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, _data| false) + } + + /// Produce a closure for filtering egress messages. + fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_who, _intent, _topic, _data| true) + } +} + +/// Consensus network protocol handler. Manages statements and candidate requests. +pub struct ConsensusGossip { + peers: HashMap>, + live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec>>, + messages: Vec>, + known_messages: LruCache, + validators: HashMap>>, + next_broadcast: time::Instant, +} + +impl ConsensusGossip { + /// Create a new instance. + pub fn new() -> Self { + ConsensusGossip { + peers: HashMap::new(), + live_message_sinks: HashMap::new(), + messages: Default::default(), + known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), + validators: Default::default(), + next_broadcast: time::Instant::now() + REBROADCAST_INTERVAL, + } + } + + /// Closes all notification streams. + pub fn abort(&mut self) { + self.live_message_sinks.clear(); + } + + /// Register message validator for a message type. + pub fn register_validator( + &mut self, + protocol: &mut dyn Context, + engine_id: ConsensusEngineId, + validator: Arc> + ) { + self.register_validator_internal(engine_id, validator.clone()); + let peers: Vec<_> = self.peers.iter().map(|(id, peer)| (id.clone(), peer.roles)).collect(); + for (id, roles) in peers { + let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; + validator.new_peer(&mut context, &id, roles); + } + } + + fn register_validator_internal(&mut self, engine_id: ConsensusEngineId, validator: Arc>) { + self.validators.insert(engine_id, validator.clone()); + } + + /// Handle new connected peer. 
+ pub fn new_peer(&mut self, protocol: &mut dyn Context, who: PeerId, roles: Roles) { + // light nodes are not valid targets for consensus gossip messages + if !roles.is_full() { + return; + } + + trace!(target:"gossip", "Registering {:?} {}", roles, who); + self.peers.insert(who.clone(), PeerConsensus { + known_messages: HashSet::new(), + filtered_messages: HashMap::new(), + roles, + }); + for (engine_id, v) in self.validators.clone() { + let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; + v.new_peer(&mut context, &who, roles); + } + } + + fn register_message_hashed( + &mut self, + message_hash: B::Hash, + topic: B::Hash, + message: ConsensusMessage, + sender: Option, + ) { + if self.known_messages.insert(message_hash.clone(), ()).is_none() { + self.messages.push(MessageEntry { + message_hash, + topic, + message, + sender, + }); + } + } + + /// Registers a message without propagating it to any peers. The message + /// becomes available to new peers or when the service is asked to gossip + /// the message's topic. No validation is performed on the message, if the + /// message is already expired it should be dropped on the next garbage + /// collection. + pub fn register_message( + &mut self, + topic: B::Hash, + message: ConsensusMessage, + ) { + let message_hash = HashFor::::hash(&message.data[..]); + self.register_message_hashed(message_hash, topic, message, None); + } + + /// Call when a peer has been disconnected to stop tracking gossip status. + pub fn peer_disconnected(&mut self, protocol: &mut dyn Context, who: PeerId) { + for (engine_id, v) in self.validators.clone() { + let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; + v.peer_disconnected(&mut context, &who); + } + } + + /// Perform periodic maintenance + pub fn tick(&mut self, protocol: &mut dyn Context) { + self.collect_garbage(); + if time::Instant::now() >= self.next_broadcast { + self.rebroadcast(protocol); + self.next_broadcast = time::Instant::now() + REBROADCAST_INTERVAL; + } + } + + /// Rebroadcast all messages to all peers. + fn rebroadcast(&mut self, protocol: &mut dyn Context) { + let messages = self.messages.iter() + .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); + propagate(protocol, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validators); + } + + /// Broadcast all messages with given topic. + pub fn broadcast_topic(&mut self, protocol: &mut dyn Context, topic: B::Hash, force: bool) { + let messages = self.messages.iter() + .filter_map(|entry| + if entry.topic == topic { Some((&entry.message_hash, &entry.topic, &entry.message)) } else { None } + ); + let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::broadcast() }; + propagate(protocol, messages, intent, &mut self.peers, &self.validators); + } + + /// Prune old or no longer relevant consensus messages. Provide a predicate + /// for pruning, which returns `false` when the items with a given topic should be pruned. 
+ pub fn collect_garbage(&mut self) { + self.live_message_sinks.retain(|_, sinks| { + sinks.retain(|sink| !sink.is_closed()); + !sinks.is_empty() + }); + + let known_messages = &mut self.known_messages; + let before = self.messages.len(); + let validators = &self.validators; + + let mut check_fns = HashMap::new(); + let mut message_expired = move |entry: &MessageEntry| { + let engine_id = entry.message.engine_id; + let check_fn = match check_fns.entry(engine_id) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(vacant) => match validators.get(&engine_id) { + None => return true, // treat all messages with no validator as expired + Some(validator) => vacant.insert(validator.message_expired()), + } + }; + + (check_fn)(entry.topic, &entry.message.data) + }; + + self.messages.retain(|entry| !message_expired(entry)); + + trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", + before - self.messages.len(), + self.messages.len(), + known_messages.len(), + ); + + for (_, ref mut peer) in self.peers.iter_mut() { + peer.known_messages.retain(|h| known_messages.contains_key(h)); + peer.filtered_messages.retain(|h, _| known_messages.contains_key(h)); + } + } + + /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) + pub fn messages_for(&mut self, engine_id: ConsensusEngineId, topic: B::Hash) + -> mpsc::UnboundedReceiver + { + let (tx, rx) = mpsc::unbounded(); + for entry in self.messages.iter_mut() + .filter(|e| e.topic == topic && e.message.engine_id == engine_id) + { + tx.unbounded_send(TopicNotification { + message: entry.message.data.clone(), + sender: entry.sender.clone(), + }) + .expect("receiver known to be live; qed"); + } + + self.live_message_sinks.entry((engine_id, topic)).or_default().push(tx); + + rx + } + + /// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic + /// already known, the message is old, its source peers isn't a registered peer or the connection + /// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None` + /// in all other cases. 
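// A sketch of consuming the stream returned by `messages_for` above, using the
// same blocking iterator as the tests below; the engine id is illustrative.
// Already-registered messages for the topic are yielded first, followed by live
// ones, and the iterator ends once the corresponding sink is dropped (for
// example by `abort`).
fn drain_topic<B: BlockT>(gossip: &mut ConsensusGossip<B>, topic: B::Hash) {
	let incoming = futures::executor::block_on_stream(gossip.messages_for([0, 0, 0, 0], topic));
	for TopicNotification { message, sender } in incoming {
		// `sender` is `None` for messages that were registered locally.
		log::trace!(target: "gossip", "{} bytes from {:?}", message.len(), sender);
	}
}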
+ pub fn on_incoming( + &mut self, + protocol: &mut dyn Context, + who: PeerId, + messages: Vec, + ) { + trace!(target:"gossip", "Received {} messages from peer {}", messages.len(), who); + for message in messages { + let message_hash = HashFor::::hash(&message.data[..]); + + if self.known_messages.contains_key(&message_hash) { + trace!(target:"gossip", "Ignored already known message from {}", who); + protocol.report_peer(who.clone(), DUPLICATE_GOSSIP_REPUTATION_CHANGE); + continue; + } + + let engine_id = message.engine_id; + // validate the message + let validation = self.validators.get(&engine_id) + .cloned() + .map(|v| { + let mut context = NetworkContext { gossip: self, protocol, engine_id }; + v.validate(&mut context, &who, &message.data) + }); + + let validation_result = match validation { + Some(ValidationResult::ProcessAndKeep(topic)) => Some((topic, true)), + Some(ValidationResult::ProcessAndDiscard(topic)) => Some((topic, false)), + Some(ValidationResult::Discard) => None, + None => { + trace!(target:"gossip", "Unknown message engine id {:?} from {}", engine_id, who); + protocol.report_peer(who.clone(), UNKNOWN_GOSSIP_REPUTATION_CHANGE); + protocol.disconnect_peer(who.clone()); + continue; + } + }; + + if let Some((topic, keep)) = validation_result { + protocol.report_peer(who.clone(), GOSSIP_SUCCESS_REPUTATION_CHANGE); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + peer.known_messages.insert(message_hash); + if let Entry::Occupied(mut entry) = self.live_message_sinks.entry((engine_id, topic)) { + debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); + entry.get_mut().retain(|sink| { + if let Err(e) = sink.unbounded_send(TopicNotification { + message: message.data.clone(), + sender: Some(who.clone()) + }) { + trace!(target: "gossip", "Error broadcasting message notification: {:?}", e); + } + !sink.is_closed() + }); + if entry.get().is_empty() { + entry.remove_entry(); + } + } + if keep { + self.register_message_hashed(message_hash, topic, message, Some(who.clone())); + } + } else { + trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); + protocol.report_peer(who.clone(), UNREGISTERED_TOPIC_REPUTATION_CHANGE); + } + } else { + trace!(target:"gossip", "Handled valid one hop message from peer {}", who); + } + } + } + + /// Send all messages with given topic to a peer. 
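// A test-style sketch of the incoming path above, assuming a no-op `Context`
// implementation and that a validator (such as the `AllowAll` helper in the
// tests below) is already registered for engine id `[0, 0, 0, 0]`; the peer id
// and payload are illustrative.
fn feed_one_message<B: BlockT>(gossip: &mut ConsensusGossip<B>, network: &mut dyn Context<B>) {
	let peer = PeerId::random();
	// Light peers are ignored by `new_peer`; a full role is required for the
	// message below to be accepted as coming from a registered peer.
	gossip.new_peer(network, peer.clone(), Roles::FULL);
	gossip.on_incoming(network, peer, vec![
		ConsensusMessage { data: vec![42], engine_id: [0, 0, 0, 0] },
	]);
	// With `ProcessAndKeep`, the message is now retained, pushed to any live
	// `messages_for` sinks for its topic, and re-gossiped on later ticks.
}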
+ pub fn send_topic( + &mut self, + protocol: &mut dyn Context, + who: &PeerId, + topic: B::Hash, + engine_id: ConsensusEngineId, + force: bool + ) { + let validator = self.validators.get(&engine_id); + let mut message_allowed = match validator { + None => return, // treat all messages with no validator as not allowed + Some(validator) => validator.message_allowed(), + }; + + if let Some(ref mut peer) = self.peers.get_mut(who) { + let mut batch = Vec::new(); + for entry in self.messages.iter().filter(|m| m.topic == topic && m.message.engine_id == engine_id) { + let intent = if force { + MessageIntent::ForcedBroadcast + } else { + let previous_attempts = peer.filtered_messages + .get(&entry.message_hash) + .cloned() + .unwrap_or(0); + + MessageIntent::Broadcast { previous_attempts } + }; + + if !force && peer.known_messages.contains(&entry.message_hash) { + continue; + } + + if !message_allowed(who, intent, &entry.topic, &entry.message.data) { + let count = peer.filtered_messages + .entry(entry.message_hash) + .or_insert(0); + + *count += 1; + + continue; + } + + peer.filtered_messages.remove(&entry.message_hash); + peer.known_messages.insert(entry.message_hash.clone()); + + trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); + batch.push(ConsensusMessage { + engine_id: engine_id.clone(), + data: entry.message.data.clone(), + }); + } + protocol.send_consensus(who.clone(), batch); + } + } + + /// Multicast a message to all peers. + pub fn multicast( + &mut self, + protocol: &mut dyn Context, + topic: B::Hash, + message: ConsensusMessage, + force: bool, + ) { + let message_hash = HashFor::::hash(&message.data); + self.register_message_hashed(message_hash, topic, message.clone(), None); + let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::broadcast() }; + propagate(protocol, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validators); + } + + /// Send addressed message to a peer. The message is not kept or multicast + /// later on. + pub fn send_message( + &mut self, + protocol: &mut dyn Context, + who: &PeerId, + message: ConsensusMessage, + ) { + let peer = match self.peers.get_mut(who) { + None => return, + Some(peer) => peer, + }; + + let message_hash = HashFor::::hash(&message.data); + + trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); + + peer.filtered_messages.remove(&message_hash); + peer.known_messages.insert(message_hash); + protocol.send_consensus(who.clone(), vec![message.clone()]); + } +} + +/// A gossip message validator that discards all messages. +pub struct DiscardAll; + +impl Validator for DiscardAll { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + _data: &[u8], + ) -> ValidationResult { + ValidationResult::Discard + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, _data| true) + } + + fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_who, _intent, _topic, _data| false) + } +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, atomic::{AtomicBool, Ordering}}; + use parking_lot::Mutex; + use sr_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + use futures::executor::block_on_stream; + + use super::*; + + type Block = RawBlock>; + + macro_rules! 
push_msg { + ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { + if $consensus.known_messages.insert($hash, ()).is_none() { + $consensus.messages.push(MessageEntry { + message_hash: $hash, + topic: $topic, + message: ConsensusMessage { data: $m, engine_id: [0, 0, 0, 0]}, + sender: None, + }); + } + } + } + + struct AllowAll; + impl Validator for AllowAll { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + _data: &[u8], + ) -> ValidationResult { + ValidationResult::ProcessAndKeep(H256::default()) + } + } + + #[test] + fn collects_garbage() { + struct AllowOne; + impl Validator for AllowOne { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + data: &[u8], + ) -> ValidationResult { + if data[0] == 1 { + ValidationResult::ProcessAndKeep(H256::default()) + } else { + ValidationResult::Discard + } + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, data| data[0] != 1) + } + } + + let prev_hash = H256::random(); + let best_hash = H256::random(); + let mut consensus = ConsensusGossip::::new(); + let m1_hash = H256::random(); + let m2_hash = H256::random(); + let m1 = vec![1, 2, 3]; + let m2 = vec![4, 5, 6]; + + push_msg!(consensus, prev_hash, m1_hash, m1); + push_msg!(consensus, best_hash, m2_hash, m2); + consensus.known_messages.insert(m1_hash, ()); + consensus.known_messages.insert(m2_hash, ()); + + let test_engine_id = Default::default(); + consensus.register_validator_internal(test_engine_id, Arc::new(AllowAll)); + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 2); + assert_eq!(consensus.known_messages.len(), 2); + + consensus.register_validator_internal(test_engine_id, Arc::new(AllowOne)); + + // m2 is expired + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 1); + // known messages are only pruned based on size. 
+ assert_eq!(consensus.known_messages.len(), 2); + assert!(consensus.known_messages.contains_key(&m2_hash)); + } + + #[test] + fn message_stream_include_those_sent_before_asking_for_stream() { + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); + + let message = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; + let topic = HashFor::::hash(&[1,2,3]); + + consensus.register_message(topic, message.clone()); + let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); + + assert_eq!(stream.next(), Some(TopicNotification { message: message.data, sender: None })); + } + + #[test] + fn can_keep_multiple_messages_per_topic() { + let mut consensus = ConsensusGossip::::new(); + + let topic = [1; 32].into(); + let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; + let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; + + consensus.register_message(topic, msg_a); + consensus.register_message(topic, msg_b); + + assert_eq!(consensus.messages.len(), 2); + } + + #[test] + fn can_keep_multiple_subscribers_per_topic() { + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); + + let data = vec![4, 5, 6]; + let message = ConsensusMessage { data: data.clone(), engine_id: [0, 0, 0, 0] }; + let topic = HashFor::::hash(&[1, 2, 3]); + + consensus.register_message(topic, message.clone()); + + let mut stream1 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); + let mut stream2 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); + + assert_eq!(stream1.next(), Some(TopicNotification { message: data.clone(), sender: None })); + assert_eq!(stream2.next(), Some(TopicNotification { message: data, sender: None })); + } + + #[test] + fn topics_are_localized_to_engine_id() { + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); + + let topic = [1; 32].into(); + let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; + let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 1] }; + + consensus.register_message(topic, msg_a); + consensus.register_message(topic, msg_b); + + let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); + + assert_eq!(stream.next(), Some(TopicNotification { message: vec![1, 2, 3], sender: None })); + + let _ = consensus.live_message_sinks.remove(&([0, 0, 0, 0], topic)); + assert_eq!(stream.next(), None); + } + + #[test] + fn keeps_track_of_broadcast_attempts() { + struct DummyNetworkContext; + impl Context for DummyNetworkContext { + fn report_peer(&mut self, _who: PeerId, _reputation: i32) {} + fn disconnect_peer(&mut self, _who: PeerId) {} + fn send_consensus(&mut self, _who: PeerId, _consensus: Vec) {} + fn send_chain_specific(&mut self, _who: PeerId, _message: Vec) {} + } + + // A mock gossip validator that never expires any message, allows + // setting whether messages should be allowed and keeps track of any + // messages passed to `message_allowed`. 
+ struct MockValidator { + allow: AtomicBool, + messages: Arc, MessageIntent)>>>, + } + + impl MockValidator { + fn new() -> MockValidator { + MockValidator { + allow: AtomicBool::new(false), + messages: Arc::new(Mutex::new(Vec::new())), + } + } + } + + impl Validator for MockValidator { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + _data: &[u8], + ) -> ValidationResult { + ValidationResult::ProcessAndKeep(H256::default()) + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, _data| false) + } + + fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + let messages = self.messages.clone(); + Box::new(move |_, intent, _, data| { + messages.lock().push((data.to_vec(), intent)); + self.allow.load(Ordering::SeqCst) + }) + } + } + + // we setup an instance of the mock gossip validator, add a new peer to + // it and register a message. + let mut consensus = ConsensusGossip::::new(); + let validator = Arc::new(MockValidator::new()); + consensus.register_validator_internal([0, 0, 0, 0], validator.clone()); + consensus.new_peer( + &mut DummyNetworkContext, + PeerId::random(), + Roles::AUTHORITY, + ); + + let data = vec![1, 2, 3]; + let msg = ConsensusMessage { data: data.clone(), engine_id: [0, 0, 0, 0] }; + consensus.register_message(H256::default(), msg); + + // tick the gossip handler and make sure it triggers a message rebroadcast + let mut tick = || { + consensus.next_broadcast = std::time::Instant::now(); + consensus.tick(&mut DummyNetworkContext); + }; + + // by default we won't allow the message we registered, so everytime we + // tick the gossip handler, the message intent should be kept as + // `Broadcast` but the previous attempts should be incremented. + tick(); + assert_eq!( + validator.messages.lock().pop().unwrap(), + (data.clone(), MessageIntent::Broadcast { previous_attempts: 0 }), + ); + + tick(); + assert_eq!( + validator.messages.lock().pop().unwrap(), + (data.clone(), MessageIntent::Broadcast { previous_attempts: 1 }), + ); + + // we set the validator to allow the message to go through + validator.allow.store(true, Ordering::SeqCst); + + // we still get the same message intent but it should be delivered now + tick(); + assert_eq!( + validator.messages.lock().pop().unwrap(), + (data.clone(), MessageIntent::Broadcast { previous_attempts: 2 }), + ); + + // ticking the gossip handler again the message intent should change to + // `PeriodicRebroadcast` since it was sent. 
+ tick(); + assert_eq!( + validator.messages.lock().pop().unwrap(), + (data.clone(), MessageIntent::PeriodicRebroadcast), + ); + } +} diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 19c720c15011f..ec2fc2f259296 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] +async-std = "1.0" bytes = "0.4.12" derive_more = "0.15.0" either = "1.5.3" diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index a0299bc340ce2..b11c3163a59ce 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -16,10 +16,11 @@ use crate::{ debug_info, discovery::DiscoveryBehaviour, discovery::DiscoveryOut, DiscoveryNetBehaviour, - protocol::event::DhtEvent + protocol::event::{Event, DhtEvent}, }; use crate::{ExHashT, specialization::NetworkSpecialization}; use crate::protocol::{CustomMessageOutcome, Protocol}; +use consensus::{import_queue::{IncomingBlock, Origin}, BlockOrigin}; use futures::prelude::*; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; @@ -27,7 +28,7 @@ use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}; use libp2p::core::{nodes::Substream, muxing::StreamMuxerBox}; use log::{debug, warn}; -use sr_primitives::traits::Block as BlockT; +use sr_primitives::{traits::{Block as BlockT, NumberFor}, Justification}; use std::iter; use void; @@ -50,8 +51,10 @@ pub struct Behaviour, H: ExHashT> { /// Event generated by `Behaviour`. pub enum BehaviourOut { - SubstrateAction(CustomMessageOutcome), - Dht(DhtEvent), + BlockImport(BlockOrigin, Vec>), + JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + Event(Event), } impl, H: ExHashT> Behaviour { @@ -127,7 +130,33 @@ Behaviour { impl, H: ExHashT> NetworkBehaviourEventProcess> for Behaviour { fn inject_event(&mut self, event: CustomMessageOutcome) { - self.events.push(BehaviourOut::SubstrateAction(event)); + match event { + CustomMessageOutcome::BlockImport(origin, blocks) => + self.events.push(BehaviourOut::BlockImport(origin, blocks)), + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => + self.events.push(BehaviourOut::JustificationImport(origin, hash, nb, justification)), + CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => + self.events.push(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), + CustomMessageOutcome::NotifOpened { remote, proto_names } => + for proto_name in proto_names { + self.events.push(BehaviourOut::Event(Event::NotifOpened { + remote: remote.clone(), + proto_name + })); + }, + CustomMessageOutcome::NotifClosed { remote, proto_names } => + for proto_name in proto_names { + self.events.push(BehaviourOut::Event(Event::NotifClosed { + remote: remote.clone(), + proto_name + })); + }, + CustomMessageOutcome::NotifMessages { remote, messages } => { + let ev = Event::NotifMessages { remote, messages }; + self.events.push(BehaviourOut::Event(ev)); + }, + CustomMessageOutcome::None => {} + } } } @@ -166,16 +195,16 @@ impl, H: ExHashT> NetworkBehaviourEventPr self.substrate.add_discovered_nodes(iter::once(peer_id)); } DiscoveryOut::ValueFound(results) => { - self.events.push(BehaviourOut::Dht(DhtEvent::ValueFound(results))); + self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueFound(results)))); } DiscoveryOut::ValueNotFound(key) => { - 
self.events.push(BehaviourOut::Dht(DhtEvent::ValueNotFound(key))); + self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueNotFound(key)))); } DiscoveryOut::ValuePut(key) => { - self.events.push(BehaviourOut::Dht(DhtEvent::ValuePut(key))); + self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePut(key)))); } DiscoveryOut::ValuePutFailed(key) => { - self.events.push(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key))); + self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePutFailed(key)))); } } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index d10345c2f3817..a05f69a77ce1a 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -27,12 +27,12 @@ use crate::on_demand_layer::OnDemand; use crate::service::{ExHashT, TransactionPool}; use bitflags::bitflags; use consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; -use sr_primitives::traits::{Block as BlockT}; +use sr_primitives::{ConsensusEngineId, traits::{Block as BlockT}}; use libp2p::identity::{Keypair, ed25519}; use libp2p::wasm_ext; use libp2p::{PeerId, Multiaddr, multiaddr}; use core::{fmt, iter}; -use std::{error::Error, fs, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, sync::Arc}; +use std::{borrow::Cow, error::Error, fs, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, sync::Arc}; use zeroize::Zeroize; /// Network initialization parameters. @@ -261,6 +261,12 @@ pub struct NetworkConfiguration { pub node_name: String, /// Configuration for the transport layer. pub transport: TransportConfig, + /// Extra protocol names for the gossiping scheme, plus a legacy consensus engine ID for + /// backwards-compatibility. + /// + /// If a remote tries to open a substream with one of these protocol names, we know that it is + /// a gossiping protocol that requires a handshake message. + pub extra_notif_protos: Vec<(Cow<'static, [u8]>, ConsensusEngineId)>, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, } @@ -285,6 +291,7 @@ impl Default for NetworkConfiguration { allow_private_ipv4: true, wasm_external_transport: None, }, + extra_notif_protos: Vec::new(), max_parallel_downloads: 5, } } diff --git a/client/network/src/legacy_proto/mod.rs b/client/network/src/generic_proto.rs similarity index 93% rename from client/network/src/legacy_proto/mod.rs rename to client/network/src/generic_proto.rs index bbe795528be9d..099fe0be3286c 100644 --- a/client/network/src/legacy_proto/mod.rs +++ b/client/network/src/generic_proto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -pub use self::behaviour::{LegacyProto, LegacyProtoOut}; +pub use self::behaviour::{GenericProto, GenericProtoOut}; mod behaviour; mod handler; diff --git a/client/network/src/legacy_proto/behaviour.rs b/client/network/src/generic_proto/behaviour.rs similarity index 89% rename from client/network/src/legacy_proto/behaviour.rs rename to client/network/src/generic_proto/behaviour.rs index d1d378174a21b..cc1631f2850bd 100644 --- a/client/network/src/legacy_proto/behaviour.rs +++ b/client/network/src/generic_proto/behaviour.rs @@ -15,8 +15,8 @@ // along with Substrate. If not, see . 
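// A sketch of how a gossiping protocol would be declared through the new
// `extra_notif_protos` configuration field above, assuming GRANDPA-like values;
// the protocol name and the four-byte engine id are illustrative.
fn example_network_config() -> NetworkConfiguration {
	let mut config = NetworkConfiguration::default();
	// A remote opening a substream with this name is treated as speaking a
	// gossiping protocol that starts with a handshake message; the engine id
	// keeps message routing working for peers on the legacy substream.
	config.extra_notif_protos.push((
		std::borrow::Cow::Borrowed(&b"/example/grandpa/1"[..]),
		*b"FRNK",
	));
	config
}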
use crate::{DiscoveryNetBehaviour, config::ProtocolId}; -use crate::legacy_proto::handler::{CustomProtoHandlerProto, CustomProtoHandlerOut, CustomProtoHandlerIn}; -use crate::legacy_proto::upgrade::RegisteredProtocol; +use crate::generic_proto::handler::{NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}; +use crate::generic_proto::upgrade::RegisteredProtocol; use bytes::BytesMut; use fnv::FnvHashMap; use futures::prelude::*; @@ -32,9 +32,24 @@ use tokio_io::{AsyncRead, AsyncWrite}; /// Network behaviour that handles opening substreams for custom protocols with other nodes. /// +/// ## Legacy vs new protocol +/// +/// The `GenericProto` behaves as following: +/// +/// - Whenever a connection is established, we open a single substream (called "legay protocol" in +/// the source code). This substream name depends on the `protocol_id` and `versions` passed at +/// initialization. If the remote refuses this substream, we close the connection. +/// +/// - For each registered protocol, we also open an additional substream for this protocol. If the +/// remote refuses this substream, then it's fine. +/// +/// - Whenever we want to send a message, we pass either `None` to force the legacy substream, or +/// `Some` to indicate a registered protocol. If the registered protocol was refused by the remote, +/// we use the legacy instead. +/// /// ## How it works /// -/// The role of the `LegacyProto` is to synchronize the following components: +/// The role of the `GenericProto` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. /// - The connection handler (see `handler.rs`) that handles individual connections. @@ -60,9 +75,12 @@ use tokio_io::{AsyncRead, AsyncWrite}; /// Note that this "banning" system is not an actual ban. If a "banned" node tries to connect to /// us, we accept the connection. The "banning" system is only about delaying dialing attempts. /// -pub struct LegacyProto< TSubstream> { - /// List of protocols to open with peers. Never modified. - protocol: RegisteredProtocol, +pub struct GenericProto { + /// Legacy protocol to open with peers. Never modified. + legacy_protocol: RegisteredProtocol, + + /// Notification protocols. Entries are only ever added and not removed. + notif_protocols: Vec<(Cow<'static, [u8]>, Vec)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: peerset::Peerset, @@ -79,7 +97,7 @@ pub struct LegacyProto< TSubstream> { next_incoming_index: peerset::IncomingIndex, /// Events to produce from `poll()`. - events: SmallVec<[NetworkBehaviourAction; 4]>, + events: SmallVec<[NetworkBehaviourAction; 4]>, /// Marker to pin the generics. marker: PhantomData, @@ -186,13 +204,11 @@ struct IncomingPeer { incoming_id: peerset::IncomingIndex, } -/// Event that can be emitted by the `LegacyProto`. +/// Event that can be emitted by the `GenericProto`. #[derive(Debug)] -pub enum LegacyProtoOut { +pub enum GenericProtoOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, /// Id of the node we have opened a connection with. peer_id: PeerId, /// Endpoint used for this custom protocol. @@ -225,17 +241,18 @@ pub enum LegacyProtoOut { }, } -impl LegacyProto { +impl GenericProto { /// Creates a `CustomProtos`. 
pub fn new( protocol: impl Into, versions: &[u8], peerset: peerset::Peerset, ) -> Self { - let protocol = RegisteredProtocol::new(protocol, versions); + let legacy_protocol = RegisteredProtocol::new(protocol, versions); - LegacyProto { - protocol, + GenericProto { + legacy_protocol, + notif_protocols: Vec::new(), peerset, peers: FnvHashMap::default(), incoming: SmallVec::new(), @@ -245,6 +262,23 @@ impl LegacyProto { } } + /// Registers a new notifications protocol. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &mut self, + proto_name: impl Into>, + handshake: impl Into> + ) { + self.notif_protocols.push((proto_name.into(), handshake.into())); + } + + /// Returns a list of all the notification protocols that have been registered. + pub fn notif_protocols_names(&self) -> impl ExactSizeIterator { + self.notif_protocols.iter().map(|(n, _)| &**n) + } + /// Returns the list of all the peers we have an open channel to. pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) @@ -296,7 +330,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until } @@ -317,7 +351,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { open: false, connected_point, banned_until } @@ -349,18 +383,24 @@ impl LegacyProto { /// /// Also note that even we have a valid open substream, it may in fact be already closed /// without us knowing, in which case the packet will not be received. 
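// A sketch of the intended call pattern for `register_notif_protocol` above and
// for the reworked `send_packet` shown next, assuming registration happens
// before any connection is established; the protocol name and handshake bytes
// are illustrative.
fn setup_gossip_proto<TSubstream>(behaviour: &mut GenericProto<TSubstream>) {
	// Connections opened later will offer this substream in addition to the
	// legacy one; connections that are already open keep their original list.
	behaviour.register_notif_protocol(&b"/example/gossip/1"[..], b"initial-handshake".to_vec());
}

fn send_gossip<TSubstream>(behaviour: &mut GenericProto<TSubstream>, target: &PeerId, data: Vec<u8>) {
	// `Some(..)` selects the registered notifications substream; if the remote
	// refused it, the handler falls back to the legacy substream.
	behaviour.send_packet(target, Some(std::borrow::Cow::Borrowed(&b"/example/gossip/1"[..])), data);
}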
- pub fn send_packet(&mut self, target: &PeerId, message: Vec) { + pub fn send_packet( + &mut self, + target: &PeerId, + proto_name: Option>, + message: impl Into>, + ) { if !self.is_open(target) { return; } - trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); + trace!(target: "sub-libp2p", "External API => Packet for {:?} with protocol {:?}", target, proto_name); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: target.clone(), - event: CustomProtoHandlerIn::SendCustomMessage { - message, - } + event: NotifsHandlerIn::Send { + message: message.into(), + proto_name: proto_name.map(Into::into), + }, }); } @@ -418,7 +458,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *occ_entry.into_mut() = PeerState::Enabled { connected_point, open }; }, @@ -436,7 +476,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *occ_entry.into_mut() = PeerState::Enabled { connected_point, open: false }; }, @@ -493,7 +533,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: entry.key().clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until: None } }, @@ -557,7 +597,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *state = PeerState::Enabled { open: false, connected_point }; @@ -599,13 +639,13 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *state = PeerState::Disabled { open: false, connected_point, banned_until: None }; } } -impl DiscoveryNetBehaviour for LegacyProto { +impl DiscoveryNetBehaviour for GenericProto { fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); @@ -614,15 +654,15 @@ impl DiscoveryNetBehaviour for LegacyProto { } } -impl NetworkBehaviour for LegacyProto +impl NetworkBehaviour for GenericProto where - TSubstream: AsyncRead + AsyncWrite, + TSubstream: AsyncRead + AsyncWrite + Send + 'static, { - type ProtocolsHandler = CustomProtoHandlerProto; - type OutEvent = LegacyProtoOut; + type ProtocolsHandler = NotifsHandlerProto; + type OutEvent = GenericProtoOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - CustomProtoHandlerProto::new(self.protocol.clone()) + NotifsHandlerProto::new(self.legacy_protocol.clone(), self.notif_protocols.clone()) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { @@ -639,7 +679,7 @@ where debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", peer_id); 
self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *st = PeerState::Enabled { open: false, connected_point }; } @@ -682,7 +722,7 @@ where debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *st = PeerState::Disabled { open: false, connected_point, banned_until }; } @@ -712,7 +752,7 @@ where } if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -729,7 +769,7 @@ where self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -751,7 +791,7 @@ where if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -822,10 +862,10 @@ where fn inject_node_event( &mut self, source: PeerId, - event: CustomProtoHandlerOut, + event: NotifsHandlerOut, ) { match event { - CustomProtoHandlerOut::CustomProtocolClosed { reason } => { + NotifsHandlerOut::CustomProtocolClosed { reason } => { debug!(target: "sub-libp2p", "Handler({:?}) => Closed: {}", source, reason); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { @@ -836,7 +876,7 @@ where }; debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { reason, peer_id: source.clone(), }; @@ -852,7 +892,7 @@ where debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: source.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *entry.into_mut() = PeerState::Disabled { @@ -878,8 +918,8 @@ where } } - CustomProtoHandlerOut::CustomProtocolOpen { version } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Open: version {:?}", source, version); + NotifsHandlerOut::CustomProtocolOpen => { + debug!(target: "sub-libp2p", "Handler({:?}) => Open", source); let endpoint = match self.peers.get_mut(&source) { Some(PeerState::Enabled { ref mut open, ref connected_point }) | Some(PeerState::DisabledPendingEnable { ref mut open, ref connected_point, .. 
}) | @@ -894,8 +934,7 @@ where }; debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = LegacyProtoOut::CustomProtocolOpen { - version, + let event = GenericProtoOut::CustomProtocolOpen { peer_id: source, endpoint, }; @@ -903,11 +942,11 @@ where self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } - CustomProtoHandlerOut::CustomMessage { message } => { + NotifsHandlerOut::CustomMessage { proto_name, message } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = LegacyProtoOut::CustomMessage { + let event = GenericProtoOut::CustomMessage { peer_id: source, message, }; @@ -915,25 +954,25 @@ where self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } - CustomProtoHandlerOut::Clogged { messages } => { + NotifsHandlerOut::Clogged { messages } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ pretty large", source); - self.events.push(NetworkBehaviourAction::GenerateEvent(LegacyProtoOut::Clogged { + self.events.push(NetworkBehaviourAction::GenerateEvent(GenericProtoOut::Clogged { peer_id: source, messages, })); } // Don't do anything for non-severe errors except report them. - CustomProtoHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { + NotifsHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", source, error) } - CustomProtoHandlerOut::ProtocolError { error, .. } => { + NotifsHandlerOut::ProtocolError { error, .. } => { debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", source, error); // A severe protocol error happens when we detect a "bad" node, such as a node on @@ -951,7 +990,7 @@ where _params: &mut impl PollParameters, ) -> Async< NetworkBehaviourAction< - CustomProtoHandlerIn, + NotifsHandlerIn, Self::OutEvent, >, > { @@ -1013,7 +1052,7 @@ where debug!(target: "sub-libp2p", "Handler({:?}) <= Enable now that ban has expired", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *peer_state = PeerState::Enabled { connected_point, open }; } diff --git a/client/network/src/generic_proto/handler.rs b/client/network/src/generic_proto/handler.rs new file mode 100644 index 0000000000000..0061888f4e30b --- /dev/null +++ b/client/network/src/generic_proto/handler.rs @@ -0,0 +1,22 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +pub use self::group::{NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}; + +mod group; +mod legacy; +mod notif_in; +mod notif_out; diff --git a/client/network/src/generic_proto/handler/group.rs b/client/network/src/generic_proto/handler/group.rs new file mode 100644 index 0000000000000..7f5ed11b29a36 --- /dev/null +++ b/client/network/src/generic_proto/handler/group.rs @@ -0,0 +1,425 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::generic_proto::{ + handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, + handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, + handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, + upgrade::{NotificationsIn, NotificationsOut, RegisteredProtocol, SelectUpgrade, UpgradeCollec}, +}; +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId}; +use libp2p::core::either::{EitherError, EitherOutput}; +use libp2p::core::upgrade::{EitherUpgrade, UpgradeError, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, +}; +use log::error; +use std::{borrow::Cow, error, io}; +use tokio_io::{AsyncRead, AsyncWrite}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +pub struct NotifsHandlerProto { + /// Prototypes for handlers for ingoing substreams. + in_handlers: Vec>, + + /// Prototypes for handlers for outgoing substreams. + out_handlers: Vec>, + + /// Prototype for handler for backwards-compatibility. + legacy: LegacyProtoHandlerProto, +} + +/// The actual handler once the connection has been established. +pub struct NotifsHandler { + /// Handlers for ingoing substreams. + in_handlers: Vec>, + + /// Handlers for outgoing substreams. + out_handlers: Vec>, + + /// Handler for backwards-compatibility. + legacy: LegacyProtoHandler, + + /// State of this handler. + enabled: EnabledState, + + /// If we receive inbound substream requests while in initialization mode, + /// we push the corresponding index here and process them when the handler + /// gets enabled/disabled. 
+ pending_in: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum EnabledState { + Initial, + Enabled, + Disabled, +} + +impl IntoProtocolsHandler for NotifsHandlerProto +where + TSubstream: AsyncRead + AsyncWrite + Send + 'static, +{ + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let in_handlers = self.in_handlers.iter() + .map(|h| h.inbound_protocol()) + .collect::>(); + + SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsHandler { + in_handlers: self.in_handlers + .into_iter() + .map(|p| p.into_handler(remote_peer_id, connected_point)) + .collect(), + out_handlers: self.out_handlers + .into_iter() + .map(|p| p.into_handler(remote_peer_id, connected_point)) + .collect(), + legacy: self.legacy.into_handler(remote_peer_id, connected_point), + enabled: EnabledState::Initial, + pending_in: Vec::new(), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerIn { + /// The node should start using custom protocols. + Enable, + + /// The node should stop using custom protocols. + Disable, + + /// Sends a message through a custom protocol substream. + Send { + /// Name of the protocol for the message, or `None` to force the legacy protocol. + /// + /// If `Some`, must match one of the registered protocols. For backwards-compatibility + /// reasons, if the remote doesn't support this protocol, we use the legacy substream. + proto_name: Option>, + + /// The message to send. + message: Vec, + }, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Opened a custom protocol with the remote. + CustomProtocolOpen, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Reason why the substream closed, for diagnostic purposes. + reason: Cow<'static, str>, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Engine corresponding to the message, or `None` if this came from the legacy substream. + proto_name: Option>, + + /// Message that has been received. + /// + /// If `proto_name` is `None`, this decodes to a `Message`. If `proto_name` is `Some`, + /// this directly decodes to a gossiping message. + message: BytesMut, + }, + + /// A substream to the remote is clogged. The send buffer is very large, and we should print + /// a diagnostic message and/or avoid sending more data. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. 
+ error: Box, + }, +} + +impl NotifsHandlerProto { + pub fn new(legacy: RegisteredProtocol, list: impl Into, Vec)>>) -> Self { + let list = list.into(); + + NotifsHandlerProto { + in_handlers: list.clone().into_iter().map(|(p, _)| NotifsInHandlerProto::new(p)).collect(), + out_handlers: list.clone().into_iter().map(|(p, _)| NotifsOutHandlerProto::new(p)).collect(), + legacy: LegacyProtoHandlerProto::new(legacy), + } + } +} + +impl ProtocolsHandler for NotifsHandler +where TSubstream: AsyncRead + AsyncWrite + Send + 'static { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Substream = TSubstream; + type Error = EitherError< + EitherError< + as ProtocolsHandler>::Error, + as ProtocolsHandler>::Error, + >, + as ProtocolsHandler>::Error, + >; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = EitherUpgrade; + type OutboundOpenInfo = Option; // Index within the `out_handlers`; None for legacy + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_handlers = self.in_handlers.iter() + .map(|h| h.listen_protocol().into_upgrade().1) + .collect::>(); + + let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); + SubstreamProtocol::new(proto) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output + ) { + match out { + EitherOutput::First((out, num)) => + self.in_handlers[num].inject_fully_negotiated_inbound(out), + EitherOutput::Second(out) => + self.legacy.inject_fully_negotiated_inbound(out), + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + num: Self::OutboundOpenInfo + ) { + match (out, num) { + (EitherOutput::First(out), Some(num)) => + self.out_handlers[num].inject_fully_negotiated_outbound(out, ()), + (EitherOutput::Second(out), None) => + self.legacy.inject_fully_negotiated_outbound(out, ()), + _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Enable => { + self.enabled = EnabledState::Enabled; + self.legacy.inject_event(LegacyProtoHandlerIn::Enable); + for handler in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Enable); + } + for num in self.pending_in.drain(..) { + self.in_handlers[num].inject_event(NotifsInHandlerIn::Accept(vec![1])); // TODO: message + } + }, + NotifsHandlerIn::Disable => { + self.enabled = EnabledState::Disabled; + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + for handler in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + for num in self.pending_in.drain(..) { + self.in_handlers[num].inject_event(NotifsInHandlerIn::Refuse); + } + }, + NotifsHandlerIn::Send { proto_name, message } => { + if let Some(proto_name) = proto_name { + for handler in &mut self.out_handlers { + if handler.is_open() && handler.protocol_name() == &proto_name[..] 
{ + handler.inject_event(NotifsOutHandlerIn::Send(message)); + return; + } + } + } + + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: Option, + err: ProtocolsHandlerUpgrErr> + ) { + log::error!("Dial upgrade error: {:?}", err); + match (err, num) { + (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => + self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timeout + ), + (ProtocolsHandlerUpgrErr::Timeout, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), + (ProtocolsHandlerUpgrErr::Timer, Some(num)) => + self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timer + ), + (ProtocolsHandlerUpgrErr::Timer, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => + self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), Some(num)) => + self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(err))), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + _ => error!("inject_dial_upgrade_error called with bad parameters"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + // Iterate over each handler and return the maximum value. + + let mut ret = self.legacy.connection_keep_alive(); + if ret.is_yes() { + return KeepAlive::Yes; + } + + for handler in &self.in_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + for handler in &self.out_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + ret + } + + fn poll( + &mut self, + ) -> Poll< + ProtocolsHandlerEvent, + Self::Error, + > { + for (handler_num, handler) in self.in_handlers.iter_mut().enumerate() { + if let Async::Ready(ev) = handler.poll().map_err(|e| EitherError::A(EitherError::A(e)))? 
{ + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + error!("Incoming substream handler tried to open a substream"), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest) => + match self.enabled { + EnabledState::Initial => self.pending_in.push(handler_num), + EnabledState::Enabled => + handler.inject_event(NotifsInHandlerIn::Accept(vec![1, 2, 3, 4])), // TODO: message + EnabledState::Disabled => + handler.inject_event(NotifsInHandlerIn::Refuse), + }, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { + let msg = NotifsHandlerOut::CustomMessage { + message, + proto_name: Some(handler.protocol_name().to_owned().into()), + }; + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(msg))); + }, + } + } + } + + for (handler_num, handler) in self.out_handlers.iter_mut().enumerate() { + if let Async::Ready(ev) = handler.poll().map_err(|e| EitherError::A(EitherError::B(e)))? { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::A), + info: Some(handler_num), + })), + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) => {}, // TODO: + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, // TODO: + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, // TODO: + } + } + } + + if let Async::Ready(ev) = self.legacy.poll().map_err(EitherError::B)? { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::B), + info: None, + })), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { .. }) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomProtocolOpen + ))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason }) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomProtocolClosed { reason } + ))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message, proto_name: None } + ))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Clogged { messages } + ))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + ))), + } + } + + Ok(Async::NotReady) + } +} diff --git a/client/network/src/legacy_proto/handler.rs b/client/network/src/generic_proto/handler/legacy.rs similarity index 91% rename from client/network/src/legacy_proto/handler.rs rename to client/network/src/generic_proto/handler/legacy.rs index 7bdbe4a31ff7c..d6181ca52b622 100644 --- a/client/network/src/legacy_proto/handler.rs +++ b/client/network/src/generic_proto/handler/legacy.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use crate::legacy_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; +use crate::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; use bytes::BytesMut; use futures::prelude::*; use futures03::{compat::Compat, TryFutureExt as _}; @@ -37,7 +37,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; /// /// Every time a connection with a remote starts, an instance of this struct is created and /// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `CustomProtoHandler`. It then handles all communications that are specific +/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific /// to Substrate on that single connection. /// /// Note that there can be multiple instance of this struct simultaneously for same peer. However @@ -87,7 +87,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; /// We consider that we are now "closed" if the remote closes all the existing substreams. /// Re-opening it can then be performed by closing all active substream and re-opening one. /// -pub struct CustomProtoHandlerProto { +pub struct LegacyProtoHandlerProto { /// Configuration for the protocol upgrade to negotiate. protocol: RegisteredProtocol, @@ -95,31 +95,28 @@ pub struct CustomProtoHandlerProto { marker: PhantomData, } -impl CustomProtoHandlerProto -where - TSubstream: AsyncRead + AsyncWrite, -{ - /// Builds a new `CustomProtoHandlerProto`. +impl LegacyProtoHandlerProto { + /// Builds a new `LegacyProtoHandlerProto`. pub fn new(protocol: RegisteredProtocol) -> Self { - CustomProtoHandlerProto { + LegacyProtoHandlerProto { protocol, marker: PhantomData, } } } -impl IntoProtocolsHandler for CustomProtoHandlerProto +impl IntoProtocolsHandler for LegacyProtoHandlerProto where TSubstream: AsyncRead + AsyncWrite, { - type Handler = CustomProtoHandler; + type Handler = LegacyProtoHandler; fn inbound_protocol(&self) -> RegisteredProtocol { self.protocol.clone() } fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - CustomProtoHandler { + LegacyProtoHandler { protocol: self.protocol, endpoint: connected_point.to_endpoint(), remote_peer_id: remote_peer_id.clone(), @@ -133,7 +130,7 @@ where } /// The actual handler once the connection has been established. -pub struct CustomProtoHandler { +pub struct LegacyProtoHandler { /// Configuration for the protocol upgrade to negotiate. protocol: RegisteredProtocol, @@ -152,7 +149,7 @@ pub struct CustomProtoHandler { /// /// This queue must only ever be modified to insert elements at the back, or remove the first /// element. - events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, } /// State of the handler. @@ -205,9 +202,9 @@ enum ProtocolState { Poisoned, } -/// Event that can be received by a `CustomProtoHandler`. +/// Event that can be received by a `LegacyProtoHandler`. #[derive(Debug)] -pub enum CustomProtoHandlerIn { +pub enum LegacyProtoHandlerIn { /// The node should start using custom protocols. Enable, @@ -221,9 +218,9 @@ pub enum CustomProtoHandlerIn { }, } -/// Event that can be emitted by a `CustomProtoHandler`. +/// Event that can be emitted by a `LegacyProtoHandler`. #[derive(Debug)] -pub enum CustomProtoHandlerOut { +pub enum LegacyProtoHandlerOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { /// Version of the protocol that has been opened. 
@@ -258,7 +255,7 @@ pub enum CustomProtoHandlerOut { }, } -impl CustomProtoHandler +impl LegacyProtoHandler where TSubstream: AsyncRead + AsyncWrite, { @@ -284,7 +281,7 @@ where } } else { - let event = CustomProtoHandlerOut::CustomProtocolOpen { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: incoming[0].protocol_version() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); @@ -338,7 +335,7 @@ where /// Polls the state for events. Optionally returns an event to produce. #[must_use] fn poll_state(&mut self) - -> Option> { + -> Option> { match mem::replace(&mut self.state, ProtocolState::Poisoned) { ProtocolState::Poisoned => { error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", @@ -366,7 +363,7 @@ where match deadline.poll() { Ok(Async::Ready(())) => { deadline = Delay::new(Duration::from_secs(60)).compat(); - let event = CustomProtoHandlerOut::ProtocolError { + let event = LegacyProtoHandlerOut::ProtocolError { is_severe: true, error: "Timeout when opening protocol".to_string().into(), }; @@ -392,7 +389,7 @@ where match substream.poll() { Ok(Async::NotReady) => substreams.push(substream), Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { - let event = CustomProtoHandlerOut::CustomMessage { + let event = LegacyProtoHandlerOut::CustomMessage { message }; substreams.push(substream); @@ -400,7 +397,7 @@ where return Some(ProtocolsHandlerEvent::Custom(event)); }, Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { messages }))) => { - let event = CustomProtoHandlerOut::Clogged { + let event = LegacyProtoHandlerOut::Clogged { messages, }; substreams.push(substream); @@ -410,7 +407,7 @@ where Ok(Async::Ready(None)) => { shutdown.push(substream); if substreams.is_empty() { - let event = CustomProtoHandlerOut::CustomProtocolClosed { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "All substreams have been closed by the remote".into(), }; self.state = ProtocolState::Disabled { @@ -422,7 +419,7 @@ where } Err(err) => { if substreams.is_empty() { - let event = CustomProtoHandlerOut::CustomProtocolClosed { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: format!("Error on the last substream: {:?}", err).into(), }; self.state = ProtocolState::Disabled { @@ -486,7 +483,7 @@ where } ProtocolState::Opening { .. 
} => { - let event = CustomProtoHandlerOut::CustomProtocolOpen { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: substream.protocol_version() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); @@ -523,10 +520,10 @@ where } } -impl ProtocolsHandler for CustomProtoHandler +impl ProtocolsHandler for LegacyProtoHandler where TSubstream: AsyncRead + AsyncWrite { - type InEvent = CustomProtoHandlerIn; - type OutEvent = CustomProtoHandlerOut; + type InEvent = LegacyProtoHandlerIn; + type OutEvent = LegacyProtoHandlerOut; type Substream = TSubstream; type Error = ConnectionKillError; type InboundProtocol = RegisteredProtocol; @@ -552,11 +549,11 @@ where TSubstream: AsyncRead + AsyncWrite { self.inject_fully_negotiated(proto); } - fn inject_event(&mut self, message: CustomProtoHandlerIn) { + fn inject_event(&mut self, message: LegacyProtoHandlerIn) { match message { - CustomProtoHandlerIn::Disable => self.disable(), - CustomProtoHandlerIn::Enable => self.enable(), - CustomProtoHandlerIn::SendCustomMessage { message } => + LegacyProtoHandlerIn::Disable => self.disable(), + LegacyProtoHandlerIn::Enable => self.enable(), + LegacyProtoHandlerIn::SendCustomMessage { message } => self.send_message(message), } } @@ -568,7 +565,7 @@ where TSubstream: AsyncRead + AsyncWrite { _ => false, }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(CustomProtoHandlerOut::ProtocolError { + self.events_queue.push(ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error: Box::new(err), })); @@ -609,12 +606,12 @@ where TSubstream: AsyncRead + AsyncWrite { } } -impl fmt::Debug for CustomProtoHandler +impl fmt::Debug for LegacyProtoHandler where TSubstream: AsyncRead + AsyncWrite, { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("CustomProtoHandler") + f.debug_struct("LegacyProtoHandler") .finish() } } diff --git a/client/network/src/generic_proto/handler/notif_in.rs b/client/network/src/generic_proto/handler/notif_in.rs new file mode 100644 index 0000000000000..8c696257cf3fb --- /dev/null +++ b/client/network/src/generic_proto/handler/notif_in.rs @@ -0,0 +1,277 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, +}; +use log::{error, warn}; +use smallvec::SmallVec; +use std::{borrow::Cow, error, fmt, marker::PhantomData}; +use tokio_io::{AsyncRead, AsyncWrite}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. 
+/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsInHandler`]. +pub struct NotifsInHandlerProto { + /// Configuration for the protocol upgrade to negotiate. + in_protocol: NotificationsIn, + + /// Marker to pin the generic type. + marker: PhantomData, +} + +impl NotifsInHandlerProto { + /// Builds a new `NotifsInHandlerProto`. + pub fn new( + proto_name: impl Into> + ) -> Self { + NotifsInHandlerProto { + in_protocol: NotificationsIn::new(proto_name), + marker: PhantomData, + } + } +} + +impl IntoProtocolsHandler for NotifsInHandlerProto +where + TSubstream: AsyncRead + AsyncWrite + 'static, +{ + type Handler = NotifsInHandler; + + fn inbound_protocol(&self) -> NotificationsIn { + self.in_protocol.clone() + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsInHandler { + in_protocol: self.in_protocol, + substream: None, + pending_accept_refuses: 0, + endpoint: connected_point.to_endpoint(), + remote_peer_id: remote_peer_id.clone(), + events_queue: SmallVec::new(), + } + } +} + +/// The actual handler once the connection has been established. +pub struct NotifsInHandler { + /// Configuration for the protocol upgrade to negotiate for inbound substreams. + in_protocol: NotificationsIn, + + /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have + /// any influence on the behaviour. + // TODO: remove? + remote_peer_id: PeerId, + + /// Whether we are the connection dialer or listener. Used only for logging purposes and + /// shouldn't have any influence on the behaviour. + // TODO: remove? + endpoint: Endpoint, + + /// Substream that is open with the remote. + substream: Option>, + + /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` messages + /// without the handler having time to respond with `Accept` or `Refuse`. Every time an + /// `OpenRequest` is emitted, we increment this variable in order to keep the state consistent. + pending_accept_refuses: usize, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, +} + +/// Event that can be received by a `NotifsInHandler`. +#[derive(Debug)] +pub enum NotifsInHandlerIn { + /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send + /// to the remote. + /// + /// The substream is now considered open, and `Notif` events can be received. + Accept(Vec), + + /// Can be sent back as a response to an `OpenRequest`. + Refuse, +} + +/// Event that can be emitted by a `NotifsInHandler`. +#[derive(Debug)] +pub enum NotifsInHandlerOut { + /// The remote wants to open a substream. + /// + /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent + /// back. + OpenRequest, + + /// The notifications substream has been closed by the remote. In order to avoid race + /// conditions, this does **not** cancel any previously-sent `OpenRequest`. + Closed, + + /// Received a message on the notifications substream. + /// + /// Can only happen after an `Accept` and before a `Closed`. + Notif(BytesMut), +} + +impl NotifsInHandler { + /// Returns the name of the protocol that we accept. 
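Each time the remote (re-)opens the inbound substream the handler emits an `OpenRequest`, and the outer layer must answer with `Accept` or `Refuse`; the `pending_accept_refuses` counter is what keeps stale answers harmless when the substream has already been re-opened in the meantime. Below is a minimal, std-only model of that bookkeeping; `InboundModel` and `Answer` are simplified stand-ins, not the real handler types.

// Editor's sketch: model of the `pending_accept_refuses` bookkeeping above.
#[derive(Debug)]
enum Answer {
    Accept(Vec<u8>),
    Refuse,
}

#[derive(Default)]
struct InboundModel {
    /// Number of `OpenRequest`s emitted that have not been answered yet.
    pending_accept_refuses: usize,
    /// Handshake actually sent back on the substream, if any.
    handshake_sent: Option<Vec<u8>>,
}

impl InboundModel {
    /// Called every time the remote (re-)opens the inbound substream.
    fn on_open_request(&mut self) {
        self.pending_accept_refuses += 1;
    }

    /// Called when the outer layer answers a previous `OpenRequest`.
    fn on_answer(&mut self, answer: Answer) {
        self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) {
            Some(v) => v,
            None => return, // Answer without a pending request: inconsistent, ignore.
        };
        // Only the answer to the *latest* request matters; earlier ones are obsolete.
        if self.pending_accept_refuses != 0 {
            return;
        }
        match answer {
            Answer::Accept(handshake) => self.handshake_sent = Some(handshake),
            Answer::Refuse => self.handshake_sent = None,
        }
    }
}

fn main() {
    let mut model = InboundModel::default();
    // The substream is opened, closed and re-opened before we manage to answer.
    model.on_open_request();
    model.on_open_request();
    // The first answer is obsolete and ignored; the second one takes effect.
    model.on_answer(Answer::Refuse);
    model.on_answer(Answer::Accept(b"handshake".to_vec()));
    assert_eq!(model.handshake_sent.as_deref(), Some(&b"handshake"[..]));
}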
+ pub fn protocol_name(&self) -> &[u8] { + self.in_protocol.protocol_name() + } +} + +impl ProtocolsHandler for NotifsInHandler +where TSubstream: AsyncRead + AsyncWrite + 'static { + type InEvent = NotifsInHandlerIn; + type OutEvent = NotifsInHandlerOut; + type Substream = TSubstream; + type Error = ConnectionKillError; + type InboundProtocol = NotificationsIn; + type OutboundProtocol = DeniedUpgrade; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.in_protocol.clone()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output + ) { + if self.substream.is_some() { + warn!(target: "sub-libp2p", "Received duplicate inbound substream"); + return; + } + + self.substream = Some(proto); + self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest)); + self.pending_accept_refuses += 1; + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + _: Self::OutboundOpenInfo + ) { + // We never emit any outgoing substream. + void::unreachable(out) + } + + fn inject_event(&mut self, message: NotifsInHandlerIn) { + self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Inconsistent state: received Accept/Refuse when no \ + pending request exists"); + return; + } + }; + + // If we send multiple `OpenRequest`s in a row, we will receive back multiple + // `Accept`/`Refuse` messages. All of them are obsolete except the last one. + if self.pending_accept_refuses != 0 { + return; + } + + match (message, self.substream.as_mut()) { + (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), + (NotifsInHandlerIn::Accept(_), None) => {}, + (NotifsInHandlerIn::Refuse, _) => self.substream = None, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { + unimplemented!() // TODO: + /*let is_severe = match err { + ProtocolsHandlerUpgrErr::Upgrade(_) => true, + _ => false, + }; + + self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::ProtocolError { + is_severe, + error: Box::new(err), + }));*/ + } + + fn connection_keep_alive(&self) -> KeepAlive { + if self.substream.is_some() { + KeepAlive::Yes + } else { + KeepAlive::No + } + } + + fn poll( + &mut self, + ) -> Poll< + ProtocolsHandlerEvent, + Self::Error, + > { + // Flush the events queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Ok(Async::Ready(event)) + } + + if let Some(substream) = self.substream.as_mut() { + match substream.poll() { + Ok(Async::Ready(Some(msg))) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg)))), + Ok(Async::NotReady) => {}, + Ok(Async::Ready(None)) | Err(_) => return Err(ConnectionKillError), // TODO: ? 
+ } + } + + Ok(Async::NotReady) + } +} + +impl fmt::Debug for NotifsInHandler +where + TSubstream: AsyncRead + AsyncWrite, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsInHandler") + .finish() + } +} + +// TODO: remove +#[derive(Debug)] +pub struct ConnectionKillError; + +impl error::Error for ConnectionKillError { +} + +impl fmt::Display for ConnectionKillError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + unimplemented!() // TODO: + } +} diff --git a/client/network/src/generic_proto/handler/notif_out.rs b/client/network/src/generic_proto/handler/notif_out.rs new file mode 100644 index 0000000000000..4f968eecd43de --- /dev/null +++ b/client/network/src/generic_proto/handler/notif_out.rs @@ -0,0 +1,323 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream}; +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, +}; +use log::error; +use smallvec::SmallVec; +use std::{borrow::Cow, fmt, io, marker::PhantomData, mem, time::Duration}; +use tokio_io::{AsyncRead, AsyncWrite}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsOutHandler`]. +/// +/// See the documentation of [`NotifsOutHandler`] for more information. +pub struct NotifsOutHandlerProto { + /// Name of the protocol to negotiate. + proto_name: Cow<'static, [u8]>, + + /// Marker to pin the generic type. + marker: PhantomData, +} + +impl NotifsOutHandlerProto { + /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the + /// notifications substream. + pub fn new(proto_name: impl Into>) -> Self { + NotifsOutHandlerProto { + proto_name: proto_name.into(), + marker: PhantomData, + } + } +} + +impl IntoProtocolsHandler for NotifsOutHandlerProto +where + TSubstream: AsyncRead + AsyncWrite + Send + 'static, +{ + type Handler = NotifsOutHandler; + + fn inbound_protocol(&self) -> DeniedUpgrade { + DeniedUpgrade + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsOutHandler { + proto_name: self.proto_name, + endpoint: connected_point.to_endpoint(), + remote_peer_id: remote_peer_id.clone(), + state: State::Disabled, + events_queue: SmallVec::new(), + } + } +} + +/// Handler for an outbound notification substream. 
+/// +/// When a connection is established, this handler starts in the "disabled" state, meaning that +/// no substream will be open. +/// +/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the +/// handler. Once done, the handler will try to establish then maintain an outbound substream with +/// the remote for the purpose of sending notifications to it. +pub struct NotifsOutHandler { + /// Name of the protocol to negotiate. + proto_name: Cow<'static, [u8]>, + + /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have + /// any influence on the behaviour. + // TODO: remove? + remote_peer_id: PeerId, + + /// Whether we are the connection dialer or listener. Used only for logging purposes and + /// shouldn't have any influence on the behaviour. + // TODO: remove? + endpoint: Endpoint, + + /// Relationship with the node we're connected to. + state: State, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, +} + +/// Our relationship with the node we're connected to. +enum State { + /// The handler is disabled and idle. No substream is open. + Disabled, + + /// The handler is disabled. A substream is open and needs to be closed. + // TODO: needed? + DisabledOpen(NotificationsOutSubstream), + + /// The handler is disabled but we are still trying to open a substream with the remote. + /// + /// If the handler gets enabled again, we can immediately switch to `Opening`. + DisabledOpening, + + /// The handler is enabled and we are trying to open a substream with the remote. + Opening, + + /// The handler is enabled. We have tried opening a substream in the past but the remote + /// refused it. + Refused, + + /// The handler is enabled and substream is open. + Open(NotificationsOutSubstream), + + /// Poisoned state. Shouldn't be found in the wild. + Poisoned, +} + +/// Event that can be received by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerIn { + /// Enables the notifications substream for this node. The handler will try to maintain a + /// substream with the remote. + Enable, + + /// Disables the notifications substream for this node. This is the default state. + Disable, + + /// Sends a message on the notifications substream. Ignored if the substream isn't open. + /// + /// It is only valid to send this if the handler has been enabled. + // TODO: is ignoring the correct way to do this? + Send(Vec), +} + +/// Event that can be emitted by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerOut { + /// The notifications substream has been accepted by the remote. + Open { + /// Handshake message sent by the remote after we opened the substream. + handshake: BytesMut, + }, + + /// The notifications substream has been closed by the remote. + Closed, + + /// We tried to open a notifications substream, but the remote refused it. + /// + /// The handler is still enabled and will try again in a few seconds. + Refused, +} + +impl NotifsOutHandler { + /// Returns true if the substream is open. + pub fn is_open(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => true, + State::Opening => false, + State::Refused => false, + State::Open(_) => true, + State::Poisoned => false, + } + } + + /// Returns the name of the protocol that we negotiate. 
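The `State` enum above, together with the `Enable`/`Disable` messages, forms a small transition table (spelled out in `inject_event` further down). As a reading aid, here is a minimal, std-only restatement of those transitions; the substream payloads are omitted, so `DisabledOpen` and `Open` carry no data here, and this is an illustration rather than the handler itself.

// Editor's sketch: the Enable/Disable transitions of the outbound handler.
#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    Disabled,
    DisabledOpening,
    DisabledOpen,
    Opening,
    Refused,
    Open,
}

fn enable(state: State) -> State {
    match state {
        // Enabling from scratch triggers an outbound substream request.
        State::Disabled | State::DisabledOpening => State::Opening,
        State::DisabledOpen => State::Open,
        // Already enabled: the real handler logs an error and keeps the state.
        other => other,
    }
}

fn disable(state: State) -> State {
    match state {
        State::Open => State::DisabledOpen,
        State::Opening => State::DisabledOpening,
        State::Refused => State::Disabled,
        State::DisabledOpen => State::Open, // as written in `inject_event` below
        // Already disabled: the real handler logs an error and keeps the state.
        other => other,
    }
}

fn main() {
    assert_eq!(enable(State::Disabled), State::Opening);
    assert_eq!(disable(State::Open), State::DisabledOpen);
    assert_eq!(enable(disable(State::Open)), State::Open);
}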
+ pub fn protocol_name(&self) -> &[u8] { + &self.proto_name + } +} + +impl ProtocolsHandler for NotifsOutHandler +where TSubstream: AsyncRead + AsyncWrite + Send + 'static { + type InEvent = NotifsOutHandlerIn; + type OutEvent = NotifsOutHandlerOut; + type Substream = TSubstream; + type Error = void::Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = NotificationsOut; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output + ) { + // We should never reach here. `proto` is a `Void`. + void::unreachable(proto) + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake_msg, sub): >::Output, + _: () + ) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Opening => { + let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + self.state = State::Open(sub); + }, + // If the handler was disabled while we were negotiating the protocol, immediately + // close it. + State::DisabledOpening => self.state = State::Disabled, + State::Disabled | State::Refused | State::Open(_) | State::DisabledOpen(_) => + error!("State mismatch in notifications handler: substream already open"), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + fn inject_event(&mut self, message: NotifsOutHandlerIn) { + match message { + NotifsOutHandlerIn::Enable => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => { + self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(NotificationsOut::new(self.proto_name.clone())) + .with_timeout(Duration::from_secs(10)), // TODO: proper timeout config + info: (), + }); + self.state = State::Opening; + }, + State::DisabledOpening => self.state = State::Opening, + State::DisabledOpen(sub) => self.state = State::Open(sub), + State::Opening | State::Refused | State::Open(_) => + error!("Tried to enable notifications handler that was already enabled"), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + }, + NotifsOutHandlerIn::Disable => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled | State::DisabledOpening => + error!("Tried to disable notifications handler that was already disabled"), + State::DisabledOpen(sub) => self.state = State::Open(sub), + State::Opening => self.state = State::DisabledOpening, + State::Refused => self.state = State::Disabled, + State::Open(sub) => self.state = State::DisabledOpen(sub), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + }, + NotifsOutHandlerIn::Send(msg) => + if let State::Open(sub) = &mut self.state { + sub.push_message(msg); + }, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => {}, + State::DisabledOpen(_) | State::Refused | State::Open(_) => + error!("State mismatch in NotificationsOut"), + State::Opening => self.state = State::Refused, + State::DisabledOpening => self.state = State::Disabled, + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::Yes // TODO: depends on state + } + + fn poll( + &mut self, + ) -> Poll< + ProtocolsHandlerEvent, + Self::Error, + > { + // Flush the events 
queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Ok(Async::Ready(event)) + } + + match &mut self.state { + State::Open(sub) | State::DisabledOpen(sub) => match sub.process() { + Ok(()) => {}, + Err(err) => {}, // TODO: ? + }, + _ => {} + } + + Ok(Async::NotReady) + } +} + +impl fmt::Debug for NotifsOutHandler +where + TSubstream: AsyncRead + AsyncWrite, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsOutHandler") + .field("open", &self.is_open()) + .finish() + } +} diff --git a/client/network/src/legacy_proto/tests.rs b/client/network/src/generic_proto/tests.rs similarity index 91% rename from client/network/src/legacy_proto/tests.rs rename to client/network/src/generic_proto/tests.rs index dc6d40eb040d7..9b23cc89459b9 100644 --- a/client/network/src/legacy_proto/tests.rs +++ b/client/network/src/generic_proto/tests.rs @@ -26,7 +26,7 @@ use libp2p::{PeerId, Multiaddr, Transport}; use rand::seq::SliceRandom; use std::{io, time::Duration, time::Instant}; use crate::message::Message; -use crate::legacy_proto::{LegacyProto, LegacyProtoOut}; +use crate::generic_proto::{GenericProto, GenericProtoOut}; use test_client::runtime::Block; /// Builds two nodes that have each other as bootstrap nodes. @@ -85,7 +85,7 @@ fn build_nodes() }); let behaviour = CustomProtoWithAddr { - inner: LegacyProto::new(&b"test"[..], &[1], peerset), + inner: GenericProto::new(&b"test"[..], &[1], peerset), addrs: addrs .iter() .enumerate() @@ -115,12 +115,12 @@ fn build_nodes() /// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. struct CustomProtoWithAddr { - inner: LegacyProto>, + inner: GenericProto>, addrs: Vec<(PeerId, Multiaddr)>, } impl std::ops::Deref for CustomProtoWithAddr { - type Target = LegacyProto>; + type Target = GenericProto>; fn deref(&self) -> &Self::Target { &self.inner @@ -135,8 +135,8 @@ impl std::ops::DerefMut for CustomProtoWithAddr { impl NetworkBehaviour for CustomProtoWithAddr { type ProtocolsHandler = - > as NetworkBehaviour>::ProtocolsHandler; - type OutEvent = > as NetworkBehaviour>::OutEvent; + > as NetworkBehaviour>::ProtocolsHandler; + type OutEvent = > as NetworkBehaviour>::OutEvent; fn new_handler(&mut self) -> Self::ProtocolsHandler { self.inner.new_handler() @@ -219,7 +219,7 @@ fn two_nodes_transfer_lots_of_packets() { let fut1 = future::poll_fn(move || -> io::Result<_> { loop { match try_ready!(service1.poll()) { - Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { for n in 0 .. NUM_PACKETS { service1.send_packet( &peer_id, @@ -236,8 +236,8 @@ fn two_nodes_transfer_lots_of_packets() { let fut2 = future::poll_fn(move || -> io::Result<_> { loop { match try_ready!(service2.poll()) { - Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {}, - Some(LegacyProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + Some(GenericProtoOut::CustomMessage { message, .. }) => { match Message::::decode(&mut &message[..]).unwrap() { Message::::ChainSpecific(message) => { assert_eq!(message.len(), 1); @@ -280,7 +280,7 @@ fn basic_two_nodes_requests_in_parallel() { let fut1 = future::poll_fn(move || -> io::Result<_> { loop { match try_ready!(service1.poll()) { - Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { for msg in to_send.drain(..) 
{ service1.send_packet(&peer_id, msg.encode()); } @@ -293,8 +293,8 @@ fn basic_two_nodes_requests_in_parallel() { let fut2 = future::poll_fn(move || -> io::Result<_> { loop { match try_ready!(service2.poll()) { - Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {}, - Some(LegacyProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + Some(GenericProtoOut::CustomMessage { message, .. }) => { let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); to_receive.remove(pos); if to_receive.is_empty() { @@ -332,7 +332,7 @@ fn reconnect_after_disconnect() { let mut service1_not_ready = false; match service1.poll().unwrap() { - Async::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => { + Async::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { match service1_state { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; @@ -344,7 +344,7 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Async::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => { + Async::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | @@ -356,7 +356,7 @@ fn reconnect_after_disconnect() { } match service2.poll().unwrap() { - Async::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => { + Async::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { match service2_state { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; @@ -368,7 +368,7 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Async::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => { + Async::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | diff --git a/client/network/src/generic_proto/upgrade.rs b/client/network/src/generic_proto/upgrade.rs new file mode 100644 index 0000000000000..147a924b2fdd4 --- /dev/null +++ b/client/network/src/generic_proto/upgrade.rs @@ -0,0 +1,35 @@ +// Copyright 2018-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +pub use self::collec::UpgradeCollec; +pub use self::legacy::{ + RegisteredProtocol, + RegisteredProtocolEvent, + RegisteredProtocolName, + RegisteredProtocolSubstream +}; +pub use self::notifications::{ + NotificationsIn, + NotificationsInSubstream, + NotificationsOut, + NotificationsOutSubstream +}; +pub use self::select::SelectUpgrade; + +mod collec; +mod legacy; +mod notifications; +mod select; diff --git a/client/network/src/generic_proto/upgrade/collec.rs b/client/network/src/generic_proto/upgrade/collec.rs new file mode 100644 index 0000000000000..dffedacf44886 --- /dev/null +++ b/client/network/src/generic_proto/upgrade/collec.rs @@ -0,0 +1,99 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use futures::prelude::*; +use libp2p::core::{ + upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}, + Negotiated +}; +use std::{iter::FromIterator, vec}; + +// TODO: move this to libp2p + +/// Upgrade that combines multiple upgrades of the same type into one. Supports all the protocols +/// supported by either sub-upgrade. +#[derive(Debug, Clone)] +pub struct UpgradeCollec(pub Vec); + +impl From> for UpgradeCollec { + fn from(list: Vec) -> Self { + UpgradeCollec(list) + } +} + +impl FromIterator for UpgradeCollec { + fn from_iter>(iter: I) -> Self { + UpgradeCollec(iter.into_iter().collect()) + } +} + +impl UpgradeInfo for UpgradeCollec { + type Info = ProtoNameWithUsize; + type InfoIter = vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.0.iter().enumerate() + .flat_map(|(n, p)| + p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) + .collect::>() + .into_iter() + } +} + +impl InboundUpgrade for UpgradeCollec +where + T: InboundUpgrade, +{ + type Output = (T::Output, usize); + type Error = (T::Error, usize); + type Future = FutWithUsize; + + fn upgrade_inbound(mut self, sock: Negotiated, info: Self::Info) -> Self::Future { + let fut = self.0.remove(info.1).upgrade_inbound(sock, info.0); + FutWithUsize(fut, info.1) + } +} + +/// Groups a `ProtocolName` with a `usize`. +#[derive(Debug, Clone)] +pub struct ProtoNameWithUsize(T, usize); + +impl ProtocolName for ProtoNameWithUsize { + fn protocol_name(&self) -> &[u8] { + self.0.protocol_name() + } +} + +/// Equivalent to `fut.map(|v| (v, num))`, where `fut` and `num` are the two fields of this +/// struct. 
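`UpgradeCollec` advertises every protocol name supported by any of its sub-upgrades, tagging each name with the index of the upgrade it came from so that the negotiated substream can later be routed back to the right sub-upgrade. A minimal, std-only sketch of that flattening, with plain strings standing in for real `UpgradeInfo` implementations:

// Editor's sketch: the name/index flattening done by `UpgradeCollec::protocol_info`.
/// A stand-in for one sub-upgrade: the list of protocol names it supports.
struct FakeUpgrade(Vec<&'static str>);

/// Flattens the names of all sub-upgrades, remembering which upgrade each name
/// belongs to (the `usize` plays the role of `ProtoNameWithUsize`).
fn protocol_info(upgrades: &[FakeUpgrade]) -> Vec<(&'static str, usize)> {
    upgrades
        .iter()
        .enumerate()
        .flat_map(|(n, up)| up.0.iter().map(move |name| (*name, n)))
        .collect()
}

fn main() {
    let collec = vec![
        FakeUpgrade(vec!["/proto/a/1", "/proto/a/2"]),
        FakeUpgrade(vec!["/proto/b/1"]),
    ];
    let infos = protocol_info(&collec);
    assert_eq!(infos, vec![("/proto/a/1", 0), ("/proto/a/2", 0), ("/proto/b/1", 1)]);
    // When negotiation succeeds with some name, the stored index tells
    // `upgrade_inbound` which sub-upgrade should receive the socket.
}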
+pub struct FutWithUsize(T, usize); + +impl Future for FutWithUsize { + type Item = (T::Item, usize); + type Error = (T::Error, usize); + + fn poll(&mut self) -> Poll { + match self.0.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(v)) => Ok(Async::Ready((v, self.1))), + Err(err) => Err((err, self.1)), + } + } +} diff --git a/client/network/src/legacy_proto/upgrade.rs b/client/network/src/generic_proto/upgrade/legacy.rs similarity index 100% rename from client/network/src/legacy_proto/upgrade.rs rename to client/network/src/generic_proto/upgrade/legacy.rs diff --git a/client/network/src/generic_proto/upgrade/notifications.rs b/client/network/src/generic_proto/upgrade/notifications.rs new file mode 100644 index 0000000000000..0331d751509bb --- /dev/null +++ b/client/network/src/generic_proto/upgrade/notifications.rs @@ -0,0 +1,266 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +/// Notifications protocol. +/// +/// The Substrate notifications protocol consists in the following: +/// +/// - Node A opens a substream to node B. +/// - If node B accepts the substream, it sends back a message which contains some +/// protocol-specific higher-level logic. This message is prefixed with a variable-length +/// integer message length. This message can be empty, in which case `0` is sent. Afterwards, +/// the sending side of B is closed. +/// - If instead the node refuses the connection (which typically happens because no empty slot +/// is available), then it immediately closes the substream after the multistream-select +/// negotiation. +/// - Node A can then send notifications to B, prefixed with a variable-length integer indicating +/// the length of the message. +/// - Node A closes its writing side if it doesn't want the notifications substream anymore. +/// +/// Notification substreams are unidirectional. If A opens a substream with B, then B is +/// encouraged but not required to open a substream to A as well. +/// + +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{Negotiated, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; +use libp2p::tokio_codec::Framed; +use log::error; +use std::{borrow::Cow, collections::VecDeque, io, iter, mem}; +use tokio_io::{AsyncRead, AsyncWrite}; +use unsigned_varint::codec::UviBytes; + +/// Upgrade that accepts a substream, sends back a status message, then becomes a unidirectional +/// stream of messages. +#[derive(Debug, Clone)] +pub struct NotificationsIn { + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, [u8]>, +} + +/// Upgrade that opens a substream, waits for the remote to accept by sending back a status +/// message, then becomes a unidirectional sink of data. +#[derive(Debug, Clone)] +pub struct NotificationsOut { + /// Protocol name to use when negotiating the substream. 
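The protocol description above says the handshake and every notification are prefixed with a variable-length integer length; the implementation delegates this to `unsigned_varint::codec::UviBytes`. The following std-only sketch shows what that framing looks like on the wire (an unsigned LEB128 length followed by the payload); it is an illustration of the format, not the codec used by the patch.

// Editor's sketch: varint length-prefixed framing as described above.
/// Appends `value` to `out` as an unsigned LEB128 varint.
fn write_uvarint(mut value: u64, out: &mut Vec<u8>) {
    loop {
        let mut byte = (value & 0x7f) as u8;
        value >>= 7;
        if value != 0 {
            byte |= 0x80; // more bytes follow
        }
        out.push(byte);
        if value == 0 {
            break;
        }
    }
}

/// Frames one message the way the notifications substream does:
/// varint length, then the payload bytes.
fn frame(payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(payload.len() + 2);
    write_uvarint(payload.len() as u64, &mut out);
    out.extend_from_slice(payload);
    out
}

fn main() {
    // An empty handshake is a single `0` length byte.
    assert_eq!(frame(b""), vec![0]);
    // A short notification: one length byte followed by the payload.
    assert_eq!(frame(b"hello"), vec![5, b'h', b'e', b'l', b'l', b'o']);
    // Longer payloads get a multi-byte length prefix (300 = 0xAC 0x02).
    let long = vec![0u8; 300];
    assert_eq!(&frame(&long)[..2], &[0xac, 0x02]);
}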
+ protocol_name: Cow<'static, [u8]>, +} + +/// A substream for incoming notification messages. +/// +/// When creating, this struct starts in a state in which we must first send back a handshake +/// message to the remote. No message will come before this has been done. +pub struct NotificationsInSubstream { + socket: Framed, UviBytes>>, + handshake: NotificationsInSubstreamHandshake, +} + +/// State of the handshake sending back process. +enum NotificationsInSubstreamHandshake { + /// Waiting for the user to give us the handshake message. + NotSent, + /// User gave us the handshake message. Trying to push it in the socket. + PendingSend(Vec), + /// Handshake message was pushed in the socket. Still need to flush. + Close, + /// Handshake message successfully sent. + Sent, +} + +/// A substream for outgoing notification messages. +pub struct NotificationsOutSubstream { + /// Substream where to send messages. + socket: Framed, UviBytes>>, + /// Queue of messages waiting to be sent. + messages_queue: VecDeque>, + /// If true, we need to flush `socket`. + need_flush: bool, +} + +impl NotificationsIn { + /// Builds a new potential upgrade. + pub fn new(proto_name: impl Into>) -> Self { + NotificationsIn { + protocol_name: proto_name.into(), + } + } + + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &[u8] { + &self.protocol_name + } +} + +impl UpgradeInfo for NotificationsIn { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } +} + +impl InboundUpgrade for NotificationsIn +where TSubstream: AsyncRead + AsyncWrite + 'static, +{ + type Output = NotificationsInSubstream; + type Future = futures::future::FutureResult; + type Error = upgrade::ReadOneError; + + fn upgrade_inbound( + self, + socket: Negotiated, + _: Self::Info, + ) -> Self::Future { + futures::future::ok(NotificationsInSubstream { + socket: Framed::new(socket, UviBytes::default()), + handshake: NotificationsInSubstreamHandshake::NotSent, + }) + } +} + +impl NotificationsInSubstream +where TSubstream: AsyncRead + AsyncWrite + 'static, +{ + /// Sends the handshake in order to inform the remote that we accept the substream. + // TODO: doesn't seem to work if `message` is empty + pub fn send_handshake(&mut self, message: impl Into>) { + match self.handshake { + NotificationsInSubstreamHandshake::NotSent => {} + _ => { + error!(target: "sub-libp2p", "Tried to send handshake twice"); + return; + } + } + + self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); + } +} + +impl Stream for NotificationsInSubstream +where TSubstream: AsyncRead + AsyncWrite + 'static, +{ + type Item = BytesMut; + type Error = io::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + // This `Stream` implementation first tries to send back the handshake if necessary. + loop { + match mem::replace(&mut self.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::Sent => + return self.socket.poll(), + NotificationsInSubstreamHandshake::NotSent => + return Ok(Async::NotReady), + NotificationsInSubstreamHandshake::PendingSend(msg) => + match self.socket.start_send(msg)? { + AsyncSink::Ready => + self.handshake = NotificationsInSubstreamHandshake::Close, + AsyncSink::NotReady(msg) => + self.handshake = NotificationsInSubstreamHandshake::PendingSend(msg), + }, + NotificationsInSubstreamHandshake::Close => + match self.socket.poll_complete()? 
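`NotificationsInSubstream` refuses to surface any notification until the user-supplied handshake has been written out, which is what the `NotSent`/`PendingSend`/`Close`/`Sent` states above implement. A minimal, std-only model of that ordering (the flush step is folded into the send here, and `InboundModel` is a stand-in, not the real type):

// Editor's sketch: "handshake first" ordering of the inbound substream.
enum Handshake {
    NotSent,
    PendingSend(Vec<u8>),
    Sent,
}

struct InboundModel {
    handshake: Handshake,
    /// Notifications already received from the wire, waiting to be surfaced.
    incoming: Vec<Vec<u8>>,
    /// Bytes "written to the socket" (stand-in for the framed sink).
    written: Vec<u8>,
}

impl InboundModel {
    fn send_handshake(&mut self, msg: Vec<u8>) {
        if let Handshake::NotSent = self.handshake {
            self.handshake = Handshake::PendingSend(msg);
        }
    }

    /// One "poll": first make progress on the handshake, then yield a message.
    fn poll(&mut self) -> Option<Vec<u8>> {
        match std::mem::replace(&mut self.handshake, Handshake::Sent) {
            Handshake::NotSent => {
                self.handshake = Handshake::NotSent;
                None // still waiting for the user to call `send_handshake`
            }
            Handshake::PendingSend(msg) => {
                self.written.extend_from_slice(&msg);
                self.handshake = Handshake::Sent;
                None // handshake just went out; notifications come on later polls
            }
            Handshake::Sent => {
                if self.incoming.is_empty() { None } else { Some(self.incoming.remove(0)) }
            }
        }
    }
}

fn main() {
    let mut sub = InboundModel {
        handshake: Handshake::NotSent,
        incoming: vec![b"notif".to_vec()],
        written: Vec::new(),
    };
    assert_eq!(sub.poll(), None);            // nothing until we accept
    sub.send_handshake(b"ok".to_vec());
    assert_eq!(sub.poll(), None);            // this poll sends the handshake
    assert_eq!(sub.poll(), Some(b"notif".to_vec()));
    assert_eq!(sub.written, b"ok".to_vec());
}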
{ // TODO: close() + Async::Ready(()) => + self.handshake = NotificationsInSubstreamHandshake::Sent, + Async::NotReady => + self.handshake = NotificationsInSubstreamHandshake::Close, + }, + } + } + } +} + +impl NotificationsOut { + /// Builds a new potential upgrade. + pub fn new(proto_name: impl Into>) -> Self { + NotificationsOut { + protocol_name: proto_name.into(), + } + } +} + +impl UpgradeInfo for NotificationsOut { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } +} + +impl OutboundUpgrade for NotificationsOut +where TSubstream: AsyncRead + AsyncWrite + Send + 'static, +{ + type Output = (BytesMut, NotificationsOutSubstream); + type Future = Box + Send>; + type Error = io::Error; + + fn upgrade_outbound( + self, + socket: Negotiated, + proto_name: Self::Info, + ) -> Self::Future { + Box::new(Framed::new(socket, UviBytes::default()) + .into_future() + .map_err(|(err, _)| err) + .and_then(|(handshake, socket)| { + if let Some(handshake) = handshake { + let sub = NotificationsOutSubstream { + socket, + messages_queue: VecDeque::new(), + need_flush: false, + }; + Ok((handshake, sub)) + } else { + Err(io::Error::from(io::ErrorKind::UnexpectedEof)) + } + })) + } +} + +impl NotificationsOutSubstream +where TSubstream: AsyncRead + AsyncWrite + 'static, +{ + /// Pushes a message to the queue of messages. + pub fn push_message(&mut self, message: Vec) { + // TODO: limit the size of the queue + self.messages_queue.push_back(message); + } + + /// Processes the substream. Must be called within the context of a task. + pub fn process(&mut self) -> Result<(), io::Error> { + while let Some(msg) = self.messages_queue.pop_front() { + match self.socket.start_send(msg) { + Err(err) => return Err(err), + Ok(AsyncSink::Ready) => self.need_flush = true, + Ok(AsyncSink::NotReady(msg)) => { + self.messages_queue.push_front(msg); + return Ok(()); + } + } + } + + if self.need_flush { + match self.socket.poll_complete() { + Err(err) => return Err(err), + Ok(Async::Ready(())) => self.need_flush = false, + Ok(Async::NotReady) => {}, + } + } + + Ok(()) + } +} diff --git a/client/network/src/generic_proto/upgrade/select.rs b/client/network/src/generic_proto/upgrade/select.rs new file mode 100644 index 0000000000000..6677e557f094b --- /dev/null +++ b/client/network/src/generic_proto/upgrade/select.rs @@ -0,0 +1,120 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
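`NotificationsOutSubstream::push_message` only queues, and `process` drains the queue for as long as the sink accepts writes, remembering whether a flush is still owed. A minimal, std-only model of that queue-and-flush discipline, with `FakeSink` standing in for the framed socket and back-pressure simulated by a write budget:

// Editor's sketch: queue-and-flush discipline of the outbound substream.
use std::collections::VecDeque;

struct FakeSink {
    /// Messages accepted but not flushed yet.
    buffered: Vec<Vec<u8>>,
    /// Messages that have been flushed "onto the wire".
    flushed: Vec<Vec<u8>>,
    /// How many more writes the sink accepts before reporting "not ready".
    budget: usize,
}

impl FakeSink {
    /// Returns the message back if the sink is not ready (like `AsyncSink::NotReady`).
    fn start_send(&mut self, msg: Vec<u8>) -> Result<(), Vec<u8>> {
        if self.budget == 0 {
            return Err(msg);
        }
        self.budget -= 1;
        self.buffered.push(msg);
        Ok(())
    }

    fn poll_complete(&mut self) {
        self.flushed.append(&mut self.buffered);
    }
}

struct OutboundModel {
    socket: FakeSink,
    messages_queue: VecDeque<Vec<u8>>,
    need_flush: bool,
}

impl OutboundModel {
    fn push_message(&mut self, msg: Vec<u8>) {
        self.messages_queue.push_back(msg);
    }

    /// Mirrors `process`: drain the queue while the sink accepts writes, then flush.
    fn process(&mut self) {
        while let Some(msg) = self.messages_queue.pop_front() {
            match self.socket.start_send(msg) {
                Ok(()) => self.need_flush = true,
                Err(msg) => {
                    // Sink is full: put the message back and try again later.
                    self.messages_queue.push_front(msg);
                    return;
                }
            }
        }
        if self.need_flush {
            self.socket.poll_complete();
            self.need_flush = false;
        }
    }
}

fn main() {
    let mut out = OutboundModel {
        socket: FakeSink { buffered: Vec::new(), flushed: Vec::new(), budget: 1 },
        messages_queue: VecDeque::new(),
        need_flush: false,
    };
    out.push_message(b"a".to_vec());
    out.push_message(b"b".to_vec());
    out.process();
    // Only one message fit; the second stays queued for the next call.
    assert_eq!(out.messages_queue.len(), 1);
    out.socket.budget = 1;
    out.process();
    assert_eq!(out.socket.flushed.len(), 2);
}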
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use libp2p::core::{ + either::{EitherOutput, EitherError, EitherFuture2, EitherName}, + upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + Negotiated +}; + +/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either +/// sub-upgrade. +/// +/// The protocols supported by the first element have a higher priority. +#[derive(Debug, Clone)] +pub struct SelectUpgrade(pub A, pub B); + +impl SelectUpgrade { + /// Combines two upgrades into an `SelectUpgrade`. + /// + /// The protocols supported by the first element have a higher priority. + pub fn new(a: A, b: B) -> Self { + SelectUpgrade(a, b) + } +} + +impl UpgradeInfo for SelectUpgrade +where + A: UpgradeInfo, + B: UpgradeInfo +{ + type Info = EitherName; + type InfoIter = InfoIterChain< + ::IntoIter, + ::IntoIter + >; + + fn protocol_info(&self) -> Self::InfoIter { + InfoIterChain(self.0.protocol_info().into_iter(), self.1.protocol_info().into_iter()) + } +} + +impl InboundUpgrade for SelectUpgrade +where + A: InboundUpgrade, + B: InboundUpgrade, +{ + type Output = EitherOutput; + type Error = EitherError; + type Future = EitherFuture2; + + fn upgrade_inbound(self, sock: Negotiated, info: Self::Info) -> Self::Future { + match info { + EitherName::A(info) => EitherFuture2::A(self.0.upgrade_inbound(sock, info)), + EitherName::B(info) => EitherFuture2::B(self.1.upgrade_inbound(sock, info)) + } + } +} + +impl OutboundUpgrade for SelectUpgrade +where + A: OutboundUpgrade, + B: OutboundUpgrade, +{ + type Output = EitherOutput; + type Error = EitherError; + type Future = EitherFuture2; + + fn upgrade_outbound(self, sock: Negotiated, info: Self::Info) -> Self::Future { + match info { + EitherName::A(info) => EitherFuture2::A(self.0.upgrade_outbound(sock, info)), + EitherName::B(info) => EitherFuture2::B(self.1.upgrade_outbound(sock, info)) + } + } +} + +/// Iterator that combines the protocol names of twp upgrades. 
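`SelectUpgrade` lists the protocol names of its first half before those of the second, which is what gives `A` priority during negotiation; the chosen `EitherName` then routes the socket to the matching sub-upgrade. A minimal, std-only sketch of that ordering, with a local `EitherName` standing in for the libp2p type:

// Editor's sketch: name ordering of `SelectUpgrade::protocol_info`.
#[derive(Debug, PartialEq)]
enum EitherName<A, B> {
    A(A),
    B(B),
}

fn protocol_info<A, B>(a: Vec<A>, b: Vec<B>) -> Vec<EitherName<A, B>> {
    a.into_iter()
        .map(EitherName::A)
        .chain(b.into_iter().map(EitherName::B))
        .collect()
}

fn main() {
    let names = protocol_info(vec!["/notif/1"], vec!["/legacy/1", "/legacy/2"]);
    assert_eq!(
        names,
        vec![
            EitherName::A("/notif/1"),
            EitherName::B("/legacy/1"),
            EitherName::B("/legacy/2"),
        ],
    );
    // Whichever name the remote picks tells `upgrade_inbound`/`upgrade_outbound`
    // which of the two sub-upgrades should handle the negotiated substream.
}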
+#[derive(Debug, Clone)] +pub struct InfoIterChain(A, B); + +impl Iterator for InfoIterChain +where + A: Iterator, + B: Iterator +{ + type Item = EitherName; + + fn next(&mut self) -> Option { + if let Some(info) = self.0.next() { + return Some(EitherName::A(info)) + } + if let Some(info) = self.1.next() { + return Some(EitherName::B(info)) + } + None + } + + fn size_hint(&self) -> (usize, Option) { + let (min1, max1) = self.0.size_hint(); + let (min2, max2) = self.1.size_hint(); + let max = max1.and_then(move |m1| max2.and_then(move |m2| m1.checked_add(m2))); + (min1.saturating_add(min2), max) + } +} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index d0977d90c9005..a4f52896ab3a9 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -170,9 +170,9 @@ mod behaviour; mod chain; -mod legacy_proto; mod debug_info; mod discovery; +mod generic_proto; mod on_demand_layer; mod protocol; mod service; @@ -189,7 +189,7 @@ pub use service::{ NetworkService, NetworkWorker, TransactionPool, ExHashT, ReportHandle, NetworkStateInfo, }; -pub use protocol::{PeerInfo, Context, consensus_gossip, message, specialization}; +pub use protocol::{PeerInfo, Context, message, specialization}; pub use protocol::event::{Event, DhtEvent}; pub use protocol::sync::SyncState; pub use libp2p::{Multiaddr, PeerId}; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 780a071505a65..2a3df2f234462 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -15,7 +15,7 @@ // along with Substrate. If not, see . use crate::{DiscoveryNetBehaviour, config::ProtocolId}; -use crate::legacy_proto::{LegacyProto, LegacyProtoOut}; +use crate::generic_proto::{GenericProto, GenericProtoOut}; use bytes::BytesMut; use futures::prelude::*; use futures03::{StreamExt as _, TryStreamExt as _}; @@ -37,7 +37,6 @@ use sr_primitives::traits::{ use sr_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, BlockAttributes, Direction, FromBlock, Message, RequestId}; use message::generic::{Message as GenericMessage, ConsensusMessage}; -use consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient}; use light_dispatch::{LightDispatch, LightDispatchNetwork, RequestData}; use specialization::NetworkSpecialization; use sync::{ChainSync, SyncState}; @@ -47,7 +46,7 @@ use rustc_hex::ToHex; use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::fmt::Write; -use std::{cmp, num::NonZeroUsize, time}; +use std::{borrow::Cow, cmp, num::NonZeroUsize, time}; use log::{trace, debug, warn, error}; use crate::chain::{Client, FinalityProofProvider}; use client_api::{FetchChecker, ChangesProof, StorageProof}; @@ -55,7 +54,6 @@ use crate::error; use util::LruHashSet; mod util; -pub mod consensus_gossip; pub mod message; pub mod event; pub mod light_dispatch; @@ -117,17 +115,18 @@ pub struct Protocol, H: ExHashT> { genesis_hash: B::Hash, sync: ChainSync, specialization: S, - consensus_gossip: ConsensusGossip, context_data: ContextData, // Connected peers pending Status message. handshaking_peers: HashMap, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: HashMap>, /// Used to report reputation changes. peerset_handle: peerset::PeersetHandle, transaction_pool: Arc>, /// When asked for a proof of finality, we use this struct to build one. finality_proof_provider: Option>>, /// Handles opening the unique substream and sending and receiving raw messages. 
- behaviour: LegacyProto>, + behaviour: GenericProto>, } #[derive(Default)] @@ -174,7 +173,7 @@ pub struct PeerInfo { } struct LightDispatchIn<'a> { - behaviour: &'a mut LegacyProto>, + behaviour: &'a mut GenericProto>, peerset: peerset::PeersetHandle, } @@ -193,7 +192,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { block, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } fn send_read_request( @@ -209,7 +208,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { keys, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } fn send_read_child_request( @@ -227,7 +226,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { keys, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } fn send_call_request( @@ -245,7 +244,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { data, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } fn send_changes_request( @@ -269,7 +268,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { key, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } fn send_body_request( @@ -291,7 +290,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { max, }); - self.behaviour.send_packet(who, message.encode()) + self.behaviour.send_packet(who, None, message.encode()) } } @@ -313,7 +312,7 @@ pub trait Context { /// Protocol context. struct ProtocolContext<'a, B: 'a + BlockT, H: 'a + ExHashT> { - behaviour: &'a mut LegacyProto>, + behaviour: &'a mut GenericProto>, context_data: &'a mut ContextData, peerset_handle: &'a peerset::PeersetHandle, } @@ -321,7 +320,7 @@ struct ProtocolContext<'a, B: 'a + BlockT, H: 'a + ExHashT> { impl<'a, B: BlockT + 'a, H: 'a + ExHashT> ProtocolContext<'a, B, H> { fn new( context_data: &'a mut ContextData, - behaviour: &'a mut LegacyProto>, + behaviour: &'a mut GenericProto>, peerset_handle: &'a peerset::PeersetHandle, ) -> Self { ProtocolContext { context_data, peerset_handle, behaviour } @@ -427,7 +426,7 @@ impl, H: ExHashT> Protocol { ); let (peerset, peerset_handle) = peerset::Peerset::from_config(peerset_config); let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let behaviour = LegacyProto::new(protocol_id, versions, peerset); + let behaviour = GenericProto::new(protocol_id, versions, peerset); let protocol = Protocol { tick_timeout: Box::new(futures_timer::Interval::new(TICK_TIMEOUT).map(|v| Ok::<_, ()>(v)).compat()), @@ -442,7 +441,7 @@ impl, H: ExHashT> Protocol { genesis_hash: info.chain.genesis_hash, sync, specialization, - consensus_gossip: ConsensusGossip::new(), + protocol_name_by_engine: HashMap::new(), handshaking_peers: HashMap::new(), transaction_pool, finality_proof_provider, @@ -625,18 +624,35 @@ impl, H: ExHashT> Protocol { GenericMessage::RemoteReadChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => { - self.consensus_gossip.on_incoming( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle), - who, - vec![msg], - ); + let outcome = if let Some(proto_name) = self.protocol_name_by_engine.get(&msg.engine_id) { + // TODO: what if not open? check if open? 
+ CustomMessageOutcome::NotifMessages { + remote: who.clone(), + messages: vec![(proto_name.clone(), msg.data.clone())], + } + } else { + CustomMessageOutcome::None + }; + + return outcome; } GenericMessage::ConsensusBatch(messages) => { - self.consensus_gossip.on_incoming( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle), - who, - messages, - ); + let outcome_messages = messages + .iter() + .filter_map(|msg| { + if let Some(proto_name) = self.protocol_name_by_engine.get(&msg.engine_id) { + // TODO: what if not open? check if open? + Some((proto_name.clone(), msg.data.clone())) + } else { + None + } + }) + .collect::>(); + + return CustomMessageOutcome::NotifMessages { + remote: who, + messages: outcome_messages, + }; } GenericMessage::ChainSpecific(msg) => self.specialization.on_message( &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle), @@ -667,14 +683,6 @@ impl, H: ExHashT> Protocol { ); } - /// Locks `self` and returns a context plus the `ConsensusGossip` struct. - pub fn consensus_gossip_lock<'a>( - &'a mut self, - ) -> (impl Context + 'a, &'a mut ConsensusGossip) { - let context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - (context, &mut self.consensus_gossip) - } - /// Locks `self` and returns a context plus the network specialization. pub fn specialization_lock<'a>( &'a mut self, @@ -683,26 +691,6 @@ impl, H: ExHashT> Protocol { (context, &mut self.specialization) } - /// Gossip a consensus message to the network. - pub fn gossip_consensus_message( - &mut self, - topic: B::Hash, - engine_id: ConsensusEngineId, - message: Vec, - recipient: GossipMessageRecipient, - ) { - let mut context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - let message = ConsensusMessage { data: message, engine_id }; - match recipient { - GossipMessageRecipient::BroadcastToAll => - self.consensus_gossip.multicast(&mut context, topic, message, true), - GossipMessageRecipient::BroadcastNew => - self.consensus_gossip.multicast(&mut context, topic, message, false), - GossipMessageRecipient::Peer(who) => - self.send_message(&who, GenericMessage::Consensus(message)), - } - } - /// Called when a new peer is connected pub fn on_peer_connected(&mut self, who: PeerId) { trace!(target: "sync", "Connecting {}", who); @@ -720,9 +708,6 @@ impl, H: ExHashT> Protocol { }; if let Some(peer_data) = removed { let mut context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - if peer_data.info.protocol_version > 2 { - self.consensus_gossip.peer_disconnected(&mut context, peer.clone()); - } self.sync.peer_disconnected(peer.clone()); self.specialization.on_disconnect(&mut context, peer.clone()); self.light_dispatch.on_disconnect(LightDispatchIn { @@ -885,9 +870,6 @@ impl, H: ExHashT> Protocol { /// /// > **Note**: This method normally doesn't have to be called except for testing purposes. 
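With the built-in gossip removed, legacy `Consensus`/`ConsensusBatch` messages are translated by looking the engine ID up in `protocol_name_by_engine` and dropping messages whose engine has no registered notifications protocol. A minimal, std-only sketch of that translation; the engine IDs and protocol name used in `main` are made-up examples, and `ConsensusMessage` is a simplified stand-in for the real message type:

// Editor's sketch: engine-id to protocol-name translation described above.
use std::borrow::Cow;
use std::collections::HashMap;

type ConsensusEngineId = [u8; 4];

struct ConsensusMessage {
    engine_id: ConsensusEngineId,
    data: Vec<u8>,
}

/// Mirrors `protocol_name_by_engine`: for each legacy engine ID, the name of
/// the notifications protocol it now maps to.
fn translate(
    registry: &HashMap<ConsensusEngineId, Cow<'static, [u8]>>,
    messages: Vec<ConsensusMessage>,
) -> Vec<(Cow<'static, [u8]>, Vec<u8>)> {
    messages
        .into_iter()
        .filter_map(|msg| {
            // Messages for engines with no registered protocol are dropped.
            registry.get(&msg.engine_id).map(|name| (name.clone(), msg.data))
        })
        .collect()
}

fn main() {
    // Example 4-byte engine IDs and protocol name; not taken from the patch.
    let mut registry = HashMap::new();
    registry.insert(*b"AAAA", Cow::Borrowed(&b"/example/gossip/1"[..]));

    let out = translate(&registry, vec![
        ConsensusMessage { engine_id: *b"AAAA", data: vec![1, 2, 3] },
        ConsensusMessage { engine_id: *b"BBBB", data: vec![4] }, // unregistered: dropped
    ]);

    assert_eq!(out.len(), 1);
    assert_eq!(&*out[0].0, &b"/example/gossip/1"[..]);
}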
pub fn tick(&mut self) { - self.consensus_gossip.tick( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle) - ); self.maintain_peers(); self.light_dispatch.maintain_peers(LightDispatchIn { behaviour: &mut self.behaviour, @@ -1025,9 +1007,6 @@ impl, H: ExHashT> Protocol { } } let mut context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - if protocol_version > 2 { - self.consensus_gossip.new_peer(&mut context, who.clone(), status.roles); - } self.specialization.on_connect(&mut context, who, status); } @@ -1067,6 +1046,36 @@ impl, H: ExHashT> Protocol { } } + /// Send a notification to the given peer we're connected to. + /// + /// Doesn't do anything if we're not connected to that peer. + pub fn write_notif( + &mut self, + target: PeerId, + proto_name: impl Into>, + message: impl Into> + ) { + self.behaviour.send_packet(&target, Some(proto_name.into()), message) + } + + /// Registers a new notifications protocol. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &mut self, + proto_name: impl Into>, + engine_id: ConsensusEngineId, + handshake: impl Into>, + ) { + let proto_name = proto_name.into(); + if self.protocol_name_by_engine.insert(engine_id, proto_name.clone()).is_some() { + error!("Notifications protocol already registered: {:?}", proto_name); + } else { + self.behaviour.register_notif_protocol(proto_name, handshake); + } + } + /// Call when we must propagate ready extrinsics to peers. pub fn propagate_extrinsics( &mut self, @@ -1700,11 +1709,16 @@ pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + /// Notif protocols have been opened with the remote. + NotifOpened { remote: PeerId, proto_names: Vec> }, + /// Notif protocols have been closed with the remote. + NotifClosed { remote: PeerId, proto_names: Vec> }, + NotifMessages { remote: PeerId, messages: Vec<(Cow<'static, [u8]>, Vec)> }, None, } fn send_request( - behaviour: &mut LegacyProto>, + behaviour: &mut GenericProto>, stats: &mut HashMap<&'static str, PacketStats>, peers: &mut HashMap>, who: &PeerId, @@ -1725,7 +1739,7 @@ fn send_request( } fn send_message( - behaviour: &mut LegacyProto>, + behaviour: &mut GenericProto>, stats: &mut HashMap<&'static str, PacketStats>, who: &PeerId, message: Message, @@ -1734,12 +1748,12 @@ fn send_message( let mut stats = stats.entry(message.id()).or_default(); stats.bytes_out += encoded.len() as u64; stats.count_out += 1; - behaviour.send_packet(who, encoded); + behaviour.send_packet(who, None, encoded); } impl, H: ExHashT> NetworkBehaviour for Protocol { - type ProtocolsHandler = > as NetworkBehaviour>::ProtocolsHandler; + type ProtocolsHandler = > as NetworkBehaviour>::ProtocolsHandler; type OutEvent = CustomMessageOutcome; fn new_handler(&mut self) -> Self::ProtocolsHandler { @@ -1824,21 +1838,28 @@ Protocol { }; let outcome = match event { - LegacyProtoOut::CustomProtocolOpen { peer_id, version, .. } => { - debug_assert!( - version <= CURRENT_VERSION as u8 - && version >= MIN_VERSION as u8 - ); - self.on_peer_connected(peer_id); - CustomMessageOutcome::None + GenericProtoOut::CustomProtocolOpen { peer_id, .. 
} => { + self.on_peer_connected(peer_id.clone()); + CustomMessageOutcome::NotifOpened { + remote: peer_id, + proto_names: self.behaviour.notif_protocols_names() + .map(|n| From::from(n.to_owned())) + .collect(), + } } - LegacyProtoOut::CustomProtocolClosed { peer_id, .. } => { - self.on_peer_disconnected(peer_id); - CustomMessageOutcome::None + GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { + self.on_peer_disconnected(peer_id.clone()); + CustomMessageOutcome::NotifClosed { + remote: peer_id, + proto_names: self.behaviour.notif_protocols_names() + .map(|n| From::from(n.to_owned())) + .collect(), + } }, - LegacyProtoOut::CustomMessage { peer_id, message } => + GenericProtoOut::CustomMessage { peer_id, message } => + // TODO: NotifMessages self.on_custom_message(peer_id, message), - LegacyProtoOut::Clogged { peer_id, messages } => { + GenericProtoOut::Clogged { peer_id, messages } => { debug!(target: "sync", "{} clogging messages:", messages.len()); for msg in messages.into_iter().take(5) { let message: Option> = Decode::decode(&mut &msg[..]).ok(); diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index c8bee5588c704..278650f616b3e 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -17,7 +17,9 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. +use libp2p::core::PeerId; use libp2p::kad::record::Key; +use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -38,6 +40,34 @@ pub enum DhtEvent { /// Type for events generated by networking layer. #[derive(Debug, Clone)] pub enum Event { + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol name is always one of the notification protocols that have been registered + /// in the configuration. + NotifOpened { + /// Node we opened the substream with. + remote: PeerId, + /// Name of the concerned protocol. Each protocol uses a different substream. + proto_name: Cow<'static, [u8]> + }, + + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotifOpened` message. + NotifClosed { + /// Node we closed the substream with. + remote: PeerId, + /// Name of the concerned protocol. Each protocol uses a different substream. + proto_name: Cow<'static, [u8]>, + }, + + /// Received a message from the given node using the given protocol. + NotifMessages { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. + messages: Vec<(Cow<'static, [u8]>, Vec)>, + }, + /// Event generated by a DHT. Dht(DhtEvent), } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 437e978f4bd47..6a4d2edc459de 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -25,9 +25,10 @@ //! The methods of the [`NetworkService`] are implemented by sending a message over a channel, //! which is then processed by [`NetworkWorker::poll`]. 
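The `NotifOpened`/`NotifClosed`/`NotifMessages` events added to `protocol/event.rs` above are what downstream code (for example a gossip layer) is expected to consume instead of the old `ConsensusGossip` hooks. A minimal, std-only sketch of such a consumer; `Event` and `PeerId` here are simplified local stand-ins, not the real networking types, and the protocol name is a made-up example:

// Editor's sketch: reacting to the new notification events.
use std::borrow::Cow;
use std::collections::HashSet;

type PeerId = String;

enum Event {
    NotifOpened { remote: PeerId, proto_name: Cow<'static, [u8]> },
    NotifClosed { remote: PeerId, proto_name: Cow<'static, [u8]> },
    NotifMessages { remote: PeerId, messages: Vec<(Cow<'static, [u8]>, Vec<u8>)> },
}

/// Tracks which peers currently have our protocol open, and counts messages.
fn consume(events: Vec<Event>, our_proto: &[u8]) -> (HashSet<PeerId>, usize) {
    let mut open = HashSet::new();
    let mut received = 0;
    for event in events {
        match event {
            Event::NotifOpened { remote, proto_name } if &*proto_name == our_proto => {
                open.insert(remote);
            }
            Event::NotifClosed { remote, proto_name } if &*proto_name == our_proto => {
                open.remove(&remote);
            }
            Event::NotifMessages { messages, .. } => {
                received += messages.iter().filter(|(p, _)| &**p == our_proto).count();
            }
            _ => {}
        }
    }
    (open, received)
}

fn main() {
    let proto: Cow<'static, [u8]> = Cow::Borrowed(&b"/example/gossip/1"[..]);
    let events = vec![
        Event::NotifOpened { remote: "alice".into(), proto_name: proto.clone() },
        Event::NotifMessages {
            remote: "alice".into(),
            messages: vec![(proto.clone(), vec![42])],
        },
        Event::NotifClosed { remote: "alice".into(), proto_name: proto.clone() },
    ];
    let (open, received) = consume(events, b"/example/gossip/1");
    assert!(open.is_empty());
    assert_eq!(received, 1);
}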
-use std::{collections::{HashMap, HashSet}, fs, marker::PhantomData, io, path::Path}; +use std::{borrow::Cow, collections::{HashMap, HashSet}, fs, marker::PhantomData, io, path::Path}; use std::sync::{Arc, atomic::{AtomicBool, AtomicUsize, Ordering}}; +use codec::Encode; use consensus::import_queue::{ImportQueue, Link}; use consensus::import_queue::{BlockImportResult, BlockImportError}; use futures::{prelude::*, sync::mpsc}; @@ -45,8 +46,7 @@ use crate::{NetworkState, NetworkStateNotConnectedPeer, NetworkStatePeer}; use crate::{transport, config::NonReservedPeerMode}; use crate::config::{Params, TransportConfig}; use crate::error::Error; -use crate::protocol::{self, Protocol, Context, CustomMessageOutcome, PeerInfo}; -use crate::protocol::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient}; +use crate::protocol::{self, Protocol, Context, PeerInfo}; use crate::protocol::{event::Event, light_dispatch::{AlwaysBadChecker, RequestData}}; use crate::protocol::specialization::NetworkSpecialization; use crate::protocol::sync::SyncState; @@ -193,7 +193,7 @@ impl, H: ExHashT> NetworkWorker let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); - let (protocol, peerset_handle) = Protocol::new( + let (mut protocol, peerset_handle) = Protocol::new( protocol::ProtocolConfig { roles: params.roles, max_parallel_downloads: params.network_config.max_parallel_downloads, @@ -210,6 +210,10 @@ impl, H: ExHashT> NetworkWorker params.block_announce_validator )?; + for (proto, engine_id) in params.network_config.extra_notif_protos { + protocol.register_notif_protocol(proto, engine_id, Vec::new()); + } + // Build the swarm. let (mut swarm, bandwidth) = { let user_agent = format!( @@ -276,6 +280,7 @@ impl, H: ExHashT> NetworkWorker import_queue: params.import_queue, from_worker, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), + events_streams: Vec::new(), }) } @@ -416,6 +421,57 @@ impl, H: ExHashT> NetworkServic self.local_peer_id.clone() } + /// Writes a message on an open notifications channel. Has no effect if the notifications + /// channel with this protocol name is closed. + /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. + /// + /// The protocol name must be one of the elements of `extra_notif_protos` that was passed in + /// the configuration, or a protocol registered with `register_notif_protocol`. + pub fn write_notif(&self, target: PeerId, proto_name: impl Into>, message: impl Encode) { + let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::WriteNotif { + target, + proto_name: proto_name.into(), + message: message.encode(), + }); + } + + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. + /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). 
+ // Note: when transitioning to stable futures, remove the `Error` entirely + pub fn events_stream(&self) -> impl Stream { + let (tx, rx) = mpsc::unbounded(); + let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::EventsStream(tx)); + rx + } + + /// Registers a new notifications protocol. + /// + /// This has the same effect as having an extra entry in + /// [`NetworkConfiguration::extra_notif_protos`]. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &self, + proto_name: impl Into>, + engine_id: ConsensusEngineId, + handshake: impl Into> + ) { + let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::RegisterNotifProtocol { + proto_name: proto_name.into(), + engine_id, + handshake: handshake.into(), + }); + } + /// You must call this when new transactons are imported by the transaction pool. /// /// The latest transactions will be fetched from the `TransactionPool` that was passed at @@ -432,21 +488,6 @@ impl, H: ExHashT> NetworkServic let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::AnnounceBlock(hash, data)); } - /// Send a consensus message through the gossip - pub fn gossip_consensus_message( - &self, - topic: B::Hash, - engine_id: ConsensusEngineId, - message: Vec, - recipient: GossipMessageRecipient, - ) { - let _ = self - .to_worker - .unbounded_send(ServerToWorkerMsg::GossipConsensusMessage( - topic, engine_id, message, recipient, - )); - } - /// Report a given peer as either beneficial (+) or costly (-) according to the /// given scalar. pub fn report_peer(&self, who: PeerId, cost_benefit: i32) { @@ -472,15 +513,6 @@ impl, H: ExHashT> NetworkServic .unbounded_send(ServerToWorkerMsg::ExecuteWithSpec(Box::new(f))); } - /// Execute a closure with the consensus gossip. - pub fn with_gossip(&self, f: F) - where F: FnOnce(&mut ConsensusGossip, &mut dyn Context) + Send + 'static - { - let _ = self - .to_worker - .unbounded_send(ServerToWorkerMsg::ExecuteWithGossip(Box::new(f))); - } - /// Are we in the process of downloading the chain? pub fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) @@ -572,6 +604,21 @@ impl, H: ExHashT> NetworkServic } } +impl, H: ExHashT> Clone for NetworkService { + fn clone(&self) -> Self { + NetworkService { + num_connected: self.num_connected.clone(), + external_addresses: self.external_addresses.clone(), + is_major_syncing: self.is_major_syncing.clone(), + local_peer_id: self.local_peer_id.clone(), + bandwidth: self.bandwidth.clone(), + peerset: self.peerset.clone(), + to_worker: self.to_worker.clone(), + _marker: self._marker.clone(), + } + } +} + impl, H: ExHashT> consensus::SyncOracle for NetworkService { @@ -630,12 +677,21 @@ enum ServerToWorkerMsg> { RequestJustification(B::Hash, NumberFor), AnnounceBlock(B::Hash, Vec), ExecuteWithSpec(Box) + Send>), - ExecuteWithGossip(Box, &mut dyn Context) + Send>), - GossipConsensusMessage(B::Hash, ConsensusEngineId, Vec, GossipMessageRecipient), GetValue(record::Key), PutValue(record::Key, Vec), AddKnownAddress(PeerId, Multiaddr), SyncFork(Vec, B::Hash, NumberFor), + EventsStream(mpsc::UnboundedSender), + WriteNotif { + message: Vec, + proto_name: Vec, + target: PeerId, + }, + RegisterNotifProtocol { + proto_name: Cow<'static, [u8]>, + engine_id: ConsensusEngineId, + handshake: Vec, + }, } /// Main network worker. Must be polled in order for the network to advance. 
@@ -659,6 +715,8 @@ pub struct NetworkWorker, H: Ex from_worker: mpsc::UnboundedReceiver>, /// Receiver for queries from the light client that must be processed. light_client_rqs: Option>>, + /// Senders for events that happen on the network. + events_streams: Vec>, } impl, H: ExHashT> Stream for NetworkWorker { @@ -695,13 +753,6 @@ impl, H: ExHashT> Stream for Ne let (mut context, spec) = protocol.specialization_lock(); task(spec, &mut context); }, - ServerToWorkerMsg::ExecuteWithGossip(task) => { - let protocol = self.network_service.user_protocol_mut(); - let (mut context, gossip) = protocol.consensus_gossip_lock(); - task(gossip, &mut context); - } - ServerToWorkerMsg::GossipConsensusMessage(topic, engine_id, message, recipient) => - self.network_service.user_protocol_mut().gossip_consensus_message(topic, engine_id, message, recipient), ServerToWorkerMsg::AnnounceBlock(hash, data) => self.network_service.user_protocol_mut().announce_block(hash, data), ServerToWorkerMsg::RequestJustification(hash, number) => @@ -716,6 +767,13 @@ impl, H: ExHashT> Stream for Ne self.network_service.add_known_address(peer_id, addr), ServerToWorkerMsg::SyncFork(peer_ids, hash, number) => self.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), + ServerToWorkerMsg::EventsStream(sender) => + self.events_streams.push(sender), + ServerToWorkerMsg::WriteNotif { message, proto_name, target } => + self.network_service.user_protocol_mut().write_notif(target, proto_name, message), + ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake } => + self.network_service.user_protocol_mut() + .register_notif_protocol(proto_name, engine_id, handshake), } } @@ -723,27 +781,24 @@ impl, H: ExHashT> Stream for Ne // Process the next action coming from the network. let poll_value = self.network_service.poll(); - let outcome = match poll_value { + match poll_value { Ok(Async::NotReady) => break, - Ok(Async::Ready(Some(BehaviourOut::SubstrateAction(outcome)))) => outcome, - Ok(Async::Ready(Some(BehaviourOut::Dht(ev)))) => - return Ok(Async::Ready(Some(Event::Dht(ev)))), - Ok(Async::Ready(None)) => CustomMessageOutcome::None, + Ok(Async::Ready(Some(BehaviourOut::BlockImport(origin, blocks)))) => + self.import_queue.import_blocks(origin, blocks), + Ok(Async::Ready(Some(BehaviourOut::JustificationImport(origin, hash, nb, justification)))) => + self.import_queue.import_justification(origin, hash, nb, justification), + Ok(Async::Ready(Some(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)))) => + self.import_queue.import_finality_proof(origin, hash, nb, proof), + Ok(Async::Ready(Some(BehaviourOut::Event(ev)))) => { + self.events_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()); + return Ok(Async::Ready(Some(ev))); + }, + Ok(Async::Ready(None)) => {}, Err(err) => { error!(target: "sync", "Error in the network: {:?}", err); return Err(err) } }; - - match outcome { - CustomMessageOutcome::BlockImport(origin, blocks) => - self.import_queue.import_blocks(origin, blocks), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - self.import_queue.import_justification(origin, hash, nb, justification), - CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => - self.import_queue.import_finality_proof(origin, hash, nb, proof), - CustomMessageOutcome::None => {} - } } // Update the variables shared with the `NetworkService`. 
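Taken together, `register_notif_protocol`, `write_notif` and `events_stream` replace the gossip-specific entry points that this commit removes from `NetworkService`. A hedged usage sketch follows, with the protocol name, engine id and surrounding function invented purely for illustration (a later commit in this series renames `write_notif` to `write_notification`):

    use std::sync::Arc;
    use libp2p::PeerId;
    use sr_primitives::{ConsensusEngineId, traits::Block as BlockT};

    // Invented identifiers, purely for the example.
    const EXAMPLE_PROTO: &'static [u8] = b"/example/notifications/1";
    const EXAMPLE_ENGINE_ID: ConsensusEngineId = *b"exnp";

    // `S` and `H` carry the same bounds as `NetworkService` itself
    // (`NetworkSpecialization<B>` and `ExHashT` from this crate).
    fn send_hello<B, S, H>(network: &Arc<NetworkService<B, S, H>>, peer: PeerId)
    where
        B: BlockT + 'static,
        S: NetworkSpecialization<B>,
        H: ExHashT,
    {
        // Best done through `extra_notif_protos` in the configuration: connections
        // that are already open only know the protocols registered at that time.
        network.register_notif_protocol(EXAMPLE_PROTO, EXAMPLE_ENGINE_ID, Vec::new());

        // Best-effort delivery: this is a no-op if no substream is open with `peer`.
        network.write_notif(peer, EXAMPLE_PROTO, b"hello".to_vec());
    }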
diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 878e3e3c4a592..6db98a1aee6d9 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -156,6 +156,7 @@ fn node_config ( non_reserved_mode: NonReservedPeerMode::Accept, client_version: "network/test/0.1".to_owned(), node_name: "unknown".to_owned(), + extra_notif_protos: Vec::new(), transport: TransportConfig::Normal { enable_mdns: false, allow_private_ipv4: true, From c5fdfd558ab2afb04174fd43a88c08915aa6a708 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 18 Nov 2019 18:15:20 +0100 Subject: [PATCH 02/15] Work on removing GrandPA Network trait --- .../src/communication/gossip.rs | 13 ++- .../finality-grandpa/src/communication/mod.rs | 81 +++++-------------- .../src/communication/periodic.rs | 6 +- 3 files changed, 27 insertions(+), 73 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 9cf6234fec4b3..a2c43335aa0fa 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -83,7 +83,7 @@ //! We only send polite messages to peers, use sr_primitives::traits::{NumberFor, Block as BlockT, Zero}; -use network_gossip::{self, MessageIntent, ValidatorContext}; +use network_gossip::{self, GossipEngine, MessageIntent, ValidatorContext}; use network::{config::Roles, PeerId}; use codec::{Encode, Decode}; use fg_primitives::AuthorityId; @@ -1402,29 +1402,26 @@ pub(super) struct ReportStream { impl ReportStream { /// Consume the report stream, converting it into a future that /// handles all reports. - pub(super) fn consume(self, net: N) + pub(super) fn consume(self, net: GossipEngine) -> impl Future + Send + 'static where B: BlockT, - N: super::Network + Send + 'static, { ReportingTask { reports: self.reports, net, - _marker: Default::default(), } } } /// A future for reporting peers. #[must_use = "Futures do nothing unless polled"] -struct ReportingTask { +struct ReportingTask { reports: mpsc::UnboundedReceiver, - net: N, - _marker: std::marker::PhantomData, + net: GossipEngine, } -impl> Future for ReportingTask { +impl Future for ReportingTask { type Item = (); type Error = (); diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 5e8556f689659..e9de1808b58b3 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -94,50 +94,6 @@ mod benefit { pub(super) const PER_EQUIVOCATION: i32 = 10; } -/// A handle to the network. This is generally implemented by providing some -/// handle to a gossip service or similar. -/// -/// Intended to be a lightweight handle such as an `Arc`. -pub trait Network: Clone + Send + 'static { - /// A stream of input messages for a topic. - type In: Stream; - - /// Get a stream of messages for a specific gossip topic. - fn messages_for(&self, topic: Block::Hash) -> Self::In; - - /// Register a gossip validator. - fn register_validator(&self, validator: Arc>); - - /// Gossip a message out to all connected peers. - /// - /// Force causes it to be sent to all peers, even if they've seen it already. - /// Only should be used in case of consensus stall. 
- fn gossip_message(&self, topic: Block::Hash, data: Vec, force: bool); - - /// Register a message with the gossip service, it isn't broadcast right - /// away to any peers, but may be sent to new peers joining or when asked to - /// broadcast the topic. Useful to register previous messages on node - /// startup. - fn register_gossip_message(&self, topic: Block::Hash, data: Vec); - - /// Send a message to a bunch of specific peers, even if they've seen it already. - fn send_message(&self, who: Vec, data: Vec); - - /// Report a peer's cost or benefit after some action. - fn report(&self, who: network::PeerId, cost_benefit: i32); - - /// Inform peers that a block with given hash should be downloaded. - fn announce(&self, block: Block::Hash, associated_data: Vec); - - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); -} - /// Create a unique topic for a round and set-id combo. pub(crate) fn round_topic(round: RoundNumber, set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) @@ -148,12 +104,12 @@ pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } -impl Network for GossipEngine where +/*impl Network for GossipEngine where B: BlockT, { - type In = Box + Send + 'static>; + type In = ; - fn messages_for(&self, topic: B::Hash) -> Self::In { + fn messages_for(&self, topic: B::Hash) -> Box + Send + 'static> { let stream = self.messages_for(GRANDPA_ENGINE_ID, topic) .map(|x| Ok(x)) .compat(); @@ -189,7 +145,7 @@ impl Network for GossipEngine where fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { // TODO: NetworkService::set_sync_fork_request(self, peers, hash, number) } -} +}*/ /// Bridge between the underlying network service, gossiping consensus messages and Grandpa pub(crate) struct NetworkBridge { @@ -428,7 +384,7 @@ impl NetworkBridge { self.neighbor_sender.clone(), ); - let outgoing = CommitsOut::>::new( + let outgoing = CommitsOut::::new( self.service.clone(), set_id.0, is_voter, @@ -455,8 +411,8 @@ impl NetworkBridge { } } -fn incoming_global>( - mut service: N, +fn incoming_global( + mut service: GossipEngine, topic: B::Hash, voters: Arc>, gossip_validator: Arc>, @@ -465,7 +421,7 @@ fn incoming_global>( let process_commit = move | msg: FullCommitMessage, mut notification: network_gossip::TopicNotification, - service: &mut N, + service: &mut GossipEngine, gossip_validator: &Arc>, voters: &VoterSet, | { @@ -527,7 +483,7 @@ fn incoming_global>( let process_catch_up = move | msg: FullCatchUpMessage, mut notification: network_gossip::TopicNotification, - service: &mut N, + service: &mut GossipEngine, gossip_validator: &Arc>, voters: &VoterSet, | { @@ -562,7 +518,8 @@ fn incoming_global>( Some(voter::CommunicationIn::CatchUp(msg.message, cb)) }; - service.messages_for(topic) + service.messages_for(GRANDPA_ENGINE_ID, topic) + .map(|m| Ok::<_, ()>(m)) .filter_map(|notification| { // this could be optimized by decoding piecewise. 
let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -633,16 +590,16 @@ pub(crate) fn check_message_sig( /// use the same raw message and key to sign. This is currently true for /// `ed25519` and `BLS` signatures (which we might use in the future), care must /// be taken when switching to different key types. -struct OutgoingMessages> { +struct OutgoingMessages { round: RoundNumber, set_id: SetIdNumber, locals: Option<(AuthorityPair, AuthorityId)>, sender: mpsc::UnboundedSender>, - network: N, + network: GossipEngine, has_voted: HasVoted, } -impl> Sink for OutgoingMessages +impl Sink for OutgoingMessages { type SinkItem = Message; type SinkError = Error; @@ -886,18 +843,18 @@ fn check_catch_up( } /// An output sink for commit messages. -struct CommitsOut> { - network: N, +struct CommitsOut { + network: GossipEngine, set_id: SetId, is_voter: bool, gossip_validator: Arc>, neighbor_sender: periodic::NeighborPacketSender, } -impl> CommitsOut { +impl CommitsOut { /// Create a new commit output stream. pub(crate) fn new( - network: N, + network: GossipEngine, set_id: SetIdNumber, is_voter: bool, gossip_validator: Arc>, @@ -913,7 +870,7 @@ impl> CommitsOut { } } -impl> Sink for CommitsOut { +impl Sink for CommitsOut { type SinkItem = (RoundNumber, Commit); type SinkError = Error; diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index 9dd662ce43461..d080701a390b5 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -25,8 +25,9 @@ use log::{debug, warn}; use tokio_timer::Delay; use network::PeerId; +use network_gossip::GossipEngine; use sr_primitives::traits::{NumberFor, Block as BlockT}; -use super::{gossip::{NeighborPacket, GossipMessage}, Network}; +use super::gossip::{NeighborPacket, GossipMessage}; // how often to rebroadcast, if no other const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); @@ -58,12 +59,11 @@ impl NeighborPacketSender { /// /// It may rebroadcast the last neighbor packet periodically when no /// progress is made. 
-pub(super) fn neighbor_packet_worker(net: N) -> ( +pub(super) fn neighbor_packet_worker(net: GossipEngine) -> ( impl Future + Send + 'static, NeighborPacketSender, ) where B: BlockT, - N: Network, { let mut last = None; let (tx, mut rx) = mpsc::unbounded::<(Vec, NeighborPacket>)>(); From 50eacf1fa78b6ba97969d962a70fd155fda13db6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 19 Nov 2019 12:11:54 +0100 Subject: [PATCH 03/15] Finish GrandPA Network trait removal --- .../finality-grandpa/src/communication/mod.rs | 53 ++---------------- client/finality-grandpa/src/lib.rs | 1 + client/network-gossip/src/bridge.rs | 56 +++++++++++-------- 3 files changed, 39 insertions(+), 71 deletions(-) diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index e9de1808b58b3..439b6ba09ceae 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -31,7 +31,7 @@ use std::sync::Arc; use futures::prelude::*; use futures::sync::mpsc; -use futures03::{compat::Compat, stream::{StreamExt, TryStreamExt}}; +use futures03::{compat::Compat, stream::StreamExt}; use grandpa::Message::{Prevote, Precommit, PrimaryPropose}; use grandpa::{voter, voter_set::VoterSet}; use log::{debug, trace}; @@ -104,49 +104,6 @@ pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } -/*impl Network for GossipEngine where - B: BlockT, -{ - type In = ; - - fn messages_for(&self, topic: B::Hash) -> Box + Send + 'static> { - let stream = self.messages_for(GRANDPA_ENGINE_ID, topic) - .map(|x| Ok(x)) - .compat(); - Box::new(stream) - } - - fn register_validator(&self, validator: Arc>) { - unimplemented!() - } - - fn gossip_message(&self, topic: B::Hash, data: Vec, force: bool) { - self.multicast(topic, GRANDPA_ENGINE_ID, data, force) - } - - fn register_gossip_message(&self, topic: B::Hash, data: Vec) { - self.register_message(topic, GRANDPA_ENGINE_ID, data) - } - - fn send_message(&self, who: Vec, data: Vec) { - for who in &who { - self.send_message(who, GRANDPA_ENGINE_ID, data.clone()) - } - } - - fn report(&self, who: network::PeerId, cost_benefit: i32) { - // TODO: self.report_peer(who, cost_benefit) - } - - fn announce(&self, block: B::Hash, associated_data: Vec) { - // TODO: self.announce_block(block, associated_data) - } - - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - // TODO: NetworkService::set_sync_fork_request(self, peers, hash, number) - } -}*/ - /// Bridge between the underlying network service, gossiping consensus messages and Grandpa pub(crate) struct NetworkBridge { service: GossipEngine, @@ -283,7 +240,7 @@ impl NetworkBridge { }); let topic = round_topic::(round.0, set_id.0); - let incoming = Compat::new(self.service.messages_for(GRANDPA_ENGINE_ID, topic) + let incoming = Compat::new(self.service.messages_for(topic) .map(|item| Ok::<_, ()>(item))) .filter_map(|notification| { let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -337,7 +294,7 @@ impl NetworkBridge { .map_err(|()| Error::Network(format!("Failed to receive message on unbounded stream"))); let (tx, out_rx) = mpsc::unbounded(); - let outgoing = OutgoingMessages::> { + let outgoing = OutgoingMessages:: { round: round.0, set_id: set_id.0, network: self.service.clone(), @@ -518,8 +475,8 @@ fn incoming_global( Some(voter::CommunicationIn::CatchUp(msg.message, cb)) }; - service.messages_for(GRANDPA_ENGINE_ID, topic) 
- .map(|m| Ok::<_, ()>(m)) + Compat::new(service.messages_for(topic) + .map(|m| Ok::<_, ()>(m))) .filter_map(|notification| { // this could be optimized by decoding piecewise. let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index b27cdca300647..98d4c85ead26c 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -908,6 +908,7 @@ pub fn setup_disabled_grandpa, RA, N>( inherent_data_providers, )?; + // TODO: figure out before merging //network.register_validator(Arc::new(network_gossip::DiscardAll)); Ok(()) diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 9f88b8bac172b..9bb7a1aafb037 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -24,11 +24,12 @@ use network::{Event, config::Roles}; use futures::{prelude::*, channel::mpsc, compat::Compat01As03}; use libp2p::PeerId; use parking_lot::Mutex; -use sr_primitives::{traits::Block as BlockT, ConsensusEngineId}; +use sr_primitives::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; use std::{borrow::Cow, sync::Arc, time::Duration}; pub struct GossipEngine { inner: Arc>>, + engine_id: ConsensusEngineId, } struct GossipEngineInner { @@ -62,6 +63,7 @@ impl GossipEngine { let gossip_engine = GossipEngine { inner: inner.clone(), + engine_id, }; async_std::task::spawn({ @@ -127,19 +129,22 @@ impl GossipEngine { self.inner.lock().state_machine.abort(); } + pub fn report(&self, who: PeerId, reputation: i32) { + self.inner.lock().context.report_peer(who, reputation); + } + /// Registers a message without propagating it to any peers. The message /// becomes available to new peers or when the service is asked to GossipEngine /// the message's topic. No validation is performed on the message, if the /// message is already expired it should be dropped on the next garbage /// collection. - pub fn register_message( + pub fn register_gossip_message( &self, topic: B::Hash, - engine_id: ConsensusEngineId, message: Vec, ) { let message = ConsensusMessage { - engine_id, + engine_id: self.engine_id, data: message, }; @@ -154,10 +159,10 @@ impl GossipEngine { } /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) - pub fn messages_for(&self, engine_id: ConsensusEngineId, topic: B::Hash) + pub fn messages_for(&self, topic: B::Hash) -> mpsc::UnboundedReceiver { - self.inner.lock().state_machine.messages_for(engine_id, topic) + self.inner.lock().state_machine.messages_for(self.engine_id, topic) } /// Send all messages with given topic to a peer. @@ -165,24 +170,22 @@ impl GossipEngine { &self, who: &PeerId, topic: B::Hash, - engine_id: ConsensusEngineId, force: bool ) { let mut inner = self.inner.lock(); let inner = &mut *inner; - inner.state_machine.send_topic(&mut *inner.context, who, topic, engine_id, force) + inner.state_machine.send_topic(&mut *inner.context, who, topic, self.engine_id, force) } /// Multicast a message to all peers. - pub fn multicast( + pub fn gossip_message( &self, topic: B::Hash, - engine_id: ConsensusEngineId, message: Vec, force: bool, ) { let message = ConsensusMessage { - engine_id, + engine_id: self.engine_id, data: message, }; @@ -191,27 +194,34 @@ impl GossipEngine { inner.state_machine.multicast(&mut *inner.context, topic, message, force) } - /// Send addressed message to a peer. The message is not kept or multicast + /// Send addressed message to the given peers. 
The message is not kept or multicast /// later on. - pub fn send_message( - &self, - who: &PeerId, - engine_id: ConsensusEngineId, - message: Vec, - ) { + pub fn send_message(&self, who: Vec, data: Vec) { let mut inner = self.inner.lock(); let inner = &mut *inner; - inner.state_machine.send_message(&mut *inner.context, who, ConsensusMessage { - engine_id, - data: message, - }); + + for who in &who { + inner.state_machine.send_message(&mut *inner.context, who, ConsensusMessage { + engine_id: self.engine_id, + data: data.clone(), + }); + } + } + + pub fn announce(&self, block: B::Hash, associated_data: Vec) { + // TODO: self.announce_block(block, associated_data) + } + + pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + // TODO: NetworkService::set_sync_fork_request(self, peers, hash, number) } } impl Clone for GossipEngine { fn clone(&self) -> Self { GossipEngine { - inner: self.inner.clone() + inner: self.inner.clone(), + engine_id: self.engine_id.clone(), } } } From 64b81b4da61a01648507d1fb608d5bb42740da09 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 19 Nov 2019 14:12:11 +0100 Subject: [PATCH 04/15] More work --- client/network/src/generic_proto/behaviour.rs | 36 ++++++++++++++-- .../src/generic_proto/handler/group.rs | 43 +++++++++++++------ client/network/src/protocol.rs | 34 ++++++++++----- 3 files changed, 86 insertions(+), 27 deletions(-) diff --git a/client/network/src/generic_proto/behaviour.rs b/client/network/src/generic_proto/behaviour.rs index cc1631f2850bd..f0e360f3c9868 100644 --- a/client/network/src/generic_proto/behaviour.rs +++ b/client/network/src/generic_proto/behaviour.rs @@ -26,6 +26,7 @@ use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use log::{debug, error, trace, warn}; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; +use sr_primitives::ConsensusEngineId; use std::{borrow::Cow, collections::hash_map::Entry, cmp, error, marker::PhantomData, mem, pin::Pin}; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -386,20 +387,49 @@ impl GenericProto { pub fn send_packet( &mut self, target: &PeerId, - proto_name: Option>, message: impl Into>, ) { if !self.is_open(target) { return; } - trace!(target: "sub-libp2p", "External API => Packet for {:?} with protocol {:?}", target, proto_name); + trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + self.events.push(NetworkBehaviourAction::SendEvent { peer_id: target.clone(), event: NotifsHandlerIn::Send { message: message.into(), - proto_name: proto_name.map(Into::into), + }, + }); + } + + /// Sends a notification to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. + /// + /// Also note that even we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. 
+ pub fn write_notif( + &mut self, + target: &PeerId, + proto_name: Cow<'static, [u8]>, + engine_id: ConsensusEngineId, + message: impl Into>, + ) { + if !self.is_open(target) { + return; + } + + trace!(target: "sub-libp2p", "External API => Notification for {:?} with protocol {:?}", target, proto_name); + trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: target.clone(), + event: NotifsHandlerIn::SendNotif { + message: message.into(), + proto_name, + engine_id, }, }); } diff --git a/client/network/src/generic_proto/handler/group.rs b/client/network/src/generic_proto/handler/group.rs index 7f5ed11b29a36..47ac2d0471da4 100644 --- a/client/network/src/generic_proto/handler/group.rs +++ b/client/network/src/generic_proto/handler/group.rs @@ -20,7 +20,9 @@ use crate::generic_proto::{ handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, upgrade::{NotificationsIn, NotificationsOut, RegisteredProtocol, SelectUpgrade, UpgradeCollec}, }; +use crate::protocol::message::generic::ConsensusMessage; use bytes::BytesMut; +use codec::Encode as _; use futures::prelude::*; use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::core::either::{EitherError, EitherOutput}; @@ -33,6 +35,7 @@ use libp2p::swarm::{ SubstreamProtocol, }; use log::error; +use sr_primitives::ConsensusEngineId; use std::{borrow::Cow, error, io}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -119,13 +122,22 @@ pub enum NotifsHandlerIn { /// The node should stop using custom protocols. Disable, - /// Sends a message through a custom protocol substream. + /// Sends a message through the custom protocol substream. Send { - /// Name of the protocol for the message, or `None` to force the legacy protocol. + /// The message to send. + message: Vec, + }, + + /// Sends a notifications message. + SendNotif { + /// Name of the protocol for the message. /// - /// If `Some`, must match one of the registered protocols. For backwards-compatibility - /// reasons, if the remote doesn't support this protocol, we use the legacy substream. - proto_name: Option>, + /// Must match one of the registered protocols. For backwards-compatibility reasons, if + /// the remote doesn't support this protocol, we use the legacy substream. + proto_name: Cow<'static, [u8]>, + + /// For legacy reasons, the name to use if we send the message on the legacy substream. + engine_id: ConsensusEngineId, /// The message to send. message: Vec, @@ -257,17 +269,22 @@ where TSubstream: AsyncRead + AsyncWrite + Send + 'static { self.in_handlers[num].inject_event(NotifsInHandlerIn::Refuse); } }, - NotifsHandlerIn::Send { proto_name, message } => { - if let Some(proto_name) = proto_name { - for handler in &mut self.out_handlers { - if handler.is_open() && handler.protocol_name() == &proto_name[..] { - handler.inject_event(NotifsOutHandlerIn::Send(message)); - return; - } + NotifsHandlerIn::Send { message } => + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), + NotifsHandlerIn::SendNotif { message, engine_id, proto_name } => { + for handler in &mut self.out_handlers { + if handler.is_open() && handler.protocol_name() == &proto_name[..] 
{ + handler.inject_event(NotifsOutHandlerIn::Send(message)); + return; } } - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }); + let message = ConsensusMessage { + engine_id, + data: message, + }; + + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message: message.encode() }); }, } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2a3df2f234462..5d75b5e64d0b8 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -120,6 +120,8 @@ pub struct Protocol, H: ExHashT> { handshaking_peers: HashMap, /// For each legacy gossiping engine ID, the corresponding new protocol name. protocol_name_by_engine: HashMap>, + /// For each protocol name, the legacy gossiping engine ID. + protocol_engine_by_name: HashMap, ConsensusEngineId>, /// Used to report reputation changes. peerset_handle: peerset::PeersetHandle, transaction_pool: Arc>, @@ -192,7 +194,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { block, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } fn send_read_request( @@ -208,7 +210,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { keys, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } fn send_read_child_request( @@ -226,7 +228,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { keys, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } fn send_call_request( @@ -244,7 +246,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { data, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } fn send_changes_request( @@ -268,7 +270,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { key, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } fn send_body_request( @@ -290,7 +292,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { max, }); - self.behaviour.send_packet(who, None, message.encode()) + self.behaviour.send_packet(who, message.encode()) } } @@ -337,6 +339,8 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, } fn send_consensus(&mut self, who: PeerId, messages: Vec) { + panic!(); // TODO: shouldn't be reached + if self.context_data.peers.get(&who).map_or(false, |peer| peer.info.protocol_version > 4) { let mut batch = Vec::new(); let len = messages.len(); @@ -442,6 +446,7 @@ impl, H: ExHashT> Protocol { sync, specialization, protocol_name_by_engine: HashMap::new(), + protocol_engine_by_name: HashMap::new(), handshaking_peers: HashMap::new(), transaction_pool, finality_proof_provider, @@ -626,6 +631,7 @@ impl, H: ExHashT> Protocol { GenericMessage::Consensus(msg) => { let outcome = if let Some(proto_name) = self.protocol_name_by_engine.get(&msg.engine_id) { // TODO: what if not open? check if open? + panic!("notif message!"); CustomMessageOutcome::NotifMessages { remote: who.clone(), messages: vec![(proto_name.clone(), msg.data.clone())], @@ -641,6 +647,7 @@ impl, H: ExHashT> Protocol { .iter() .filter_map(|msg| { if let Some(proto_name) = self.protocol_name_by_engine.get(&msg.engine_id) { + panic!("notif message!"); // TODO: what if not open? check if open? 
Some((proto_name.clone(), msg.data.clone())) } else { @@ -1055,7 +1062,12 @@ impl, H: ExHashT> Protocol { proto_name: impl Into>, message: impl Into> ) { - self.behaviour.send_packet(&target, Some(proto_name.into()), message) + let proto_name = proto_name.into(); + if let Some(engine_id) = self.protocol_engine_by_name.get(&proto_name) { + self.behaviour.write_notif(&target, proto_name, *engine_id, message); + } else { + error!(target: "sub-libp2p", "Sending a notification with a protocol that wasn't registered"); + } } /// Registers a new notifications protocol. @@ -1070,9 +1082,10 @@ impl, H: ExHashT> Protocol { ) { let proto_name = proto_name.into(); if self.protocol_name_by_engine.insert(engine_id, proto_name.clone()).is_some() { - error!("Notifications protocol already registered: {:?}", proto_name); + error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", proto_name); } else { - self.behaviour.register_notif_protocol(proto_name, handshake); + self.behaviour.register_notif_protocol(proto_name.clone(), handshake); + self.protocol_engine_by_name.insert(proto_name, engine_id); } } @@ -1748,7 +1761,7 @@ fn send_message( let mut stats = stats.entry(message.id()).or_default(); stats.bytes_out += encoded.len() as u64; stats.count_out += 1; - behaviour.send_packet(who, None, encoded); + behaviour.send_packet(who, encoded); } impl, H: ExHashT> NetworkBehaviour for @@ -1857,7 +1870,6 @@ Protocol { } }, GenericProtoOut::CustomMessage { peer_id, message } => - // TODO: NotifMessages self.on_custom_message(peer_id, message), GenericProtoOut::Clogged { peer_id, messages } => { debug!(target: "sync", "{} clogging messages:", messages.len()); From 6a8e7dffd28f5ab5c18d4d10f38ae1da6200f816 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 19 Nov 2019 16:08:02 +0100 Subject: [PATCH 05/15] Rename AbstractNetwork to Network --- client/finality-grandpa/src/communication/mod.rs | 4 ++-- client/finality-grandpa/src/communication/tests.rs | 2 +- client/finality-grandpa/src/lib.rs | 8 ++++---- client/finality-grandpa/src/observer.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 439b6ba09ceae..b59e0a70bf679 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -35,7 +35,7 @@ use futures03::{compat::Compat, stream::StreamExt}; use grandpa::Message::{Prevote, Precommit, PrimaryPropose}; use grandpa::{voter, voter_set::VoterSet}; use log::{debug, trace}; -use network_gossip::{GossipEngine, Network as AbstractNetwork}; +use network_gossip::{GossipEngine, Network}; use codec::{Encode, Decode}; use primitives::Pair; use sr_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; @@ -116,7 +116,7 @@ impl NetworkBridge { /// handle and a future that must be polled to completion to finish startup. /// On creation it will register previous rounds' votes with the gossip /// service taken from the VoterSetState. 
- pub(crate) fn new( + pub(crate) fn new( service: N, config: crate::Config, set_state: crate::environment::SharedVoterSetState, diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 59389b2b35251..63995cd3c989b 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -103,7 +103,7 @@ impl network_gossip::ValidatorContext for TestNetwork { fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } fn send_message(&mut self, who: &network::PeerId, data: Vec) { - >::send_message(self, vec![who.clone()], data); + //>::send_message(self, vec![who.clone()], data); } fn send_topic(&mut self, _: &network::PeerId, _: Hash, _: bool) { } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 98d4c85ead26c..6fd88151d2c60 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -92,7 +92,7 @@ mod observer; mod until_imported; mod voting_rule; -pub use network_gossip::{GossipEngine, Network as AbstractNetwork}; +pub use network_gossip::{GossipEngine, Network}; pub use finality_proof::FinalityProofProvider; pub use justification::GrandpaJustification; pub use light_import::light_block_import; @@ -548,7 +548,7 @@ pub fn run_grandpa_voter, N, RA, SC, VR, X>( Block::Hash: Ord, B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: AbstractNetwork + Send + Clone + 'static, + N: Network + Send + Clone + 'static, SC: SelectChain + 'static, VR: VotingRule> + Clone + 'static, NumberFor: BlockNumberOps, @@ -876,7 +876,7 @@ pub fn run_grandpa, N, RA, SC, VR, X>( Block::Hash: Ord, B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: AbstractNetwork + Send + Clone + 'static, + N: Network + Send + Clone + 'static, SC: SelectChain + 'static, NumberFor: BlockNumberOps, DigestFor: Encode, @@ -901,7 +901,7 @@ pub fn setup_disabled_grandpa, RA, N>( B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, RA: Send + Sync + 'static, - N: AbstractNetwork + Send + Sync + 'static, + N: Network + Send + Sync + 'static, { register_finality_tracker_inherent_data_provider( client, diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 3d33aa426a514..7dc8b649b8710 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -23,7 +23,7 @@ use grandpa::{ BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet }; use log::{debug, info, warn}; -use network_gossip::Network as AbstractNetwork; +use network_gossip::Network; use consensus_common::SelectChain; use client_api::{CallExecutor, backend::Backend}; @@ -160,7 +160,7 @@ pub fn run_grandpa_observer, N, RA, SC>( ) -> ::client_api::error::Result + Send + 'static> where B: Backend + 'static, E: CallExecutor + Send + Sync + 'static, - N: AbstractNetwork + Send + Clone + 'static, + N: Network + Send + Clone + 'static, SC: SelectChain + 'static, NumberFor: BlockNumberOps, RA: Send + Sync + 'static, From 717a7ee9214628957c02f068ada7deb690a2e952 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 10:54:20 +0100 Subject: [PATCH 06/15] notif -> notification --- client/network-gossip/src/bridge.rs | 2 +- client/network-gossip/src/lib.rs | 6 +++--- client/network/src/generic_proto/behaviour.rs | 2 +- client/network/src/generic_proto/handler/group.rs | 4 ++-- client/network/src/protocol.rs | 2 +- client/network/src/service.rs | 4 ++-- 6 files 
changed, 10 insertions(+), 10 deletions(-) diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 9bb7a1aafb037..3e49a08051e35 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -243,7 +243,7 @@ impl Context for ContextOverService { fn send_consensus(&mut self, who: PeerId, messages: Vec) { // TODO: send batch for message in messages { - self.network.write_notif(who.clone(), self.proto_name.clone(), message.engine_id, message.data); + self.network.write_notification(who.clone(), self.proto_name.clone(), message.engine_id, message.data); } } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 0b42f1aa1e71f..be33423d7eeb2 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -38,7 +38,7 @@ pub trait Network { fn disconnect_peer(&mut self, who: PeerId); /// Send a notification to a peer. - fn write_notif(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec); + fn write_notification(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec); /// Registers a notifications protocol. /// @@ -64,13 +64,13 @@ impl, H: ExHashT> Network for Arc, engine_id: ConsensusEngineId, message: Vec) { + fn write_notification(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec) { let message = ConsensusMessage { engine_id, data: message, }; - NetworkService::write_notif(self, who, proto_name, message) + NetworkService::write_notification(self, who, proto_name, message) } fn register_notif_protocol( diff --git a/client/network/src/generic_proto/behaviour.rs b/client/network/src/generic_proto/behaviour.rs index f0e360f3c9868..dba9993a72553 100644 --- a/client/network/src/generic_proto/behaviour.rs +++ b/client/network/src/generic_proto/behaviour.rs @@ -426,7 +426,7 @@ impl GenericProto { self.events.push(NetworkBehaviourAction::SendEvent { peer_id: target.clone(), - event: NotifsHandlerIn::SendNotif { + event: NotifsHandlerIn::SendNotification { message: message.into(), proto_name, engine_id, diff --git a/client/network/src/generic_proto/handler/group.rs b/client/network/src/generic_proto/handler/group.rs index 47ac2d0471da4..a95ec02c1835c 100644 --- a/client/network/src/generic_proto/handler/group.rs +++ b/client/network/src/generic_proto/handler/group.rs @@ -129,7 +129,7 @@ pub enum NotifsHandlerIn { }, /// Sends a notifications message. - SendNotif { + SendNotification { /// Name of the protocol for the message. /// /// Must match one of the registered protocols. For backwards-compatibility reasons, if @@ -271,7 +271,7 @@ where TSubstream: AsyncRead + AsyncWrite + Send + 'static { }, NotifsHandlerIn::Send { message } => self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), - NotifsHandlerIn::SendNotif { message, engine_id, proto_name } => { + NotifsHandlerIn::SendNotification { message, engine_id, proto_name } => { for handler in &mut self.out_handlers { if handler.is_open() && handler.protocol_name() == &proto_name[..] { handler.inject_event(NotifsOutHandlerIn::Send(message)); diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5d75b5e64d0b8..4b66fd7cbdd28 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1056,7 +1056,7 @@ impl, H: ExHashT> Protocol { /// Send a notification to the given peer we're connected to. 
/// /// Doesn't do anything if we're not connected to that peer. - pub fn write_notif( + pub fn write_notification( &mut self, target: PeerId, proto_name: impl Into>, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 6a4d2edc459de..ec42c49a9c739 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -432,7 +432,7 @@ impl, H: ExHashT> NetworkServic /// /// The protocol name must be one of the elements of `extra_notif_protos` that was passed in /// the configuration, or a protocol registered with `register_notif_protocol`. - pub fn write_notif(&self, target: PeerId, proto_name: impl Into>, message: impl Encode) { + pub fn write_notification(&self, target: PeerId, proto_name: impl Into>, message: impl Encode) { let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::WriteNotif { target, proto_name: proto_name.into(), @@ -770,7 +770,7 @@ impl, H: ExHashT> Stream for Ne ServerToWorkerMsg::EventsStream(sender) => self.events_streams.push(sender), ServerToWorkerMsg::WriteNotif { message, proto_name, target } => - self.network_service.user_protocol_mut().write_notif(target, proto_name, message), + self.network_service.user_protocol_mut().write_notification(target, proto_name, message), ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake } => self.network_service.user_protocol_mut() .register_notif_protocol(proto_name, engine_id, handshake), From 2fa6a2647ee430604139c2b9efb2e156983d166f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:04:24 +0100 Subject: [PATCH 07/15] Register GrandPa even if disabled --- client/finality-grandpa/src/communication/mod.rs | 8 +++++++- client/finality-grandpa/src/lib.rs | 3 +-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index b59e0a70bf679..4c6055f83c5db 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -60,6 +60,7 @@ mod periodic; #[cfg(test)] mod tests; +pub const GRANDPA_PROTOCOL_NAME: &'static [u8] = b"/sub/grandpa"; pub use fg_primitives::GRANDPA_ENGINE_ID; // cost scalars for reporting peers. @@ -104,6 +105,11 @@ pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } +/// Registers the notifications protocol towards the network. 
+pub(crate) fn register_dummy_protocol(network: N) { + network.register_notif_protocol(GRANDPA_PROTOCOL_NAME, GRANDPA_ENGINE_ID, Vec::new()); +} + /// Bridge between the underlying network service, gossiping consensus messages and Grandpa pub(crate) struct NetworkBridge { service: GossipEngine, @@ -132,7 +138,7 @@ impl NetworkBridge { ); let validator = Arc::new(validator); - let service = GossipEngine::new(service, &b"/sub/grandpa"[..], GRANDPA_ENGINE_ID, validator.clone()); + let service = GossipEngine::new(service, GRANDPA_PROTOCOL_NAME, GRANDPA_ENGINE_ID, validator.clone()); { // register all previous votes with the gossip service so that they're diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 6fd88151d2c60..77ed9d09a889d 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -908,8 +908,7 @@ pub fn setup_disabled_grandpa, RA, N>( inherent_data_providers, )?; - // TODO: figure out before merging - //network.register_validator(Arc::new(network_gossip::DiscardAll)); + communication::register_dummy_protocol(network); Ok(()) } From 500f180183e754096bf606f95f1f8f63e57abb1a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:14:23 +0100 Subject: [PATCH 08/15] WriteNotif -> WriteNotification --- client/network/src/service.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index ec42c49a9c739..049975f45db51 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -433,7 +433,7 @@ impl, H: ExHashT> NetworkServic /// The protocol name must be one of the elements of `extra_notif_protos` that was passed in /// the configuration, or a protocol registered with `register_notif_protocol`. 
pub fn write_notification(&self, target: PeerId, proto_name: impl Into>, message: impl Encode) { - let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::WriteNotif { + let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::WriteNotification { target, proto_name: proto_name.into(), message: message.encode(), @@ -682,7 +682,7 @@ enum ServerToWorkerMsg> { AddKnownAddress(PeerId, Multiaddr), SyncFork(Vec, B::Hash, NumberFor), EventsStream(mpsc::UnboundedSender), - WriteNotif { + WriteNotification { message: Vec, proto_name: Vec, target: PeerId, @@ -769,7 +769,7 @@ impl, H: ExHashT> Stream for Ne self.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServerToWorkerMsg::EventsStream(sender) => self.events_streams.push(sender), - ServerToWorkerMsg::WriteNotif { message, proto_name, target } => + ServerToWorkerMsg::WriteNotification { message, proto_name, target } => self.network_service.user_protocol_mut().write_notification(target, proto_name, message), ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake } => self.network_service.user_protocol_mut() From 3b8b1ca133e8913c97b36927022b7bfac8691322 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:21:08 +0100 Subject: [PATCH 09/15] Implement NetworkService::disconnect_peer --- client/network-gossip/src/lib.rs | 6 +++--- client/network/src/service.rs | 10 ++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index be33423d7eeb2..1b44483659e62 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -35,7 +35,7 @@ pub trait Network { fn report_peer(&self, peer_id: PeerId, reputation: i32); /// Force-disconnect a peer. - fn disconnect_peer(&mut self, who: PeerId); + fn disconnect_peer(&self, who: PeerId); /// Send a notification to a peer. fn write_notification(&self, who: PeerId, proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, message: Vec); @@ -60,8 +60,8 @@ impl, H: ExHashT> Network for Arc, engine_id: ConsensusEngineId, message: Vec) { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 049975f45db51..6f77c31c8466f 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -494,6 +494,13 @@ impl, H: ExHashT> NetworkServic self.peerset.report_peer(who, cost_benefit); } + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. + pub fn disconnect_peer(&self, who: PeerId) { + let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::DisconnectPeer(who)); + } + /// Request a justification for the given block from the network. /// /// On success, the justification will be passed to the import queue that was part at @@ -692,6 +699,7 @@ enum ServerToWorkerMsg> { engine_id: ConsensusEngineId, handshake: Vec, }, + DisconnectPeer(PeerId), } /// Main network worker. Must be polled in order for the network to advance. 
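For callers outside GRANDPA, the net effect of the `GossipEngine` changes earlier in this series is that the engine now carries its protocol name and engine id itself, so none of the calls below need a per-call engine id any more. A rough sketch of the resulting API (not taken from the patch), assuming `GossipEngine` is in scope from `network-gossip` and was constructed as in `communication/mod.rs` above:

    use libp2p::PeerId;
    use sr_primitives::traits::Block as BlockT;

    fn gossip_example<B: BlockT>(engine: &GossipEngine<B>, topic: B::Hash, peer: PeerId) {
        // Make a message available to peers that ask for `topic` later, without
        // broadcasting it right away.
        engine.register_gossip_message(topic, b"startup state".to_vec());

        // Broadcast to all peers; `force = false` skips peers that already have it.
        engine.gossip_message(topic, b"new round".to_vec(), false);

        // Address specific peers directly; the message is neither kept nor gossiped on.
        engine.send_message(vec![peer.clone()], b"direct reply".to_vec());

        // Reputation changes go through the engine as well.
        engine.report(peer, -10);
    }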
@@ -774,6 +782,8 @@ impl, H: ExHashT> Stream for Ne ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake } => self.network_service.user_protocol_mut() .register_notif_protocol(proto_name, engine_id, handshake), + ServerToWorkerMsg::DisconnectPeer(who) => + self.network_service.user_protocol_mut().disconnect_peer(&who), } } From c8846e48e757f4f5934caea8c30a936c7c08c425 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:44:16 +0100 Subject: [PATCH 10/15] Fix lots of TODOs --- .../src/generic_proto/handler/notif_in.rs | 14 +--- .../src/generic_proto/handler/notif_out.rs | 69 +++++++++++++------ 2 files changed, 48 insertions(+), 35 deletions(-) diff --git a/client/network/src/generic_proto/handler/notif_in.rs b/client/network/src/generic_proto/handler/notif_in.rs index 8c696257cf3fb..0464b61bde05f 100644 --- a/client/network/src/generic_proto/handler/notif_in.rs +++ b/client/network/src/generic_proto/handler/notif_in.rs @@ -66,13 +66,11 @@ where self.in_protocol.clone() } - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { NotifsInHandler { in_protocol: self.in_protocol, substream: None, pending_accept_refuses: 0, - endpoint: connected_point.to_endpoint(), - remote_peer_id: remote_peer_id.clone(), events_queue: SmallVec::new(), } } @@ -83,16 +81,6 @@ pub struct NotifsInHandler { /// Configuration for the protocol upgrade to negotiate for inbound substreams. in_protocol: NotificationsIn, - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - // TODO: remove? - remote_peer_id: PeerId, - - /// Whether we are the connection dialer or listener. Used only for logging purposes and - /// shouldn't have any influence on the behaviour. - // TODO: remove? - endpoint: Endpoint, - /// Substream that is open with the remote. substream: Option>, diff --git a/client/network/src/generic_proto/handler/notif_out.rs b/client/network/src/generic_proto/handler/notif_out.rs index 4f968eecd43de..6d0968d84a9b7 100644 --- a/client/network/src/generic_proto/handler/notif_out.rs +++ b/client/network/src/generic_proto/handler/notif_out.rs @@ -28,9 +28,17 @@ use libp2p::swarm::{ }; use log::error; use smallvec::SmallVec; -use std::{borrow::Cow, fmt, io, marker::PhantomData, mem, time::Duration}; +use std::{borrow::Cow, fmt, io, marker::PhantomData, mem, time::Duration, time::Instant}; use tokio_io::{AsyncRead, AsyncWrite}; +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + /// Implements the `IntoProtocolsHandler` trait of libp2p. 
/// /// Every time a connection with a remote starts, an instance of this struct is created and @@ -67,11 +75,10 @@ where DeniedUpgrade } - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { NotifsOutHandler { proto_name: self.proto_name, - endpoint: connected_point.to_endpoint(), - remote_peer_id: remote_peer_id.clone(), + when_connection_open: Instant::now(), state: State::Disabled, events_queue: SmallVec::new(), } @@ -90,19 +97,12 @@ pub struct NotifsOutHandler { /// Name of the protocol to negotiate. proto_name: Cow<'static, [u8]>, - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - // TODO: remove? - remote_peer_id: PeerId, - - /// Whether we are the connection dialer or listener. Used only for logging purposes and - /// shouldn't have any influence on the behaviour. - // TODO: remove? - endpoint: Endpoint, - /// Relationship with the node we're connected to. state: State, + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + /// Queue of events to send to the outside. /// /// This queue must only ever be modified to insert elements at the back, or remove the first @@ -116,7 +116,6 @@ enum State { Disabled, /// The handler is disabled. A substream is open and needs to be closed. - // TODO: needed? DisabledOpen(NotificationsOutSubstream), /// The handler is disabled but we are still trying to open a substream with the remote. @@ -151,7 +150,6 @@ pub enum NotifsOutHandlerIn { /// Sends a message on the notifications substream. Ignored if the substream isn't open. /// /// It is only valid to send this if the handler has been enabled. - // TODO: is ignoring the correct way to do this? Send(Vec), } @@ -169,7 +167,7 @@ pub enum NotifsOutHandlerOut { /// We tried to open a notifications substream, but the remote refused it. /// - /// The handler is still enabled and will try again in a few seconds. + /// Can only happen if we're in a closed state. 
Refused, } @@ -242,7 +240,7 @@ where TSubstream: AsyncRead + AsyncWrite + Send + 'static { State::Disabled => { self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(NotificationsOut::new(self.proto_name.clone())) - .with_timeout(Duration::from_secs(10)), // TODO: proper timeout config + .with_timeout(OPEN_TIMEOUT), info: (), }); self.state = State::Opening; @@ -277,14 +275,23 @@ where TSubstream: AsyncRead + AsyncWrite + Send + 'static { State::Disabled => {}, State::DisabledOpen(_) | State::Refused | State::Open(_) => error!("State mismatch in NotificationsOut"), - State::Opening => self.state = State::Refused, + State::Opening => { + self.state = State::Refused; + let ev = NotifsOutHandlerOut::Refused; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + }, State::DisabledOpening => self.state = State::Disabled, State::Poisoned => error!("Notifications handler in a poisoned state"), } } fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes // TODO: depends on state + match self.state { + State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), + State::Opening | State::Open(_) => KeepAlive::Yes, + State::Refused | State::Poisoned => KeepAlive::No, + } } fn poll( @@ -300,9 +307,27 @@ where TSubstream: AsyncRead + AsyncWrite + Send + 'static { } match &mut self.state { - State::Open(sub) | State::DisabledOpen(sub) => match sub.process() { + State::Open(sub) => match sub.process() { + Ok(()) => {}, + Err(err) => { + // We try to re-open a substream. + self.state = State::Opening; + self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(NotificationsOut::new(self.proto_name.clone())) + .with_timeout(OPEN_TIMEOUT), + info: (), + }); + let ev = NotifsOutHandlerOut::Closed; + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(ev))); + } + }, + State::DisabledOpen(sub) => match sub.process() { Ok(()) => {}, - Err(err) => {}, // TODO: ? + Err(_) => { + self.state = State::Disabled; + let ev = NotifsOutHandlerOut::Closed; + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(ev))); + }, }, _ => {} } From b30e0a687f637cba0a2b32ac115030866b737feb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:44:46 +0100 Subject: [PATCH 11/15] Delete consensus_gossip.rs --- .../network/src/protocol/consensus_gossip.rs | 920 ------------------ 1 file changed, 920 deletions(-) delete mode 100644 client/network/src/protocol/consensus_gossip.rs diff --git a/client/network/src/protocol/consensus_gossip.rs b/client/network/src/protocol/consensus_gossip.rs deleted file mode 100644 index 0fd20092c1d84..0000000000000 --- a/client/network/src/protocol/consensus_gossip.rs +++ /dev/null @@ -1,920 +0,0 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. 
If not, see . - -//! Utility for gossip of network messages between nodes. -//! Handles chain-specific and standard BFT messages. -//! -//! Gossip messages are separated by two categories: "topics" and consensus engine ID. -//! The consensus engine ID is sent over the wire with the message, while the topic is not, -//! with the expectation that the topic can be derived implicitly from the content of the -//! message, assuming it is valid. -//! -//! Topics are a single 32-byte tag associated with a message, used to group those messages -//! in an opaque way. Consensus code can invoke `broadcast_topic` to attempt to send all messages -//! under a single topic to all peers who don't have them yet, and `send_topic` to -//! send all messages under a single topic to a specific peer. -//! -//! Each consensus engine ID must have an associated, -//! registered `Validator` for all gossip messages. The primary role of this `Validator` is -//! to process incoming messages from peers, and decide whether to discard them or process -//! them. It also decides whether to re-broadcast the message. -//! -//! The secondary role of the `Validator` is to check if a message is allowed to be sent to a given -//! peer. All messages, before being sent, will be checked against this filter. -//! This enables the validator to use information it's aware of about connected peers to decide -//! whether to send messages to them at any given moment in time - In particular, to wait until -//! peers can accept and process the message before sending it. -//! -//! Lastly, the fact that gossip validators can decide not to rebroadcast messages -//! opens the door for neighbor status packets to be baked into the gossip protocol. -//! These status packets will typically contain light pieces of information -//! used to inform peers of a current view of protocol state. - -use std::collections::{HashMap, HashSet, hash_map::Entry}; -use std::sync::Arc; -use std::iter; -use std::time; -use log::{trace, debug}; -use futures03::channel::mpsc; -use lru_cache::LruCache; -use libp2p::PeerId; -use sr_primitives::traits::{Block as BlockT, Hash, HashFor}; -use sr_primitives::ConsensusEngineId; -pub use crate::message::generic::{Message, ConsensusMessage}; -use crate::protocol::Context; -use crate::config::Roles; - -// FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 -const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096; - -const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); -/// Reputation change when a peer sends us a gossip message that we didn't know about. -const GOSSIP_SUCCESS_REPUTATION_CHANGE: i32 = 1 << 4; -/// Reputation change when a peer sends us a gossip message that we already knew about. -const DUPLICATE_GOSSIP_REPUTATION_CHANGE: i32 = -(1 << 2); -/// Reputation change when a peer sends us a gossip message for an unknown engine, whatever that -/// means. -const UNKNOWN_GOSSIP_REPUTATION_CHANGE: i32 = -(1 << 6); -/// Reputation change when a peer sends a message from a topic it isn't registered on. -const UNREGISTERED_TOPIC_REPUTATION_CHANGE: i32 = -(1 << 10); - -struct PeerConsensus { - known_messages: HashSet, - filtered_messages: HashMap, - roles: Roles, -} - -/// Topic stream message with sender. -#[derive(Debug, Eq, PartialEq)] -pub struct TopicNotification { - /// Message data. - pub message: Vec, - /// Sender if available. 
- pub sender: Option, -} - -struct MessageEntry { - message_hash: B::Hash, - topic: B::Hash, - message: ConsensusMessage, - sender: Option, -} - -/// Consensus message destination. -pub enum MessageRecipient { - /// Send to all peers. - BroadcastToAll, - /// Send to peers that don't have that message already. - BroadcastNew, - /// Send to specific peer. - Peer(PeerId), -} - -/// The reason for sending out the message. -#[derive(Eq, PartialEq, Copy, Clone)] -#[cfg_attr(test, derive(Debug))] -pub enum MessageIntent { - /// Requested broadcast. - Broadcast { - /// How many times this message was previously filtered by the gossip - /// validator when trying to propagate to a given peer. - previous_attempts: usize - }, - /// Requested broadcast to all peers. - ForcedBroadcast, - /// Periodic rebroadcast of all messages to all peers. - PeriodicRebroadcast, -} - -/// Message validation result. -pub enum ValidationResult { - /// Message should be stored and propagated under given topic. - ProcessAndKeep(H), - /// Message should be processed, but not propagated. - ProcessAndDiscard(H), - /// Message should be ignored. - Discard, -} - -impl MessageIntent { - fn broadcast() -> MessageIntent { - MessageIntent::Broadcast { previous_attempts: 0 } - } -} - -/// Validation context. Allows reacting to incoming messages by sending out further messages. -pub trait ValidatorContext { - /// Broadcast all messages with given topic to peers that do not have it yet. - fn broadcast_topic(&mut self, topic: B::Hash, force: bool); - /// Broadcast a message to all peers that have not received it previously. - fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool); - /// Send addressed message to a peer. - fn send_message(&mut self, who: &PeerId, message: Vec); - /// Send all messages with given topic to a peer. - fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool); -} - -struct NetworkContext<'g, 'p, B: BlockT> { - gossip: &'g mut ConsensusGossip, - protocol: &'p mut dyn Context, - engine_id: ConsensusEngineId, -} - -impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { - /// Broadcast all messages with given topic to peers that do not have it yet. - fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.gossip.broadcast_topic(self.protocol, topic, force); - } - - /// Broadcast a message to all peers that have not received it previously. - fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast( - self.protocol, - topic, - ConsensusMessage{ data: message, engine_id: self.engine_id.clone() }, - force, - ); - } - - /// Send addressed message to a peer. - fn send_message(&mut self, who: &PeerId, message: Vec) { - self.protocol.send_consensus(who.clone(), vec![ConsensusMessage { - engine_id: self.engine_id, - data: message, - }]); - } - - /// Send all messages with given topic to a peer. 
- fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.gossip.send_topic(self.protocol, who, topic, self.engine_id, force); - } -} - -fn propagate<'a, B: BlockT, I>( - protocol: &mut dyn Context, - messages: I, - intent: MessageIntent, - peers: &mut HashMap>, - validators: &HashMap>>, -) - where I: Clone + IntoIterator, // (msg_hash, topic, message) -{ - let mut check_fns = HashMap::new(); - let mut message_allowed = move |who: &PeerId, intent: MessageIntent, topic: &B::Hash, message: &ConsensusMessage| { - let engine_id = message.engine_id; - let check_fn = match check_fns.entry(engine_id) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(vacant) => match validators.get(&engine_id) { - None => return false, // treat all messages with no validator as not allowed - Some(validator) => vacant.insert(validator.message_allowed()), - } - }; - - (check_fn)(who, intent, topic, &message.data) - }; - - for (id, ref mut peer) in peers.iter_mut() { - let mut batch = Vec::new(); - for (message_hash, topic, message) in messages.clone() { - let previous_attempts = peer.filtered_messages - .get(&message_hash) - .cloned() - .unwrap_or(0); - - let intent = match intent { - MessageIntent::Broadcast { .. } => - if peer.known_messages.contains(&message_hash) { - continue; - } else { - MessageIntent::Broadcast { previous_attempts } - }, - MessageIntent::PeriodicRebroadcast => - if peer.known_messages.contains(&message_hash) { - MessageIntent::PeriodicRebroadcast - } else { - // peer doesn't know message, so the logic should treat it as an - // initial broadcast. - MessageIntent::Broadcast { previous_attempts } - }, - other => other, - }; - - if !message_allowed(id, intent, &topic, &message) { - let count = peer.filtered_messages - .entry(message_hash.clone()) - .or_insert(0); - - *count += 1; - - continue; - } - - peer.filtered_messages.remove(message_hash); - peer.known_messages.insert(message_hash.clone()); - - trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - batch.push(message.clone()) - } - protocol.send_consensus(id.clone(), batch); - } -} - -/// Validates consensus messages. -pub trait Validator: Send + Sync { - /// New peer is connected. - fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _roles: Roles) { - } - - /// New connection is dropped. - fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) { - } - - /// Validate consensus message. - fn validate( - &self, - context: &mut dyn ValidatorContext, - sender: &PeerId, - data: &[u8] - ) -> ValidationResult; - - /// Produce a closure for validating messages on a given topic. - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, _data| false) - } - - /// Produce a closure for filtering egress messages. - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_who, _intent, _topic, _data| true) - } -} - -/// Consensus network protocol handler. Manages statements and candidate requests. -pub struct ConsensusGossip { - peers: HashMap>, - live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec>>, - messages: Vec>, - known_messages: LruCache, - validators: HashMap>>, - next_broadcast: time::Instant, -} - -impl ConsensusGossip { - /// Create a new instance. 
- pub fn new() -> Self { - ConsensusGossip { - peers: HashMap::new(), - live_message_sinks: HashMap::new(), - messages: Default::default(), - known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), - validators: Default::default(), - next_broadcast: time::Instant::now() + REBROADCAST_INTERVAL, - } - } - - /// Closes all notification streams. - pub fn abort(&mut self) { - self.live_message_sinks.clear(); - } - - /// Register message validator for a message type. - pub fn register_validator( - &mut self, - protocol: &mut dyn Context, - engine_id: ConsensusEngineId, - validator: Arc> - ) { - self.register_validator_internal(engine_id, validator.clone()); - let peers: Vec<_> = self.peers.iter().map(|(id, peer)| (id.clone(), peer.roles)).collect(); - for (id, roles) in peers { - let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; - validator.new_peer(&mut context, &id, roles); - } - } - - fn register_validator_internal(&mut self, engine_id: ConsensusEngineId, validator: Arc>) { - self.validators.insert(engine_id, validator.clone()); - } - - /// Handle new connected peer. - pub fn new_peer(&mut self, protocol: &mut dyn Context, who: PeerId, roles: Roles) { - // light nodes are not valid targets for consensus gossip messages - if !roles.is_full() { - return; - } - - trace!(target:"gossip", "Registering {:?} {}", roles, who); - self.peers.insert(who.clone(), PeerConsensus { - known_messages: HashSet::new(), - filtered_messages: HashMap::new(), - roles, - }); - for (engine_id, v) in self.validators.clone() { - let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; - v.new_peer(&mut context, &who, roles); - } - } - - fn register_message_hashed( - &mut self, - message_hash: B::Hash, - topic: B::Hash, - message: ConsensusMessage, - sender: Option, - ) { - if self.known_messages.insert(message_hash.clone(), ()).is_none() { - self.messages.push(MessageEntry { - message_hash, - topic, - message, - sender, - }); - } - } - - /// Registers a message without propagating it to any peers. The message - /// becomes available to new peers or when the service is asked to gossip - /// the message's topic. No validation is performed on the message, if the - /// message is already expired it should be dropped on the next garbage - /// collection. - pub fn register_message( - &mut self, - topic: B::Hash, - message: ConsensusMessage, - ) { - let message_hash = HashFor::::hash(&message.data[..]); - self.register_message_hashed(message_hash, topic, message, None); - } - - /// Call when a peer has been disconnected to stop tracking gossip status. - pub fn peer_disconnected(&mut self, protocol: &mut dyn Context, who: PeerId) { - for (engine_id, v) in self.validators.clone() { - let mut context = NetworkContext { gossip: self, protocol, engine_id: engine_id.clone() }; - v.peer_disconnected(&mut context, &who); - } - } - - /// Perform periodic maintenance - pub fn tick(&mut self, protocol: &mut dyn Context) { - self.collect_garbage(); - if time::Instant::now() >= self.next_broadcast { - self.rebroadcast(protocol); - self.next_broadcast = time::Instant::now() + REBROADCAST_INTERVAL; - } - } - - /// Rebroadcast all messages to all peers. 
- fn rebroadcast(&mut self, protocol: &mut dyn Context) { - let messages = self.messages.iter() - .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); - propagate(protocol, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validators); - } - - /// Broadcast all messages with given topic. - pub fn broadcast_topic(&mut self, protocol: &mut dyn Context, topic: B::Hash, force: bool) { - let messages = self.messages.iter() - .filter_map(|entry| - if entry.topic == topic { Some((&entry.message_hash, &entry.topic, &entry.message)) } else { None } - ); - let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::broadcast() }; - propagate(protocol, messages, intent, &mut self.peers, &self.validators); - } - - /// Prune old or no longer relevant consensus messages. Provide a predicate - /// for pruning, which returns `false` when the items with a given topic should be pruned. - pub fn collect_garbage(&mut self) { - self.live_message_sinks.retain(|_, sinks| { - sinks.retain(|sink| !sink.is_closed()); - !sinks.is_empty() - }); - - let known_messages = &mut self.known_messages; - let before = self.messages.len(); - let validators = &self.validators; - - let mut check_fns = HashMap::new(); - let mut message_expired = move |entry: &MessageEntry| { - let engine_id = entry.message.engine_id; - let check_fn = match check_fns.entry(engine_id) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(vacant) => match validators.get(&engine_id) { - None => return true, // treat all messages with no validator as expired - Some(validator) => vacant.insert(validator.message_expired()), - } - }; - - (check_fn)(entry.topic, &entry.message.data) - }; - - self.messages.retain(|entry| !message_expired(entry)); - - trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", - before - self.messages.len(), - self.messages.len(), - known_messages.len(), - ); - - for (_, ref mut peer) in self.peers.iter_mut() { - peer.known_messages.retain(|h| known_messages.contains_key(h)); - peer.filtered_messages.retain(|h, _| known_messages.contains_key(h)); - } - } - - /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) - pub fn messages_for(&mut self, engine_id: ConsensusEngineId, topic: B::Hash) - -> mpsc::UnboundedReceiver - { - let (tx, rx) = mpsc::unbounded(); - for entry in self.messages.iter_mut() - .filter(|e| e.topic == topic && e.message.engine_id == engine_id) - { - tx.unbounded_send(TopicNotification { - message: entry.message.data.clone(), - sender: entry.sender.clone(), - }) - .expect("receiver known to be live; qed"); - } - - self.live_message_sinks.entry((engine_id, topic)).or_default().push(tx); - - rx - } - - /// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic - /// already known, the message is old, its source peers isn't a registered peer or the connection - /// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None` - /// in all other cases. 
- pub fn on_incoming( - &mut self, - protocol: &mut dyn Context, - who: PeerId, - messages: Vec, - ) { - trace!(target:"gossip", "Received {} messages from peer {}", messages.len(), who); - for message in messages { - let message_hash = HashFor::::hash(&message.data[..]); - - if self.known_messages.contains_key(&message_hash) { - trace!(target:"gossip", "Ignored already known message from {}", who); - protocol.report_peer(who.clone(), DUPLICATE_GOSSIP_REPUTATION_CHANGE); - continue; - } - - let engine_id = message.engine_id; - // validate the message - let validation = self.validators.get(&engine_id) - .cloned() - .map(|v| { - let mut context = NetworkContext { gossip: self, protocol, engine_id }; - v.validate(&mut context, &who, &message.data) - }); - - let validation_result = match validation { - Some(ValidationResult::ProcessAndKeep(topic)) => Some((topic, true)), - Some(ValidationResult::ProcessAndDiscard(topic)) => Some((topic, false)), - Some(ValidationResult::Discard) => None, - None => { - trace!(target:"gossip", "Unknown message engine id {:?} from {}", engine_id, who); - protocol.report_peer(who.clone(), UNKNOWN_GOSSIP_REPUTATION_CHANGE); - protocol.disconnect_peer(who.clone()); - continue; - } - }; - - if let Some((topic, keep)) = validation_result { - protocol.report_peer(who.clone(), GOSSIP_SUCCESS_REPUTATION_CHANGE); - if let Some(ref mut peer) = self.peers.get_mut(&who) { - peer.known_messages.insert(message_hash); - if let Entry::Occupied(mut entry) = self.live_message_sinks.entry((engine_id, topic)) { - debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); - entry.get_mut().retain(|sink| { - if let Err(e) = sink.unbounded_send(TopicNotification { - message: message.data.clone(), - sender: Some(who.clone()) - }) { - trace!(target: "gossip", "Error broadcasting message notification: {:?}", e); - } - !sink.is_closed() - }); - if entry.get().is_empty() { - entry.remove_entry(); - } - } - if keep { - self.register_message_hashed(message_hash, topic, message, Some(who.clone())); - } - } else { - trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); - protocol.report_peer(who.clone(), UNREGISTERED_TOPIC_REPUTATION_CHANGE); - } - } else { - trace!(target:"gossip", "Handled valid one hop message from peer {}", who); - } - } - } - - /// Send all messages with given topic to a peer. 
- pub fn send_topic( - &mut self, - protocol: &mut dyn Context, - who: &PeerId, - topic: B::Hash, - engine_id: ConsensusEngineId, - force: bool - ) { - let validator = self.validators.get(&engine_id); - let mut message_allowed = match validator { - None => return, // treat all messages with no validator as not allowed - Some(validator) => validator.message_allowed(), - }; - - if let Some(ref mut peer) = self.peers.get_mut(who) { - let mut batch = Vec::new(); - for entry in self.messages.iter().filter(|m| m.topic == topic && m.message.engine_id == engine_id) { - let intent = if force { - MessageIntent::ForcedBroadcast - } else { - let previous_attempts = peer.filtered_messages - .get(&entry.message_hash) - .cloned() - .unwrap_or(0); - - MessageIntent::Broadcast { previous_attempts } - }; - - if !force && peer.known_messages.contains(&entry.message_hash) { - continue; - } - - if !message_allowed(who, intent, &entry.topic, &entry.message.data) { - let count = peer.filtered_messages - .entry(entry.message_hash) - .or_insert(0); - - *count += 1; - - continue; - } - - peer.filtered_messages.remove(&entry.message_hash); - peer.known_messages.insert(entry.message_hash.clone()); - - trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - batch.push(ConsensusMessage { - engine_id: engine_id.clone(), - data: entry.message.data.clone(), - }); - } - protocol.send_consensus(who.clone(), batch); - } - } - - /// Multicast a message to all peers. - pub fn multicast( - &mut self, - protocol: &mut dyn Context, - topic: B::Hash, - message: ConsensusMessage, - force: bool, - ) { - let message_hash = HashFor::::hash(&message.data); - self.register_message_hashed(message_hash, topic, message.clone(), None); - let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::broadcast() }; - propagate(protocol, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validators); - } - - /// Send addressed message to a peer. The message is not kept or multicast - /// later on. - pub fn send_message( - &mut self, - protocol: &mut dyn Context, - who: &PeerId, - message: ConsensusMessage, - ) { - let peer = match self.peers.get_mut(who) { - None => return, - Some(peer) => peer, - }; - - let message_hash = HashFor::::hash(&message.data); - - trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); - - peer.filtered_messages.remove(&message_hash); - peer.known_messages.insert(message_hash); - protocol.send_consensus(who.clone(), vec![message.clone()]); - } -} - -/// A gossip message validator that discards all messages. -pub struct DiscardAll; - -impl Validator for DiscardAll { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - _data: &[u8], - ) -> ValidationResult { - ValidationResult::Discard - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, _data| true) - } - - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_who, _intent, _topic, _data| false) - } -} - -#[cfg(test)] -mod tests { - use std::sync::{Arc, atomic::{AtomicBool, Ordering}}; - use parking_lot::Mutex; - use sr_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; - use futures03::executor::block_on_stream; - - use super::*; - - type Block = RawBlock>; - - macro_rules! 
push_msg { - ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { - if $consensus.known_messages.insert($hash, ()).is_none() { - $consensus.messages.push(MessageEntry { - message_hash: $hash, - topic: $topic, - message: ConsensusMessage { data: $m, engine_id: [0, 0, 0, 0]}, - sender: None, - }); - } - } - } - - struct AllowAll; - impl Validator for AllowAll { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - _data: &[u8], - ) -> ValidationResult { - ValidationResult::ProcessAndKeep(H256::default()) - } - } - - #[test] - fn collects_garbage() { - struct AllowOne; - impl Validator for AllowOne { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - data: &[u8], - ) -> ValidationResult { - if data[0] == 1 { - ValidationResult::ProcessAndKeep(H256::default()) - } else { - ValidationResult::Discard - } - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, data| data[0] != 1) - } - } - - let prev_hash = H256::random(); - let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(); - let m1_hash = H256::random(); - let m2_hash = H256::random(); - let m1 = vec![1, 2, 3]; - let m2 = vec![4, 5, 6]; - - push_msg!(consensus, prev_hash, m1_hash, m1); - push_msg!(consensus, best_hash, m2_hash, m2); - consensus.known_messages.insert(m1_hash, ()); - consensus.known_messages.insert(m2_hash, ()); - - let test_engine_id = Default::default(); - consensus.register_validator_internal(test_engine_id, Arc::new(AllowAll)); - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 2); - assert_eq!(consensus.known_messages.len(), 2); - - consensus.register_validator_internal(test_engine_id, Arc::new(AllowOne)); - - // m2 is expired - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 1); - // known messages are only pruned based on size. 
- assert_eq!(consensus.known_messages.len(), 2); - assert!(consensus.known_messages.contains_key(&m2_hash)); - } - - #[test] - fn message_stream_include_those_sent_before_asking_for_stream() { - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); - - let message = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; - let topic = HashFor::::hash(&[1,2,3]); - - consensus.register_message(topic, message.clone()); - let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - - assert_eq!(stream.next(), Some(TopicNotification { message: message.data, sender: None })); - } - - #[test] - fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(); - - let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; - - consensus.register_message(topic, msg_a); - consensus.register_message(topic, msg_b); - - assert_eq!(consensus.messages.len(), 2); - } - - #[test] - fn can_keep_multiple_subscribers_per_topic() { - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); - - let data = vec![4, 5, 6]; - let message = ConsensusMessage { data: data.clone(), engine_id: [0, 0, 0, 0] }; - let topic = HashFor::::hash(&[1, 2, 3]); - - consensus.register_message(topic, message.clone()); - - let mut stream1 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - let mut stream2 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - - assert_eq!(stream1.next(), Some(TopicNotification { message: data.clone(), sender: None })); - assert_eq!(stream2.next(), Some(TopicNotification { message: data, sender: None })); - } - - #[test] - fn topics_are_localized_to_engine_id() { - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); - - let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 1] }; - - consensus.register_message(topic, msg_a); - consensus.register_message(topic, msg_b); - - let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - - assert_eq!(stream.next(), Some(TopicNotification { message: vec![1, 2, 3], sender: None })); - - let _ = consensus.live_message_sinks.remove(&([0, 0, 0, 0], topic)); - assert_eq!(stream.next(), None); - } - - #[test] - fn keeps_track_of_broadcast_attempts() { - struct DummyNetworkContext; - impl Context for DummyNetworkContext { - fn report_peer(&mut self, _who: PeerId, _reputation: i32) {} - fn disconnect_peer(&mut self, _who: PeerId) {} - fn send_consensus(&mut self, _who: PeerId, _consensus: Vec) {} - fn send_chain_specific(&mut self, _who: PeerId, _message: Vec) {} - } - - // A mock gossip validator that never expires any message, allows - // setting whether messages should be allowed and keeps track of any - // messages passed to `message_allowed`. 
- struct MockValidator { - allow: AtomicBool, - messages: Arc, MessageIntent)>>>, - } - - impl MockValidator { - fn new() -> MockValidator { - MockValidator { - allow: AtomicBool::new(false), - messages: Arc::new(Mutex::new(Vec::new())), - } - } - } - - impl Validator for MockValidator { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - _data: &[u8], - ) -> ValidationResult { - ValidationResult::ProcessAndKeep(H256::default()) - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, _data| false) - } - - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { - let messages = self.messages.clone(); - Box::new(move |_, intent, _, data| { - messages.lock().push((data.to_vec(), intent)); - self.allow.load(Ordering::SeqCst) - }) - } - } - - // we setup an instance of the mock gossip validator, add a new peer to - // it and register a message. - let mut consensus = ConsensusGossip::::new(); - let validator = Arc::new(MockValidator::new()); - consensus.register_validator_internal([0, 0, 0, 0], validator.clone()); - consensus.new_peer( - &mut DummyNetworkContext, - PeerId::random(), - Roles::AUTHORITY, - ); - - let data = vec![1, 2, 3]; - let msg = ConsensusMessage { data: data.clone(), engine_id: [0, 0, 0, 0] }; - consensus.register_message(H256::default(), msg); - - // tick the gossip handler and make sure it triggers a message rebroadcast - let mut tick = || { - consensus.next_broadcast = std::time::Instant::now(); - consensus.tick(&mut DummyNetworkContext); - }; - - // by default we won't allow the message we registered, so everytime we - // tick the gossip handler, the message intent should be kept as - // `Broadcast` but the previous attempts should be incremented. - tick(); - assert_eq!( - validator.messages.lock().pop().unwrap(), - (data.clone(), MessageIntent::Broadcast { previous_attempts: 0 }), - ); - - tick(); - assert_eq!( - validator.messages.lock().pop().unwrap(), - (data.clone(), MessageIntent::Broadcast { previous_attempts: 1 }), - ); - - // we set the validator to allow the message to go through - validator.allow.store(true, Ordering::SeqCst); - - // we still get the same message intent but it should be delivered now - tick(); - assert_eq!( - validator.messages.lock().pop().unwrap(), - (data.clone(), MessageIntent::Broadcast { previous_attempts: 2 }), - ); - - // ticking the gossip handler again the message intent should change to - // `PeriodicRebroadcast` since it was sent. 
- tick(); - assert_eq!( - validator.messages.lock().pop().unwrap(), - (data.clone(), MessageIntent::PeriodicRebroadcast), - ); - } -} From d2e64807ef362ddab5562bd7c2692953f8184f1e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:53:53 +0100 Subject: [PATCH 12/15] handshake -> handshake_msg --- client/network-gossip/src/lib.rs | 6 +++--- client/network/src/generic_proto/behaviour.rs | 4 ++-- client/network/src/protocol.rs | 4 ++-- client/network/src/service.rs | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1b44483659e62..0cfbc9621bbd5 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -47,7 +47,7 @@ pub trait Network { &self, proto_name: impl Into>, engine_id: ConsensusEngineId, - handshake: impl Into> + handshake_msg: impl Into> ); } @@ -77,8 +77,8 @@ impl, H: ExHashT> Network for Arc>, engine_id: ConsensusEngineId, - handshake: impl Into> + handshake_msg: impl Into> ) { - NetworkService::register_notif_protocol(self, proto_name, engine_id, handshake) + NetworkService::register_notif_protocol(self, proto_name, engine_id, handshake_msg) } } diff --git a/client/network/src/generic_proto/behaviour.rs b/client/network/src/generic_proto/behaviour.rs index dba9993a72553..04d210383e1ff 100644 --- a/client/network/src/generic_proto/behaviour.rs +++ b/client/network/src/generic_proto/behaviour.rs @@ -270,9 +270,9 @@ impl GenericProto { pub fn register_notif_protocol( &mut self, proto_name: impl Into>, - handshake: impl Into> + handshake_msg: impl Into> ) { - self.notif_protocols.push((proto_name.into(), handshake.into())); + self.notif_protocols.push((proto_name.into(), handshake_msg.into())); } /// Returns a list of all the notification protocols that have been registered. 
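For context, both the `Network` trait in client/network-gossip/src/lib.rs and `GenericProto::register_notif_protocol` above now take the renamed `handshake_msg` parameter. The following is a minimal sketch, not part of the patch, of how a caller might register a notifications protocol after this change; the protocol name, engine id and handshake payload are invented for illustration, and `Network`, `BlockT` and `ConsensusEngineId` are assumed to be in scope from their respective crates.

    use std::borrow::Cow;

    // Sketch only: "/example/gossip/1" and *b"exgo" are made-up values, and
    // `network` is assumed to implement the `Network` trait defined in
    // client/network-gossip/src/lib.rs.
    fn register_example_protocol<B: BlockT>(network: &impl Network<B>) {
        let proto_name: Cow<'static, [u8]> = Cow::Borrowed(&b"/example/gossip/1"[..]);
        let engine_id: ConsensusEngineId = *b"exgo"; // a 4-byte engine identifier
        // `handshake_msg` (renamed from `handshake` in this commit) is the payload
        // sent to the remote when a notifications substream is opened.
        let handshake_msg: Vec<u8> = Vec::new();
        network.register_notif_protocol(proto_name, engine_id, handshake_msg);
    }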
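The gossip `Validator` interface itself is not changed by this series: consensus_gossip.rs is deleted above and the equivalent machinery is now provided by the new client/network-gossip crate. As a reminder of how engines plug into it, here is a minimal validator sketch modelled on the `AllowAll` helper from the removed tests; the exact trait bounds, the `B::Hash: Default` assumption and the imports are reconstructed by hand and should be treated as assumptions.

    // Minimal gossip validator sketch (assumes `Validator`, `ValidatorContext`,
    // `ValidationResult`, `BlockT` and `PeerId` are in scope). Every incoming
    // message is accepted and kept under a fixed default topic; a real validator
    // would derive the topic from the message contents and return `Discard` for
    // malformed data.
    struct AcceptAll;

    impl<B: BlockT> Validator<B> for AcceptAll {
        fn validate(
            &self,
            _context: &mut dyn ValidatorContext<B>,
            _sender: &PeerId,
            _data: &[u8],
        ) -> ValidationResult<B::Hash> {
            // ProcessAndKeep(topic): store the message and keep gossiping it.
            // ProcessAndDiscard(topic): deliver to local topic streams only.
            // Discard: drop the message entirely.
            ValidationResult::ProcessAndKeep(B::Hash::default())
        }
    }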
diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 4b66fd7cbdd28..e2feacb087646 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1078,13 +1078,13 @@ impl, H: ExHashT> Protocol { &mut self, proto_name: impl Into>, engine_id: ConsensusEngineId, - handshake: impl Into>, + handshake_msg: impl Into>, ) { let proto_name = proto_name.into(); if self.protocol_name_by_engine.insert(engine_id, proto_name.clone()).is_some() { error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", proto_name); } else { - self.behaviour.register_notif_protocol(proto_name.clone(), handshake); + self.behaviour.register_notif_protocol(proto_name.clone(), handshake_msg); self.protocol_engine_by_name.insert(proto_name, engine_id); } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 6f77c31c8466f..7c6c24145a1e4 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -463,12 +463,12 @@ impl, H: ExHashT> NetworkServic &self, proto_name: impl Into>, engine_id: ConsensusEngineId, - handshake: impl Into> + handshake_msg: impl Into> ) { let _ = self.to_worker.unbounded_send(ServerToWorkerMsg::RegisterNotifProtocol { proto_name: proto_name.into(), engine_id, - handshake: handshake.into(), + handshake_msg: handshake_msg.into(), }); } @@ -697,7 +697,7 @@ enum ServerToWorkerMsg> { RegisterNotifProtocol { proto_name: Cow<'static, [u8]>, engine_id: ConsensusEngineId, - handshake: Vec, + handshake_msg: Vec, }, DisconnectPeer(PeerId), } @@ -779,9 +779,9 @@ impl, H: ExHashT> Stream for Ne self.events_streams.push(sender), ServerToWorkerMsg::WriteNotification { message, proto_name, target } => self.network_service.user_protocol_mut().write_notification(target, proto_name, message), - ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake } => + ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake_msg } => self.network_service.user_protocol_mut() - .register_notif_protocol(proto_name, engine_id, handshake), + .register_notif_protocol(proto_name, engine_id, handshake_msg), ServerToWorkerMsg::DisconnectPeer(who) => self.network_service.user_protocol_mut().disconnect_peer(&who), } From 0c7da32710cbe0bfbbc0840640cc0791c9a0acd8 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 11:59:34 +0100 Subject: [PATCH 13/15] events_streams -> event_streams --- client/network/src/service.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 7c6c24145a1e4..f4774bd209e97 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -280,7 +280,7 @@ impl, H: ExHashT> NetworkWorker import_queue: params.import_queue, from_worker, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), - events_streams: Vec::new(), + event_streams: Vec::new(), }) } @@ -724,7 +724,7 @@ pub struct NetworkWorker, H: Ex /// Receiver for queries from the light client that must be processed. light_client_rqs: Option>>, /// Senders for events that happen on the network. 
- events_streams: Vec>, + event_streams: Vec>, } impl, H: ExHashT> Stream for NetworkWorker { @@ -776,7 +776,7 @@ impl, H: ExHashT> Stream for Ne ServerToWorkerMsg::SyncFork(peer_ids, hash, number) => self.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServerToWorkerMsg::EventsStream(sender) => - self.events_streams.push(sender), + self.event_streams.push(sender), ServerToWorkerMsg::WriteNotification { message, proto_name, target } => self.network_service.user_protocol_mut().write_notification(target, proto_name, message), ServerToWorkerMsg::RegisterNotifProtocol { proto_name, engine_id, handshake_msg } => @@ -800,7 +800,7 @@ impl, H: ExHashT> Stream for Ne Ok(Async::Ready(Some(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)))) => self.import_queue.import_finality_proof(origin, hash, nb, proof), Ok(Async::Ready(Some(BehaviourOut::Event(ev)))) => { - self.events_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()); + self.event_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()); return Ok(Async::Ready(Some(ev))); }, Ok(Async::Ready(None)) => {}, From ff7458f002a6ed3f1f1fa3479b7cf11e313ada4f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 12:04:52 +0100 Subject: [PATCH 14/15] Fix the TODOs in notif_in --- .../src/generic_proto/handler/notif_in.rs | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/client/network/src/generic_proto/handler/notif_in.rs b/client/network/src/generic_proto/handler/notif_in.rs index 0464b61bde05f..58f678c4eba68 100644 --- a/client/network/src/generic_proto/handler/notif_in.rs +++ b/client/network/src/generic_proto/handler/notif_in.rs @@ -140,7 +140,7 @@ where TSubstream: AsyncRead + AsyncWrite + 'static { type InEvent = NotifsInHandlerIn; type OutEvent = NotifsInHandlerOut; type Substream = TSubstream; - type Error = ConnectionKillError; + type Error = void::Void; type InboundProtocol = NotificationsIn; type OutboundProtocol = DeniedUpgrade; type OutboundOpenInfo = (); @@ -196,16 +196,7 @@ where TSubstream: AsyncRead + AsyncWrite + 'static { } fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { - unimplemented!() // TODO: - /*let is_severe = match err { - ProtocolsHandlerUpgrErr::Upgrade(_) => true, - _ => false, - }; - - self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::ProtocolError { - is_severe, - error: Box::new(err), - }));*/ + error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); } fn connection_keep_alive(&self) -> KeepAlive { @@ -228,13 +219,15 @@ where TSubstream: AsyncRead + AsyncWrite + 'static { return Ok(Async::Ready(event)) } - if let Some(substream) = self.substream.as_mut() { - match substream.poll() { - Ok(Async::Ready(Some(msg))) => - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg)))), - Ok(Async::NotReady) => {}, - Ok(Async::Ready(None)) | Err(_) => return Err(ConnectionKillError), // TODO: ? 
- } + match self.substream.as_mut().map(|s| s.poll()) { + None => {}, + Some(Ok(Async::Ready(Some(msg)))) => + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg)))), + Some(Ok(Async::NotReady)) => {}, + Some(Ok(Async::Ready(None))) | Some(Err(_)) => { + self.substream = None; + return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed))); + }, } Ok(Async::NotReady) @@ -250,16 +243,3 @@ where .finish() } } - -// TODO: remove -#[derive(Debug)] -pub struct ConnectionKillError; - -impl error::Error for ConnectionKillError { -} - -impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - unimplemented!() // TODO: - } -} From 83999cac4470c0a4f4b1e474d649f092a7126d96 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 20 Nov 2019 12:29:15 +0100 Subject: [PATCH 15/15] Log when the wrong function is called --- client/network-gossip/src/bridge.rs | 7 ++++-- client/network/src/protocol.rs | 34 +++++------------------------ 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 3e49a08051e35..82f3a879db560 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -247,8 +247,11 @@ impl Context for ContextOverService { } } - fn send_chain_specific(&mut self, who: PeerId, message: Vec) { - unreachable!() // TODO: handle that + fn send_chain_specific(&mut self, _: PeerId, _: Vec) { + log::error!( + target: "sub-libp2p", + "send_chain_specific has been called in a context where it shouldn't" + ); } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index e2feacb087646..723a50b0cfc5d 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -338,35 +338,11 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, self.behaviour.disconnect_peer(&who) } - fn send_consensus(&mut self, who: PeerId, messages: Vec) { - panic!(); // TODO: shouldn't be reached - - if self.context_data.peers.get(&who).map_or(false, |peer| peer.info.protocol_version > 4) { - let mut batch = Vec::new(); - let len = messages.len(); - for (index, message) in messages.into_iter().enumerate() { - batch.reserve(MAX_CONSENSUS_MESSAGES); - batch.push(message); - if batch.len() == MAX_CONSENSUS_MESSAGES || index == len - 1 { - send_message:: ( - self.behaviour, - &mut self.context_data.stats, - &who, - GenericMessage::ConsensusBatch(std::mem::replace(&mut batch, Vec::new())), - ) - } - } - } else { - // Backwards compatibility - for message in messages { - send_message:: ( - self.behaviour, - &mut self.context_data.stats, - &who, - GenericMessage::Consensus(message) - ) - } - } + fn send_consensus(&mut self, _: PeerId, _: Vec) { + error!( + target: "sub-libp2p", + "send_consensus has been called in a context where it shouldn't" + ); } fn send_chain_specific(&mut self, who: PeerId, message: Vec) {