14 changes: 14 additions & 0 deletions src/protocol/libp2p/ping/config.rs
@@ -18,6 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::time::Duration;
use crate::{
codec::ProtocolCodec, protocol::libp2p::ping::PingEvent, types::protocol::ProtocolName,
DEFAULT_CHANNEL_SIZE,
@@ -36,6 +37,8 @@ const PING_PAYLOAD_SIZE: usize = 32;
/// Maximum PING failures.
const MAX_FAILURES: usize = 3;

/// Default ping interval.
pub const PING_INTERVAL: Duration = Duration::from_secs(15);

/// Ping configuration.
pub struct Config {
/// Protocol name.
@@ -49,6 +52,8 @@ pub struct Config {

/// TX channel for sending events to the user protocol.
pub(crate) tx_event: Sender<PingEvent>,

/// Interval between outbound pings.
pub(crate) ping_interval: Duration,
}

impl Config {
@@ -61,6 +66,7 @@ impl Config {
(
Self {
tx_event,
ping_interval: PING_INTERVAL,
max_failures: MAX_FAILURES,
protocol: ProtocolName::from(PROTOCOL_NAME),
codec: ProtocolCodec::Identity(PING_PAYLOAD_SIZE),
@@ -80,6 +86,7 @@ pub struct ConfigBuilder {

/// Maximum failures before the peer is considered unreachable.
max_failures: usize,
/// Interval between outbound pings.
ping_interval: Duration,
}

impl Default for ConfigBuilder {
@@ -92,6 +99,7 @@ impl ConfigBuilder {
/// Create new default [`Config`] which can be modified by the user.
pub fn new() -> Self {
Self {
ping_interval: PING_INTERVAL,
max_failures: MAX_FAILURES,
protocol: ProtocolName::from(PROTOCOL_NAME),
codec: ProtocolCodec::Identity(PING_PAYLOAD_SIZE),
@@ -104,13 +112,19 @@ impl ConfigBuilder {
self
}

/// Set the interval between outbound pings.
pub fn with_ping_interval(mut self, ping_interval: Duration) -> Self {
self.ping_interval = ping_interval;
self
}

/// Build [`Config`].
pub fn build(self) -> (Config, Box<dyn Stream<Item = PingEvent> + Send + Unpin>) {
let (tx_event, rx_event) = channel(DEFAULT_CHANNEL_SIZE);

(
Config {
tx_event,
ping_interval: self.ping_interval,
max_failures: self.max_failures,
protocol: self.protocol,
codec: self.codec,
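For context, a minimal usage sketch of the new builder option. The import path `litep2p::protocol::libp2p::ping::ConfigBuilder` and the 30-second value are assumptions for illustration; only `with_ping_interval`, the 15-second `PING_INTERVAL` default and the `build()` return type come from the hunks above.

```rust
use std::time::Duration;

use litep2p::protocol::libp2p::ping::ConfigBuilder;

fn main() {
    // Override the default `PING_INTERVAL` (15 seconds) with a 30-second interval.
    let (ping_config, ping_event_stream) = ConfigBuilder::new()
        .with_ping_interval(Duration::from_secs(30))
        .build();

    // `ping_config` is passed on to the rest of the litep2p setup, while
    // `ping_event_stream` yields `PingEvent::Ping { peer, ping }` items
    // carrying the measured round-trip time.
    drop((ping_config, ping_event_stream));
}
```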
185 changes: 79 additions & 106 deletions src/protocol/libp2p/ping/mod.rs
@@ -24,26 +24,27 @@ use crate::{
error::{Error, SubstreamError},
protocol::{Direction, TransportEvent, TransportService},
substream::Substream,
types::SubstreamId,
PeerId,
};

use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
use tokio::sync::mpsc::Sender;

use futures::{SinkExt, StreamExt};
use std::{
collections::HashSet,
collections::HashMap,
time::{Duration, Instant},
};
use bytes::Bytes;
use futures::stream::SplitSink;
use tokio::sync::mpsc;
use tokio_stream::StreamMap;

pub use config::{Config, ConfigBuilder};

mod config;

// TODO: https://github.com/paritytech/litep2p/issues/132 let the user handle max failures

/// Log target for the file.
const LOG_TARGET: &str = "litep2p::ipfs::ping";
/// Timeout for a single ping exchange.
const PING_TIMEOUT: Duration = Duration::from_secs(10);

/// Events emitted by the ping protocol.
#[derive(Debug)]
@@ -60,23 +61,24 @@ pub enum PingEvent {

/// Ping protocol.
pub(crate) struct Ping {
/// Maximum failures before the peer is considered unreachable.
_max_failures: usize,

// Connection service.
service: TransportService,

/// TX channel for sending events to the user protocol.
tx: Sender<PingEvent>,
tx: mpsc::Sender<PingEvent>,

/// Connected peers.
peers: HashSet<PeerId>,
/// Inbound: The "Listening" half of the substreams.
/// StreamMap handles polling all of them efficiently.
read_streams: StreamMap<PeerId, futures::stream::SplitStream<Substream>>,

/// Pending outbound substreams.
pending_outbound: FuturesUnordered<BoxFuture<'static, crate::Result<(PeerId, Duration)>>>,
/// Outbound: The "Writing" half of the substreams.
/// We look these up when the timer ticks to send a Ping.
write_sinks: HashMap<PeerId, SplitSink<Substream, Bytes>>,

/// Pending inbound substreams.
pending_inbound: FuturesUnordered<BoxFuture<'static, crate::Result<()>>>,
/// When each outstanding ping was sent, used to compute the round-trip time.
ping_times: HashMap<PeerId, Instant>,

/// How often a ping is sent to each connected peer.
ping_interval: Duration,
}

impl Ping {
@@ -85,129 +87,100 @@ impl Ping {
Self {
service,
tx: config.tx_event,
peers: HashSet::new(),
pending_outbound: FuturesUnordered::new(),
pending_inbound: FuturesUnordered::new(),
_max_failures: config.max_failures,
ping_interval: config.ping_interval,
read_streams: StreamMap::new(),
write_sinks: HashMap::new(),
ping_times: HashMap::new(),
}
}

/// Connection established to remote peer.
fn on_connection_established(&mut self, peer: PeerId) -> crate::Result<()> {
tracing::trace!(target: LOG_TARGET, ?peer, "connection established");

self.service.open_substream(peer)?;
self.peers.insert(peer);
fn on_connection_established(&mut self, peer: PeerId) {
tracing::debug!(target: LOG_TARGET, ?peer, "connection established, opening ping substream");

Ok(())
if let Err(error) = self.service.open_substream(peer) {
tracing::debug!(target: LOG_TARGET, ?peer, ?error, "failed to open substream");
}
}

/// Connection closed to remote peer.
fn on_connection_closed(&mut self, peer: PeerId) {
tracing::trace!(target: LOG_TARGET, ?peer, "connection closed");

self.peers.remove(&peer);
tracing::debug!(target: LOG_TARGET, ?peer, "connection closed");
self.read_streams.remove(&peer);
self.write_sinks.remove(&peer);
self.ping_times.remove(&peer);
}

/// Handle outbound substream.
fn on_outbound_substream(
&mut self,
peer: PeerId,
substream_id: SubstreamId,
mut substream: Substream,
) {
tracing::trace!(target: LOG_TARGET, ?peer, "handle outbound substream");

self.pending_outbound.push(Box::pin(async move {
let future = async move {
// TODO: https://github.com/paritytech/litep2p/issues/134 generate random payload and verify it
substream.send_framed(vec![0u8; 32].into()).await?;
let now = Instant::now();
let _ = substream.next().await.ok_or(Error::SubstreamError(
SubstreamError::ReadFailure(Some(substream_id)),
))?;
let _ = substream.close().await;

Ok(now.elapsed())
};

match tokio::time::timeout(Duration::from_secs(10), future).await {
Err(_) => Err(Error::Timeout),
Ok(Err(error)) => Err(error),
Ok(Ok(elapsed)) => Ok((peer, elapsed)),
}
}));
}
/// Helper to register a substream (used for both inbound and outbound).
fn register_substream(&mut self, peer: PeerId, substream: Substream) {
let (sink, stream) = substream.split();

/// Substream opened to remote peer.
fn on_inbound_substream(&mut self, peer: PeerId, mut substream: Substream) {
tracing::trace!(target: LOG_TARGET, ?peer, "handle inbound substream");

self.pending_inbound.push(Box::pin(async move {
let future = async move {
let payload = substream
.next()
.await
.ok_or(Error::SubstreamError(SubstreamError::ReadFailure(None)))??;
substream.send_framed(payload.freeze()).await?;
let _ = substream.next().await.map(|_| ());

Ok(())
};

match tokio::time::timeout(Duration::from_secs(10), future).await {
Err(_) => Err(Error::Timeout),
Ok(Err(error)) => Err(error),
Ok(Ok(())) => Ok(()),
}
}));
self.read_streams.insert(peer, stream);
self.write_sinks.insert(peer, sink);
}

/// Start [`Ping`] event loop.
pub async fn run(mut self) {
tracing::debug!(target: LOG_TARGET, "starting ping event loop");
let mut interval = tokio::time::interval(self.ping_interval);

loop {
tokio::select! {
event = self.service.next() => match event {
Some(TransportEvent::ConnectionEstablished { peer, .. }) => {
let _ = self.on_connection_established(peer);
self.on_connection_established(peer);
}
Some(TransportEvent::ConnectionClosed { peer }) => {
self.on_connection_closed(peer);
}
Some(TransportEvent::SubstreamOpened {
peer,
substream,
direction,
..
}) => match direction {
Direction::Inbound => {
self.on_inbound_substream(peer, substream);
}
Direction::Outbound(substream_id) => {
self.on_outbound_substream(peer, substream_id, substream);
}
},
Some(TransportEvent::SubstreamOpened { peer, substream, .. }) => {
tracing::trace!(target: LOG_TARGET, ?peer, "registering ping substream");
self.register_substream(peer, substream);
}
Some(_) => {}
None => return,
},
_event = self.pending_inbound.next(), if !self.pending_inbound.is_empty() => {}
event = self.pending_outbound.next(), if !self.pending_outbound.is_empty() => {

_ = interval.tick() => {
for (peer, sink) in self.write_sinks.iter_mut() {
let payload = vec![0u8; 32];

self.ping_times.insert(*peer, Instant::now());
tracing::trace!(target: LOG_TARGET, ?peer, "sending ping");

if let Err(error) = sink.send(Bytes::from(payload)).await {
tracing::debug!(target: LOG_TARGET, ?peer, ?error, "failed to send ping");
}
}
}

Some((peer, event)) = self.read_streams.next() => {
match event {
Some(Ok((peer, elapsed))) => {
let _ = self
.tx
.send(PingEvent::Ping {
peer,
ping: elapsed,
})
.await;
Ok(payload) => {
if let Some(started) = self.ping_times.remove(&peer) {
let elapsed = started.elapsed();
tracing::trace!(target: LOG_TARGET, ?peer, ?elapsed, "pong received");
let _ = self.tx.send(PingEvent::Ping { peer, ping: elapsed }).await;
} else {
if let Some(sink) = self.write_sinks.get_mut(&peer) {
tracing::trace!(target: LOG_TARGET, ?peer, "sending pong");
if let Err(error) = sink.send(payload.freeze()).await {
tracing::debug!(target: LOG_TARGET, ?peer, ?error, "failed to send pong");
}
}
}
}
Err(error) => {
tracing::debug!(target: LOG_TARGET, ?peer, ?error, "ping substream closed/error");
self.read_streams.remove(&peer);
self.write_sinks.remove(&peer);
self.ping_times.remove(&peer);
}
event => tracing::debug!(target: LOG_TARGET, "failed to handle ping for an outbound peer: {event:?}"),
}
}
}
}
}
}
}
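The rewrite above replaces the per-ping `FuturesUnordered` futures with long-lived substream halves: read halves live in a `tokio_stream::StreamMap` keyed by `PeerId`, write halves in a `HashMap`, and a `tokio::time::interval` drives outbound pings. Below is a self-contained sketch of that `StreamMap` polling pattern, with `u32` keys and plain tokio channels standing in for peer ids and substreams:

```rust
use tokio::sync::mpsc;
use tokio_stream::{wrappers::ReceiverStream, StreamExt, StreamMap};

#[tokio::main]
async fn main() {
    // Read halves keyed by a peer id; the StreamMap polls all of them at once
    // and tags every item with the key it came from.
    let mut read_streams: StreamMap<u32, ReceiverStream<&'static str>> = StreamMap::new();

    let (tx_a, rx_a) = mpsc::channel(8);
    let (tx_b, rx_b) = mpsc::channel(8);
    read_streams.insert(1, ReceiverStream::new(rx_a));
    read_streams.insert(2, ReceiverStream::new(rx_b));

    tx_a.send("pong from peer 1").await.unwrap();
    tx_b.send("pong from peer 2").await.unwrap();
    drop(tx_a);
    drop(tx_b);

    // A single await point serves every registered peer; in the ping protocol
    // the returned key is used to look up the matching write sink and the
    // recorded ping timestamp.
    while let Some((peer, message)) = read_streams.next().await {
        println!("peer {peer}: {message}");
    }
}
```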
13 changes: 10 additions & 3 deletions src/protocol/transport_service.rs
@@ -287,6 +287,8 @@ pub struct TransportService {

/// Close the connection if no substreams are open within this time frame.
keep_alive_tracker: KeepAliveTracker,

/// Whether substream activity on this protocol counts towards the keep-alive timeout.
counts_towards_keep_alive: bool,
}

impl TransportService {
@@ -298,6 +300,7 @@
next_substream_id: Arc<AtomicUsize>,
transport_handle: TransportManagerHandle,
keep_alive_timeout: Duration,
counts_towards_keep_alive: bool,
) -> (Self, Sender<InnerTransportEvent>) {
let (tx, rx) = channel(DEFAULT_CHANNEL_SIZE);

@@ -313,6 +316,7 @@
next_substream_id,
connections: HashMap::new(),
keep_alive_tracker,
counts_towards_keep_alive,
},
tx,
)
@@ -507,8 +511,11 @@
"open substream",
);

self.keep_alive_tracker.substream_activity(peer, connection_id);
connection.try_upgrade();
if self.counts_towards_keep_alive {
self.keep_alive_tracker.substream_activity(peer, connection_id);
connection.try_upgrade();
}

connection
.open_substream(
@@ -592,7 +599,7 @@ impl Stream for TransportService {
substream,
connection_id,
}) => {
if protocol == self.protocol {
if protocol == self.protocol && self.counts_towards_keep_alive {
self.keep_alive_tracker.substream_activity(peer, connection_id);
if let Some(context) = self.connections.get_mut(&peer) {
context.try_upgrade(&connection_id);
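The new `counts_towards_keep_alive` flag makes keep-alive accounting opt-in per protocol, so background traffic such as ping does not hold an otherwise idle connection open. A minimal sketch of that gating; `KeepAliveTracker`, `Service` and the `u64` connection ids here are simplified stand-ins for the crate's real types:

```rust
use std::{
    collections::HashMap,
    time::{Duration, Instant},
};

/// Stand-in for the keep-alive tracker: remembers a deadline per connection.
struct KeepAliveTracker {
    timeout: Duration,
    deadlines: HashMap<u64, Instant>,
}

impl KeepAliveTracker {
    fn substream_activity(&mut self, connection_id: u64) {
        self.deadlines.insert(connection_id, Instant::now() + self.timeout);
    }
}

struct Service {
    counts_towards_keep_alive: bool,
    keep_alive_tracker: KeepAliveTracker,
}

impl Service {
    fn on_substream_activity(&mut self, connection_id: u64) {
        // Only protocols that opted in refresh the keep-alive deadline, so
        // ping traffic alone does not keep an idle connection alive.
        if self.counts_towards_keep_alive {
            self.keep_alive_tracker.substream_activity(connection_id);
        }
    }
}

fn main() {
    let mut service = Service {
        counts_towards_keep_alive: false,
        keep_alive_tracker: KeepAliveTracker {
            timeout: Duration::from_secs(5),
            deadlines: HashMap::new(),
        },
    };

    service.on_substream_activity(1);
    assert!(service.keep_alive_tracker.deadlines.is_empty());
}
```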
1 change: 1 addition & 0 deletions src/transport/manager/mod.rs
@@ -348,6 +348,7 @@ impl TransportManager {
self.next_substream_id.clone(),
self.transport_manager_handle.clone(),
keep_alive_timeout,
true, // counts_towards_keep_alive
);

self.protocols.insert(