where
diff --git a/core/src/upgrade/error.rs b/core/src/upgrade/error.rs
index 3d349587c2c..c81ed7cf75b 100644
--- a/core/src/upgrade/error.rs
+++ b/core/src/upgrade/error.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use multistream_select::NegotiationError;
use std::fmt;
+use multistream_select::NegotiationError;
+
/// Error that can happen when upgrading a connection or substream to use a protocol.
#[derive(Debug)]
pub enum UpgradeError<E> {
diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs
index 5e3c65422f1..60a9fb9aba1 100644
--- a/core/src/upgrade/pending.rs
+++ b/core/src/upgrade/pending.rs
@@ -19,10 +19,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use std::{convert::Infallible, iter};
+
use futures::future;
-use std::convert::Infallible;
-use std::iter;
+
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always
/// returns a pending upgrade.
diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs
index 13270aa8b6d..22708d726e7 100644
--- a/core/src/upgrade/ready.rs
+++ b/core/src/upgrade/ready.rs
@@ -19,12 +19,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use std::{convert::Infallible, iter};
+
use futures::future;
-use std::convert::Infallible;
-use std::iter;
-/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream.
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+
+/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`]
+/// that directly yields the substream.
#[derive(Debug, Copy, Clone)]
pub struct ReadyUpgrade<P> {
protocol_name: P,
diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs
index 037045a2f29..b7fe4a53a7f 100644
--- a/core/src/upgrade/select.rs
+++ b/core/src/upgrade/select.rs
@@ -18,14 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::either::EitherFuture;
-use crate::upgrade::{
- InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade,
- UpgradeInfo,
-};
+use std::iter::{Chain, Map};
+
use either::Either;
use futures::future;
-use std::iter::{Chain, Map};
+
+use crate::{
+ either::EitherFuture,
+ upgrade::{
+ InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade,
+ UpgradeInfo,
+ },
+};
/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either
/// sub-upgrade.
diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs
index d8bec6f2b59..b9733e38322 100644
--- a/core/tests/transport_upgrade.rs
+++ b/core/tests/transport_upgrade.rs
@@ -18,18 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{io, pin::Pin};
+
use futures::prelude::*;
-use libp2p_core::transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport};
-use libp2p_core::upgrade::{
- self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo,
+use libp2p_core::{
+ transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport},
+ upgrade::{self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+ Endpoint,
};
-use libp2p_core::Endpoint;
use libp2p_identity as identity;
use libp2p_mplex::MplexConfig;
use libp2p_noise as noise;
use multiaddr::{Multiaddr, Protocol};
use rand::random;
-use std::{io, pin::Pin};
#[derive(Clone)]
struct HelloUpgrade {}
diff --git a/deny.toml b/deny.toml
index 5be86107edf..47487553028 100644
--- a/deny.toml
+++ b/deny.toml
@@ -43,6 +43,8 @@ allow = [
"MIT",
"MPL-2.0",
"Unlicense",
+ "Unicode-3.0",
+ "Zlib",
]
# The confidence threshold for detecting a license from license text.
# The higher the value, the more closely the license text must be to the
diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md
index bacbfe9509e..473d7020fcf 100644
--- a/docs/coding-guidelines.md
+++ b/docs/coding-guidelines.md
@@ -236,7 +236,7 @@ Concurrency adds complexity. Concurrency adds overhead due to synchronization.
Thus unless proven to be a bottleneck, don't make things concurrent. As an example
the hierarchical `NetworkBehaviour` state machine runs sequentially. It is easy
to debug as it runs sequentially. Thus far there has been no proof that
-shows a speed up when running it concurrently.
+shows a speed-up when running it concurrently.
## Use `async/await` for sequential execution only
diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml
index 010b76623e0..7c06b48a105 100644
--- a/examples/autonat/Cargo.toml
+++ b/examples/autonat/Cargo.toml
@@ -13,7 +13,6 @@ tokio = { workspace = true, features = ["full"] }
clap = { version = "4.5.6", features = ["derive"] }
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs
index def66c4823b..768a2052c80 100644
--- a/examples/autonat/src/bin/autonat_client.rs
+++ b/examples/autonat/src/bin/autonat_client.rs
@@ -20,15 +20,17 @@
#![doc = include_str!("../../README.md")]
+use std::{error::Error, net::Ipv4Addr, time::Duration};
+
use clap::Parser;
use futures::StreamExt;
-use libp2p::core::multiaddr::Protocol;
-use libp2p::core::Multiaddr;
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId};
-use std::error::Error;
-use std::net::Ipv4Addr;
-use std::time::Duration;
+use libp2p::{
+ autonat,
+ core::{multiaddr::Protocol, Multiaddr},
+ identify, identity, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux, PeerId,
+};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
@@ -60,7 +62,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
yamux::Config::default,
)?
.with_behaviour(|key| Behaviour::new(key.public()))?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
swarm.listen_on(
diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs
index 389cc0fa26f..f3bb6b6a439 100644
--- a/examples/autonat/src/bin/autonat_server.rs
+++ b/examples/autonat/src/bin/autonat_server.rs
@@ -20,14 +20,17 @@
#![doc = include_str!("../../README.md")]
+use std::{error::Error, net::Ipv4Addr};
+
use clap::Parser;
use futures::StreamExt;
-use libp2p::core::{multiaddr::Protocol, Multiaddr};
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{autonat, identify, identity, noise, tcp, yamux};
-use std::error::Error;
-use std::net::Ipv4Addr;
-use std::time::Duration;
+use libp2p::{
+ autonat,
+ core::{multiaddr::Protocol, Multiaddr},
+ identify, identity, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
@@ -53,7 +56,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
yamux::Config::default,
)?
.with_behaviour(|key| Behaviour::new(key.public()))?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
swarm.listen_on(
diff --git a/examples/autonatv2/Cargo.toml b/examples/autonatv2/Cargo.toml
index 6c862ee22e4..d400c53e7fd 100644
--- a/examples/autonatv2/Cargo.toml
+++ b/examples/autonatv2/Cargo.toml
@@ -19,17 +19,15 @@ libp2p = { workspace = true, features = ["macros", "tokio", "tcp", "noise", "yam
clap = { version = "4.4.18", features = ["derive"] }
tokio = { version = "1.35.1", features = ["macros", "rt-multi-thread"] }
tracing = "0.1.40"
-tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
+tracing-subscriber = { workspace = true, features = ["env-filter"] }
rand = "0.8.5"
-opentelemetry = { version = "0.21.0", optional = true }
opentelemetry_sdk = { version = "0.21.1", optional = true, features = ["rt-tokio"] }
tracing-opentelemetry = { version = "0.22.0", optional = true }
opentelemetry-jaeger = { version = "0.20.0", optional = true, features = ["rt-tokio"] }
cfg-if = "1.0.0"
[features]
-jaeger = ["opentelemetry", "opentelemetry_sdk", "tracing-opentelemetry", "opentelemetry-jaeger"]
-opentelemetry = ["dep:opentelemetry"]
+jaeger = ["opentelemetry_sdk", "tracing-opentelemetry", "opentelemetry-jaeger"]
opentelemetry_sdk = ["dep:opentelemetry_sdk"]
tracing-opentelemetry = ["dep:tracing-opentelemetry"]
opentelemetry-jaeger = ["dep:opentelemetry-jaeger"]
diff --git a/examples/autonatv2/Dockerfile b/examples/autonatv2/Dockerfile
index 6bc92e4d11b..083f9f5c113 100644
--- a/examples/autonatv2/Dockerfile
+++ b/examples/autonatv2/Dockerfile
@@ -1,4 +1,4 @@
-FROM rust:1.81-alpine as builder
+FROM rust:1.83-alpine as builder
RUN apk add musl-dev
diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs
index 9499ccbd158..e2d884cb445 100644
--- a/examples/browser-webrtc/src/lib.rs
+++ b/examples/browser-webrtc/src/lib.rs
@@ -1,13 +1,11 @@
#![cfg(target_arch = "wasm32")]
+use std::{io, time::Duration};
+
use futures::StreamExt;
use js_sys::Date;
-use libp2p::core::Multiaddr;
-use libp2p::ping;
-use libp2p::swarm::SwarmEvent;
+use libp2p::{core::Multiaddr, ping, swarm::SwarmEvent};
use libp2p_webrtc_websys as webrtc_websys;
-use std::io;
-use std::time::Duration;
use wasm_bindgen::prelude::*;
use web_sys::{Document, HtmlElement};
diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs
index 7f06b0d0d99..52222dc882b 100644
--- a/examples/browser-webrtc/src/main.rs
+++ b/examples/browser-webrtc/src/main.rs
@@ -1,23 +1,24 @@
#![allow(non_upper_case_globals)]
+use std::net::{Ipv4Addr, SocketAddr};
+
use anyhow::Result;
-use axum::extract::{Path, State};
-use axum::http::header::CONTENT_TYPE;
-use axum::http::StatusCode;
-use axum::response::{Html, IntoResponse};
-use axum::{http::Method, routing::get, Router};
+use axum::{
+ extract::{Path, State},
+ http::{header::CONTENT_TYPE, Method, StatusCode},
+ response::{Html, IntoResponse},
+ routing::get,
+ Router,
+};
use futures::StreamExt;
use libp2p::{
- core::muxing::StreamMuxerBox,
- core::Transport,
+ core::{muxing::StreamMuxerBox, Transport},
multiaddr::{Multiaddr, Protocol},
ping,
swarm::SwarmEvent,
};
use libp2p_webrtc as webrtc;
use rand::thread_rng;
-use std::net::{Ipv4Addr, SocketAddr};
-use std::time::Duration;
use tokio::net::TcpListener;
use tower_http::cors::{Any, CorsLayer};
@@ -37,11 +38,6 @@ async fn main() -> anyhow::Result<()> {
.map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))))
})?
.with_behaviour(|_| ping::Behaviour::default())?
- .with_swarm_config(|cfg| {
- cfg.with_idle_connection_timeout(
- Duration::from_secs(u64::MAX), // Allows us to observe the pings.
- )
- })
.build();
let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED)
@@ -127,7 +123,8 @@ struct Libp2pEndpoint(Multiaddr);
/// Serves the index.html file for our client.
///
/// Our server listens on a random UDP port for the WebRTC transport.
-/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address.
+/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__`
+/// placeholder with the actual address.
async fn get_index(
State(Libp2pEndpoint(libp2p_endpoint)): State<Libp2pEndpoint>,
) -> Result<Html<String>, StatusCode> {
diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml
index a1d32956825..031f84b6f95 100644
--- a/examples/chat/Cargo.toml
+++ b/examples/chat/Cargo.toml
@@ -10,10 +10,8 @@ release = false
[dependencies]
tokio = { workspace = true, features = ["full"] }
-async-trait = "0.1"
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs
index c785d301c2f..b0dcc767b6f 100644
--- a/examples/chat/src/main.rs
+++ b/examples/chat/src/main.rs
@@ -20,12 +20,19 @@
#![doc = include_str!("../README.md")]
+use std::{
+ collections::hash_map::DefaultHasher,
+ error::Error,
+ hash::{Hash, Hasher},
+ time::Duration,
+};
+
use futures::stream::StreamExt;
-use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux};
-use std::collections::hash_map::DefaultHasher;
-use std::error::Error;
-use std::hash::{Hash, Hasher};
-use std::time::Duration;
+use libp2p::{
+ gossipsub, mdns, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
use tokio::{io, io::AsyncBufReadExt, select};
use tracing_subscriber::EnvFilter;
@@ -61,7 +68,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Set a custom gossipsub configuration
let gossipsub_config = gossipsub::ConfigBuilder::default()
.heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space
- .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing)
+ .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message
+ // signing)
.message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated.
.build()
.map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`.
@@ -76,7 +84,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id())?;
Ok(MyBehaviour { gossipsub, mdns })
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
// Create a Gossipsub topic
diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml
index c1b4bbc6e7e..67edf04e2b0 100644
--- a/examples/dcutr/Cargo.toml
+++ b/examples/dcutr/Cargo.toml
@@ -13,7 +13,6 @@ clap = { version = "4.5.6", features = ["derive"] }
futures = { workspace = true }
futures-timer = "3.0"
libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] }
-log = "0.4"
tokio = { workspace = true, features = ["macros", "net", "rt", "signal"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs
index 630d4b2b1f3..3f403d534e7 100644
--- a/examples/dcutr/src/main.rs
+++ b/examples/dcutr/src/main.rs
@@ -20,6 +20,8 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, str::FromStr};
+
use clap::Parser;
use futures::{executor::block_on, future::FutureExt, stream::StreamExt};
use libp2p::{
@@ -28,8 +30,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, PeerId,
};
-use std::str::FromStr;
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
@@ -105,7 +105,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
)),
dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()),
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
swarm
diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml
index 3846e54c8d3..8e30dd2c75d 100644
--- a/examples/distributed-key-value-store/Cargo.toml
+++ b/examples/distributed-key-value-store/Cargo.toml
@@ -10,10 +10,8 @@ release = false
[dependencies]
tokio = { workspace = true, features = ["full"] }
-async-trait = "0.1"
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs
index 6b7947b7eb3..3522c84c720 100644
--- a/examples/distributed-key-value-store/src/main.rs
+++ b/examples/distributed-key-value-store/src/main.rs
@@ -20,17 +20,16 @@
#![doc = include_str!("../README.md")]
+use std::error::Error;
+
use futures::stream::StreamExt;
-use libp2p::kad;
-use libp2p::kad::store::MemoryStore;
-use libp2p::kad::Mode;
use libp2p::{
+ kad,
+ kad::{store::MemoryStore, Mode},
mdns, noise,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::time::Duration;
use tokio::{
io::{self, AsyncBufReadExt},
select,
@@ -69,7 +68,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
)?,
})
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server));
diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml
index d098ce44317..021215c003b 100644
--- a/examples/file-sharing/Cargo.toml
+++ b/examples/file-sharing/Cargo.toml
@@ -14,7 +14,6 @@ tokio = { workspace = true, features = ["full"] }
clap = { version = "4.5.6", features = ["derive"] }
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = [ "tokio", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs
index 5f6be83dc11..1e3b80a330c 100644
--- a/examples/file-sharing/src/main.rs
+++ b/examples/file-sharing/src/main.rs
@@ -22,15 +22,12 @@
mod network;
-use clap::Parser;
-use tokio::task::spawn;
+use std::{error::Error, io::Write, path::PathBuf};
-use futures::prelude::*;
-use futures::StreamExt;
+use clap::Parser;
+use futures::{prelude::*, StreamExt};
use libp2p::{core::Multiaddr, multiaddr::Protocol};
-use std::error::Error;
-use std::io::Write;
-use std::path::PathBuf;
+use tokio::task::spawn;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs
index a74afd1c0da..409255ee9ec 100644
--- a/examples/file-sharing/src/network.rs
+++ b/examples/file-sharing/src/network.rs
@@ -1,7 +1,14 @@
-use futures::channel::{mpsc, oneshot};
-use futures::prelude::*;
-use futures::StreamExt;
+use std::{
+ collections::{hash_map, HashMap, HashSet},
+ error::Error,
+ time::Duration,
+};
+use futures::{
+ channel::{mpsc, oneshot},
+ prelude::*,
+ StreamExt,
+};
use libp2p::{
core::Multiaddr,
identity, kad,
@@ -9,19 +16,13 @@ use libp2p::{
noise,
request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel},
swarm::{NetworkBehaviour, Swarm, SwarmEvent},
- tcp, yamux, PeerId,
+ tcp, yamux, PeerId, StreamProtocol,
};
-
-use libp2p::StreamProtocol;
use serde::{Deserialize, Serialize};
-use std::collections::{hash_map, HashMap, HashSet};
-use std::error::Error;
-use std::time::Duration;
/// Creates the network components, namely:
///
-/// - The network client to interact with the network layer from anywhere
-/// within your application.
+/// - The network client to interact with the network layer from anywhere within your application.
///
/// - The network event stream, e.g. for incoming requests.
///
diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml
index 8d12699afa7..c18f71a0386 100644
--- a/examples/identify/Cargo.toml
+++ b/examples/identify/Cargo.toml
@@ -12,7 +12,6 @@ release = false
tokio = { version = "1.37.0", features = ["full"] }
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = ["identify", "noise", "tcp", "tokio", "yamux"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs
index 22474061da6..3f08ac01e23 100644
--- a/examples/identify/src/main.rs
+++ b/examples/identify/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::error::Error;
+
use futures::StreamExt;
use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux};
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[tokio::main]
@@ -44,7 +45,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
key.public(),
))
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
// Tell the swarm to listen on all interfaces and a random, OS-assigned
diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml
index 115c604269f..fa04da4edcf 100644
--- a/examples/ipfs-kad/Cargo.toml
+++ b/examples/ipfs-kad/Cargo.toml
@@ -10,13 +10,10 @@ release = false
[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
-async-trait = "0.1"
clap = { version = "4.5.6", features = ["derive"] }
-env_logger = "0.10"
futures = { workspace = true }
anyhow = "1.0.86"
libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs
index 95921d6fa35..8d9a289bdd1 100644
--- a/examples/ipfs-kad/src/main.rs
+++ b/examples/ipfs-kad/src/main.rs
@@ -20,15 +20,21 @@
#![doc = include_str!("../README.md")]
-use std::num::NonZeroUsize;
-use std::ops::Add;
-use std::time::{Duration, Instant};
+use std::{
+ num::NonZeroUsize,
+ ops::Add,
+ time::{Duration, Instant},
+};
use anyhow::{bail, Result};
use clap::Parser;
use futures::StreamExt;
-use libp2p::swarm::{StreamProtocol, SwarmEvent};
-use libp2p::{bytes::BufMut, identity, kad, noise, tcp, yamux, PeerId};
+use libp2p::{
+ bytes::BufMut,
+ identity, kad, noise,
+ swarm::{StreamProtocol, SwarmEvent},
+ tcp, yamux, PeerId,
+};
use tracing_subscriber::EnvFilter;
const BOOTNODES: [&str; 4] = [
@@ -64,7 +70,6 @@ async fn main() -> Result<()> {
let store = kad::store::MemoryStore::new(key.public().to_peer_id());
kad::Behaviour::with_config(key.public().to_peer_id(), store, cfg)
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
// Add the bootnodes to the local routing table. `libp2p-dns` built
diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml
index 0813dba56e0..4dfe596d609 100644
--- a/examples/ipfs-private/Cargo.toml
+++ b/examples/ipfs-private/Cargo.toml
@@ -10,11 +10,9 @@ release = false
[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "io-std"] }
-async-trait = "0.1"
either = "1.12"
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs
index a57bfd465e0..6d8f9beb75d 100644
--- a/examples/ipfs-private/src/main.rs
+++ b/examples/ipfs-private/src/main.rs
@@ -20,6 +20,8 @@
#![doc = include_str!("../README.md")]
+use std::{env, error::Error, fs, path::Path, str::FromStr};
+
use either::Either;
use futures::prelude::*;
use libp2p::{
@@ -31,7 +33,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr, Transport,
};
-use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration};
use tokio::{io, io::AsyncBufReadExt, select};
use tracing_subscriber::EnvFilter;
@@ -151,7 +152,6 @@ async fn main() -> Result<(), Box> {
ping: ping::Behaviour::new(ping::Config::new()),
})
})?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
println!("Subscribing to {gossipsub_topic:?}");
diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml
index 129b1abb1f3..ad2941e3761 100644
--- a/examples/metrics/Cargo.toml
+++ b/examples/metrics/Cargo.toml
@@ -12,13 +12,13 @@ release = false
futures = { workspace = true }
axum = "0.7"
libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] }
-opentelemetry = { version = "0.25.0", features = ["metrics"] }
-opentelemetry-otlp = { version = "0.25.0", features = ["metrics"] }
-opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio", "metrics"] }
+opentelemetry = { version = "0.27.0", features = ["metrics"] }
+opentelemetry-otlp = { version = "0.27.0", features = ["metrics"] }
+opentelemetry_sdk = { version = "0.27.0", features = ["rt-tokio", "metrics"] }
prometheus-client = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true }
-tracing-opentelemetry = "0.26.0"
+tracing-opentelemetry = "0.28.0"
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs
index 4a9c9785bb3..f1485832d86 100644
--- a/examples/metrics/src/http_service.rs
+++ b/examples/metrics/src/http_service.rs
@@ -18,15 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use axum::extract::State;
-use axum::http::StatusCode;
-use axum::response::IntoResponse;
-use axum::routing::get;
-use axum::Router;
-use prometheus_client::encoding::text::encode;
-use prometheus_client::registry::Registry;
-use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::{
+ net::SocketAddr,
+ sync::{Arc, Mutex},
+};
+
+use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router};
+use prometheus_client::{encoding::text::encode, registry::Registry};
use tokio::net::TcpListener;
const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0";
diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs
index 1755c769053..6f6e9d08e31 100644
--- a/examples/metrics/src/main.rs
+++ b/examples/metrics/src/main.rs
@@ -20,18 +20,22 @@
#![doc = include_str!("../README.md")]
+use std::error::Error;
+
use futures::StreamExt;
-use libp2p::core::Multiaddr;
-use libp2p::metrics::{Metrics, Recorder};
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{identify, identity, noise, ping, tcp, yamux};
-use opentelemetry::{trace::TracerProvider, KeyValue};
+use libp2p::{
+ core::Multiaddr,
+ identify, identity,
+ metrics::{Metrics, Recorder},
+ noise, ping,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
+use opentelemetry::{trace::TracerProvider as _, KeyValue};
+use opentelemetry_otlp::SpanExporter;
+use opentelemetry_sdk::{runtime, trace::TracerProvider};
use prometheus_client::registry::Registry;
-use std::error::Error;
-use std::time::Duration;
-use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::util::SubscriberInitExt;
-use tracing_subscriber::{EnvFilter, Layer};
+use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
mod http_service;
@@ -50,7 +54,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
)?
.with_bandwidth_metrics(&mut metric_registry)
.with_behaviour(|key| Behaviour::new(key.public()))?
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
.build();
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
@@ -90,14 +93,16 @@ async fn main() -> Result<(), Box> {
}
fn setup_tracing() -> Result<(), Box<dyn Error>> {
- let provider = opentelemetry_otlp::new_pipeline()
- .tracing()
- .with_exporter(opentelemetry_otlp::new_exporter().tonic())
- .with_trace_config(opentelemetry_sdk::trace::Config::default().with_resource(
- opentelemetry_sdk::Resource::new(vec![KeyValue::new("service.name", "libp2p")]),
- ))
- .install_batch(opentelemetry_sdk::runtime::Tokio)?;
-
+ let provider = TracerProvider::builder()
+ .with_batch_exporter(
+ SpanExporter::builder().with_tonic().build()?,
+ runtime::Tokio,
+ )
+ .with_resource(opentelemetry_sdk::Resource::new(vec![KeyValue::new(
+ "service.name",
+ "libp2p",
+ )]))
+ .build();
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env()))
.with(
diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml
index 633f043de56..acc3b2affed 100644
--- a/examples/ping/Cargo.toml
+++ b/examples/ping/Cargo.toml
@@ -12,7 +12,6 @@ release = false
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] }
tokio = { workspace = true, features = ["full"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs
index 911b0384f89..565ef057c0d 100644
--- a/examples/ping/src/main.rs
+++ b/examples/ping/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::prelude::*;
use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr};
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml
index 7385cf6c033..3bdaf89b04f 100644
--- a/examples/relay-server/Cargo.toml
+++ b/examples/relay-server/Cargo.toml
@@ -13,7 +13,6 @@ clap = { version = "4.5.6", features = ["derive"] }
tokio = { version = "1.37.0", features = ["full"] }
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = ["tokio", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] }
-tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs
index 46a122d0717..b7868418fb0 100644
--- a/examples/relay-server/src/main.rs
+++ b/examples/relay-server/src/main.rs
@@ -21,17 +21,19 @@
#![doc = include_str!("../README.md")]
+use std::{
+ error::Error,
+ net::{Ipv4Addr, Ipv6Addr},
+};
+
use clap::Parser;
use futures::StreamExt;
use libp2p::{
- core::multiaddr::Protocol,
- core::Multiaddr,
+ core::{multiaddr::Protocol, Multiaddr},
identify, identity, noise, ping, relay,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::net::{Ipv4Addr, Ipv6Addr};
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs
index edd3d10a0ce..bdf9aeafdab 100644
--- a/examples/rendezvous/src/bin/rzv-discover.rs
+++ b/examples/rendezvous/src/bin/rzv-discover.rs
@@ -18,6 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
use libp2p::{
multiaddr::Protocol,
@@ -25,8 +27,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::error::Error;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
const NAMESPACE: &str = "rendezvous";
@@ -53,7 +53,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
rendezvous: rendezvous::client::Behaviour::new(key.clone()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
})?
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
swarm.dial(rendezvous_point_address.clone()).unwrap();
diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs
index ff637aa6f49..00e94627292 100644
--- a/examples/rendezvous/src/bin/rzv-identify.rs
+++ b/examples/rendezvous/src/bin/rzv-identify.rs
@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use futures::StreamExt;
use libp2p::{
identify, noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
@@ -55,7 +56,6 @@ async fn main() {
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
})
.unwrap()
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap());
diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs
index bd848238d4a..f70eda5d55e 100644
--- a/examples/rendezvous/src/bin/rzv-register.rs
+++ b/examples/rendezvous/src/bin/rzv-register.rs
@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use futures::StreamExt;
use libp2p::{
noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
@@ -51,11 +52,10 @@ async fn main() {
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
})
.unwrap()
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
- // In production the external address should be the publicly facing IP address of the rendezvous point.
- // This address is recorded in the registration entry by the rendezvous point.
+ // In production the external address should be the publicly facing IP address of the rendezvous
+ // point. This address is recorded in the registration entry by the rendezvous point.
let external_address = "/ip4/127.0.0.1/tcp/0".parse::<Multiaddr>().unwrap();
swarm.add_external_address(external_address);
diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs
index a15bc1ca2d3..a345d0faed9 100644
--- a/examples/rendezvous/src/main.rs
+++ b/examples/rendezvous/src/main.rs
@@ -20,14 +20,14 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
use libp2p::{
identify, noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
@@ -55,7 +55,6 @@ async fn main() -> Result<(), Box<dyn Error>> {
rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
})?
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap());
diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs
index 872ab8c3b98..71d2d2fcc76 100644
--- a/examples/stream/src/main.rs
+++ b/examples/stream/src/main.rs
@@ -44,12 +44,14 @@ async fn main() -> Result<()> {
// Deal with incoming streams.
// Spawning a dedicated task is just one way of doing this.
// libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow.
- // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them.
+ // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application
+ // cannot keep up processing them.
tokio::spawn(async move {
// This loop handles incoming streams _sequentially_ but that doesn't have to be the case.
// You can also spawn a dedicated task per stream if you want to.
- // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer.
- // Each task needs memory meaning an aggressive remote peer may force you OOM this way.
+ // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an
+ // unbounded buffer. Each task needs memory meaning an aggressive remote peer may
+ // force you OOM this way.
while let Some((peer, stream)) = incoming_streams.next().await {
match echo(stream).await {
@@ -102,7 +104,8 @@ async fn connection_handler(peer: PeerId, mut control: stream::Control) {
}
Err(error) => {
// Other errors may be temporary.
- // In production, something like an exponential backoff / circuit-breaker may be more appropriate.
+ // In production, something like an exponential backoff / circuit-breaker may be
+ // more appropriate.
tracing::debug!(%peer, %error);
continue;
}
diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs
index fd0764990d1..19de8d773ae 100644
--- a/examples/upnp/src/main.rs
+++ b/examples/upnp/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::error::Error;
+
use futures::prelude::*;
use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr};
-use std::error::Error;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml
index 79728f9535c..c4f36d2a990 100644
--- a/hole-punching-tests/Cargo.toml
+++ b/hole-punching-tests/Cargo.toml
@@ -7,7 +7,7 @@ license = "MIT"
[dependencies]
anyhow = "1"
-env_logger = "0.10.2"
+env_logger = { workspace = true }
futures = { workspace = true }
libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] }
tracing = { workspace = true }
diff --git a/hole-punching-tests/Dockerfile b/hole-punching-tests/Dockerfile
index 403cc301fc6..30c8e0a6414 100644
--- a/hole-punching-tests/Dockerfile
+++ b/hole-punching-tests/Dockerfile
@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1.5-labs
-FROM rust:1.81.0 as builder
+FROM rust:1.83.0 as builder
# Run with access to the target cache to speed up builds
WORKDIR /workspace
diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs
index 02229e16262..bc5a1bae4f5 100644
--- a/hole-punching-tests/src/main.rs
+++ b/hole-punching-tests/src/main.rs
@@ -18,24 +18,27 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::HashMap,
+ fmt, io,
+ net::{IpAddr, Ipv4Addr},
+ str::FromStr,
+ time::Duration,
+};
+
use anyhow::{Context, Result};
use either::Either;
use futures::stream::StreamExt;
-use libp2p::core::transport::ListenerId;
-use libp2p::swarm::dial_opts::DialOpts;
-use libp2p::swarm::ConnectionId;
use libp2p::{
- core::multiaddr::{Multiaddr, Protocol},
+ core::{
+ multiaddr::{Multiaddr, Protocol},
+ transport::ListenerId,
+ },
dcutr, identify, noise, ping, relay,
- swarm::{NetworkBehaviour, SwarmEvent},
+ swarm::{dial_opts::DialOpts, ConnectionId, NetworkBehaviour, SwarmEvent},
tcp, yamux, Swarm,
};
use redis::AsyncCommands;
-use std::collections::HashMap;
-use std::net::{IpAddr, Ipv4Addr};
-use std::str::FromStr;
-use std::time::Duration;
-use std::{fmt, io};
/// The redis key we push the relay's TCP listen address to.
const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS";
diff --git a/identity/Cargo.toml b/identity/Cargo.toml
index cc41abb3e24..b13229c5826 100644
--- a/identity/Cargo.toml
+++ b/identity/Cargo.toml
@@ -41,7 +41,6 @@ rand = ["dep:rand", "ed25519-dalek?/rand_core"]
[dev-dependencies]
quickcheck = { workspace = true }
-base64 = "0.22.1"
serde_json = "1.0"
rmp-serde = "1.3"
criterion = "0.5"
diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs
index 922675097df..11cdaced795 100644
--- a/identity/src/ecdsa.rs
+++ b/identity/src/ecdsa.rs
@@ -20,10 +20,9 @@
//! ECDSA keys with secp256r1 curve support.
-use super::error::DecodingError;
-use core::cmp;
-use core::fmt;
-use core::hash;
+use core::{cmp, fmt, hash};
+use std::convert::Infallible;
+
use p256::{
ecdsa::{
signature::{Signer, Verifier},
@@ -32,9 +31,10 @@ use p256::{
EncodedPoint,
};
use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey};
-use std::convert::Infallible;
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// An ECDSA keypair generated using `secp256r1` curve.
#[derive(Clone)]
pub struct Keypair {
@@ -158,7 +158,8 @@ impl PublicKey {
self.0.verify(msg, &sig).is_ok()
}
- /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression.
+ /// Try to parse a public key from a byte buffer containing raw
+ /// components of a key with or without compression.
pub fn try_from_bytes(k: &[u8]) -> Result<PublicKey, DecodingError> {
let enc_pt = EncodedPoint::from_bytes(k)
.map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?;
@@ -168,7 +169,8 @@ impl PublicKey {
.map(PublicKey)
}
- /// Convert a public key into a byte buffer containing raw components of the key without compression.
+ /// Convert a public key into a byte buffer containing
+ /// raw components of the key without compression.
pub fn to_bytes(&self) -> Vec<u8> {
self.0.to_encoded_point(false).as_bytes().to_owned()
}
diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs
index d77c44547d6..5a1a53dd4af 100644
--- a/identity/src/ed25519.rs
+++ b/identity/src/ed25519.rs
@@ -20,13 +20,13 @@
//! Ed25519 keys.
-use super::error::DecodingError;
-use core::cmp;
-use core::fmt;
-use core::hash;
+use core::{cmp, fmt, hash};
+
use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _};
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// An Ed25519 keypair.
#[derive(Clone)]
pub struct Keypair(ed25519::SigningKey);
@@ -152,7 +152,8 @@ impl PublicKey {
self.0.to_bytes()
}
- /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`.
+ /// Try to parse a public key from a byte array containing
+ /// the actual key as produced by `to_bytes`.
pub fn try_from_bytes(k: &[u8]) -> Result<PublicKey, DecodingError> {
let k = <[u8; 32]>::try_from(k)
.map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?;
@@ -206,9 +207,10 @@ impl SecretKey {
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::*;
+ use super::*;
+
fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool {
kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes()
}
diff --git a/identity/src/error.rs b/identity/src/error.rs
index 71cd78fe1ea..6e8c4d02caa 100644
--- a/identity/src/error.rs
+++ b/identity/src/error.rs
@@ -20,8 +20,7 @@
//! Errors during identity key operations.
-use std::error::Error;
-use std::fmt;
+use std::{error::Error, fmt};
use crate::KeyType;
diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs
index f1e8a7c2142..a1bbba00fa9 100644
--- a/identity/src/keypair.rs
+++ b/identity/src/keypair.rs
@@ -24,40 +24,40 @@
feature = "ed25519",
feature = "rsa"
))]
-#[cfg(feature = "ed25519")]
-use crate::ed25519;
+use quick_protobuf::{BytesReader, Writer};
+
+#[cfg(feature = "ecdsa")]
+use crate::ecdsa;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use crate::error::OtherVariantError;
-use crate::error::{DecodingError, SigningError};
+#[cfg(feature = "ed25519")]
+use crate::ed25519;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use crate::proto;
+use crate::error::OtherVariantError;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use quick_protobuf::{BytesReader, Writer};
-
+use crate::proto;
#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
use crate::rsa;
-
#[cfg(feature = "secp256k1")]
use crate::secp256k1;
-
-#[cfg(feature = "ecdsa")]
-use crate::ecdsa;
-use crate::KeyType;
+use crate::{
+ error::{DecodingError, SigningError},
+ KeyType,
+};
/// Identity keypair of a node.
///
@@ -75,7 +75,6 @@ use crate::KeyType;
/// let mut bytes = std::fs::read("private.pk8").unwrap();
/// let keypair = Keypair::rsa_from_pkcs8(&mut bytes);
/// ```
-///
#[derive(Debug, Clone)]
pub struct Keypair {
keypair: KeyPairInner,
@@ -341,7 +340,8 @@ impl Keypair {
}
}
- /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain.
+ /// Deterministically derive a new secret from this [`Keypair`],
+ /// taking into account the provided domain.
///
/// This works for all key types except RSA where it returns `None`.
///
@@ -352,10 +352,11 @@ impl Keypair {
/// # use libp2p_identity as identity;
/// let key = identity::Keypair::generate_ed25519();
///
- /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519");
+ /// let new_key = key
+ /// .derive_secret(b"my encryption key")
+ /// .expect("can derive secret for ed25519");
/// # }
/// ```
- ///
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
@@ -904,9 +905,10 @@ mod tests {
#[test]
fn public_key_implements_hash() {
- use crate::PublicKey;
use std::hash::Hash;
+ use crate::PublicKey;
+
fn assert_implements_hash<T: Hash>() {}
assert_implements_hash::<PublicKey>();
@@ -914,9 +916,10 @@ mod tests {
#[test]
fn public_key_implements_ord() {
- use crate::PublicKey;
use std::cmp::Ord;
+ use crate::PublicKey;
+
fn assert_implements_ord<T: Ord>() {}
assert_implements_ord::<PublicKey>();
diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs
index 8ae6d99ae32..7f6d1f44eab 100644
--- a/identity/src/peer_id.rs
+++ b/identity/src/peer_id.rs
@@ -18,17 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{fmt, str::FromStr};
+
#[cfg(feature = "rand")]
use rand::Rng;
use sha2::Digest as _;
-use std::{fmt, str::FromStr};
use thiserror::Error;
/// Local type-alias for multihash.
///
/// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`.
/// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses.
-/// Given that this appears in our type-signature, using a "common" number here makes us more compatible.
+/// Given that this appears in our type-signature,
+/// using a "common" number here makes us more compatible.
type Multihash = multihash::Multihash<64>;
#[cfg(feature = "serde")]
diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs
index 5eb78a4af75..b14d8c66d86 100644
--- a/identity/src/rsa.rs
+++ b/identity/src/rsa.rs
@@ -20,15 +20,20 @@
//! RSA keys.
-use super::error::*;
-use asn1_der::typed::{DerDecodable, DerEncodable, DerTypeView, Sequence};
-use asn1_der::{Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking};
-use ring::rand::SystemRandom;
-use ring::signature::KeyPair;
-use ring::signature::{self, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256};
use std::{fmt, sync::Arc};
+
+use asn1_der::{
+ typed::{DerDecodable, DerEncodable, DerTypeView, Sequence},
+ Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking,
+};
+use ring::{
+ rand::SystemRandom,
+ signature::{self, KeyPair, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256},
+};
use zeroize::Zeroize;
+use super::error::*;
+
/// An RSA keypair.
#[derive(Clone)]
pub struct Keypair(Arc<RsaKeyPair>);
@@ -315,9 +320,10 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo {
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::*;
+ use super::*;
+
const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8");
const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8");
const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8");
diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs
index a6e9e923268..e884cf1385d 100644
--- a/identity/src/secp256k1.rs
+++ b/identity/src/secp256k1.rs
@@ -20,15 +20,15 @@
//! Secp256k1 keys.
-use super::error::DecodingError;
+use core::{cmp, fmt, hash};
+
use asn1_der::typed::{DerDecodable, Sequence};
-use core::cmp;
-use core::fmt;
-use core::hash;
use libsecp256k1::{Message, Signature};
use sha2::{Digest as ShaDigestTrait, Sha256};
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// A Secp256k1 keypair.
#[derive(Clone)]
pub struct Keypair {
diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml
index 0eb32bb4975..8f12275668d 100644
--- a/interop-tests/Cargo.toml
+++ b/interop-tests/Cargo.toml
@@ -13,7 +13,6 @@ crate-type = ["cdylib", "rlib"]
[dependencies]
anyhow = "1"
-either = "1.11.0"
futures = { workspace = true }
rand = "0.8.5"
serde = { version = "1", features = ["derive"] }
diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium
index 86edbc5b9d2..4ccb142b4a3 100644
--- a/interop-tests/Dockerfile.chromium
+++ b/interop-tests/Dockerfile.chromium
@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1.5-labs
-FROM rust:1.81 as chef
+FROM rust:1.83 as chef
RUN rustup target add wasm32-unknown-unknown
RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack"
RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt"
diff --git a/interop-tests/Dockerfile.native b/interop-tests/Dockerfile.native
index 499c73437fc..f0b078d9492 100644
--- a/interop-tests/Dockerfile.native
+++ b/interop-tests/Dockerfile.native
@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1.5-labs
-FROM lukemathwalker/cargo-chef:0.1.67-rust-bullseye as chef
+FROM lukemathwalker/cargo-chef:0.1.68-rust-bullseye as chef
WORKDIR /app
FROM chef AS planner
@@ -15,7 +15,7 @@ COPY . .
RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping
RUN cp /app/target/$(rustc -vV | grep host | awk '{print $2}')/release/native_ping /usr/local/bin/testplan
-FROM scratch
+FROM debian:bullseye
COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan
ENV RUST_BACKTRACE=1
ENTRYPOINT ["testplan"]
diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs
index df36f8e5baf..91fc69dc215 100644
--- a/interop-tests/src/arch.rs
+++ b/interop-tests/src/arch.rs
@@ -1,7 +1,6 @@
// Native re-exports
#[cfg(not(target_arch = "wasm32"))]
pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient};
-
// Wasm re-exports
#[cfg(target_arch = "wasm32")]
pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient};
@@ -11,11 +10,13 @@ pub(crate) mod native {
use std::time::Duration;
use anyhow::{bail, Context, Result};
- use futures::future::BoxFuture;
- use futures::FutureExt;
- use libp2p::identity::Keypair;
- use libp2p::swarm::{NetworkBehaviour, Swarm};
- use libp2p::{noise, tcp, tls, yamux};
+ use futures::{future::BoxFuture, FutureExt};
+ use libp2p::{
+ identity::Keypair,
+ noise,
+ swarm::{NetworkBehaviour, Swarm},
+ tcp, tls, yamux,
+ };
use libp2p_mplex as mplex;
use libp2p_webrtc as webrtc;
use redis::AsyncCommands;
@@ -48,7 +49,6 @@ pub(crate) mod native {
.with_tokio()
.with_quic()
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/udp/0/quic-v1"),
),
@@ -61,7 +61,6 @@ pub(crate) mod native {
mplex::MplexConfig::default,
)?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0"),
),
@@ -74,7 +73,6 @@ pub(crate) mod native {
yamux::Config::default,
)?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0"),
),
@@ -87,7 +85,6 @@ pub(crate) mod native {
mplex::MplexConfig::default,
)?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0"),
),
@@ -100,7 +97,6 @@ pub(crate) mod native {
yamux::Config::default,
)?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0"),
),
@@ -110,7 +106,6 @@ pub(crate) mod native {
.with_websocket(tls::Config::new, mplex::MplexConfig::default)
.await?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0/ws"),
),
@@ -120,7 +115,6 @@ pub(crate) mod native {
.with_websocket(tls::Config::new, yamux::Config::default)
.await?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0/ws"),
),
@@ -130,7 +124,6 @@ pub(crate) mod native {
.with_websocket(noise::Config::new, mplex::MplexConfig::default)
.await?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0/ws"),
),
@@ -140,7 +133,6 @@ pub(crate) mod native {
.with_websocket(noise::Config::new, yamux::Config::default)
.await?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/tcp/0/ws"),
),
@@ -154,7 +146,6 @@ pub(crate) mod native {
))
})?
.with_behaviour(behaviour_constructor)?
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build(),
format!("/ip4/{ip}/udp/0/webrtc-direct"),
),
@@ -186,15 +177,19 @@ pub(crate) mod native {
#[cfg(target_arch = "wasm32")]
pub(crate) mod wasm {
+ use std::time::Duration;
+
use anyhow::{bail, Context, Result};
use futures::future::{BoxFuture, FutureExt};
- use libp2p::core::upgrade::Version;
- use libp2p::identity::Keypair;
- use libp2p::swarm::{NetworkBehaviour, Swarm};
- use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _};
+ use libp2p::{
+ core::upgrade::Version,
+ identity::Keypair,
+ noise,
+ swarm::{NetworkBehaviour, Swarm},
+ websocket_websys, webtransport_websys, yamux, Transport as _,
+ };
use libp2p_mplex as mplex;
use libp2p_webrtc_websys as webrtc_websys;
- use std::time::Duration;
use crate::{BlpopRequest, Muxer, SecProtocol, Transport};
diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs
index 0d697a0e2a3..7730b869456 100644
--- a/interop-tests/src/bin/wasm_ping.rs
+++ b/interop-tests/src/bin/wasm_ping.rs
@@ -1,26 +1,27 @@
#![allow(non_upper_case_globals)]
-use std::future::IntoFuture;
-use std::process::Stdio;
-use std::time::Duration;
+use std::{future::IntoFuture, process::Stdio, time::Duration};
use anyhow::{bail, Context, Result};
-use axum::http::{header, Uri};
-use axum::response::{Html, IntoResponse, Response};
-use axum::routing::get;
-use axum::{extract::State, http::StatusCode, routing::post, Json, Router};
+use axum::{
+ extract::State,
+ http::{header, StatusCode, Uri},
+ response::{Html, IntoResponse, Response},
+ routing::{get, post},
+ Json, Router,
+};
+use interop_tests::{BlpopRequest, Report};
use redis::{AsyncCommands, Client};
use thirtyfour::prelude::*;
-use tokio::io::{AsyncBufReadExt, BufReader};
-use tokio::net::TcpListener;
-use tokio::process::Child;
-use tokio::sync::mpsc;
-use tower_http::cors::CorsLayer;
-use tower_http::trace::TraceLayer;
+use tokio::{
+ io::{AsyncBufReadExt, BufReader},
+ net::TcpListener,
+ process::Child,
+ sync::mpsc,
+};
+use tower_http::{cors::CorsLayer, trace::TraceLayer};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
-use interop_tests::{BlpopRequest, Report};
-
mod config;
const BIND_ADDR: &str = "127.0.0.1:8080";
diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs
index 0154bec51a4..a16dc4b8228 100644
--- a/interop-tests/src/lib.rs
+++ b/interop-tests/src/lib.rs
@@ -1,11 +1,14 @@
-use std::str::FromStr;
-use std::time::Duration;
+use std::{str::FromStr, time::Duration};
use anyhow::{bail, Context, Result};
use futures::{FutureExt, StreamExt};
-use libp2p::identity::Keypair;
-use libp2p::swarm::SwarmEvent;
-use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr};
+use libp2p::{
+ identify,
+ identity::Keypair,
+ ping,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ Multiaddr,
+};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md
index e383cfd0cdc..59bf2e81383 100644
--- a/libp2p/CHANGELOG.md
+++ b/libp2p/CHANGELOG.md
@@ -1,8 +1,27 @@
-## 0.54.2
+## 0.55.0
+
+- Raise MSRV to 1.83.0.
+ See [PR 5650](https://github.com/libp2p/rust-libp2p/pull/5650).
+
+- Add `with_connection_timeout` on `SwarmBuilder` to allow configuration of the connection_timeout parameter.
+ See [PR 5575](https://github.com/libp2p/rust-libp2p/pull/5575).
- Deprecate `void` crate.
See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676).
+- Update default for idle-connection-timeout to 10s.
+ See [PR 4967](https://github.com/libp2p/rust-libp2p/pull/4967).
+
+- Expose swarm builder phase errors.
+ See [PR 5726](https://github.com/libp2p/rust-libp2p/pull/5726).
+
+- Deprecate `ConnectionHandler::{InboundOpenInfo, OutboundOpenInfo}` associated type.
+ Previously, users could tag pending sub streams with custom data and retrieve the data
+ after the substream has been negotiated.
+ But substreams themselves are completely interchangeable, users should instead track
+ additional data inside `ConnectionHandler` after negotiation.
+ See [PR 5242](https://github.com/libp2p/rust-libp2p/pull/5242).
+
## 0.54.1
- Update individual crates.
diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml
index 79f4b8fbb9a..39d01a5c5c7 100644
--- a/libp2p/Cargo.toml
+++ b/libp2p/Cargo.toml
@@ -3,7 +3,7 @@ name = "libp2p"
edition = "2021"
rust-version = { workspace = true }
description = "Peer-to-peer networking library"
-version = "0.54.2"
+version = "0.55.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@@ -94,7 +94,7 @@ bytes = "1"
either = "1.9.0"
futures = { workspace = true }
futures-timer = "3.0.2" # Explicit dependency to be used in `wasm-bindgen` feature
-getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature
+getrandom = { workspace = true } # Explicit dependency to be used in `wasm-bindgen` feature
# TODO feature flag?
rw-stream-sink = { workspace = true }
@@ -137,12 +137,9 @@ libp2p-websocket = { workspace = true, optional = true }
[dev-dependencies]
async-std = { version = "1.6.2", features = ["attributes"] }
-async-trait = "0.1"
-clap = { version = "4.1.6", features = ["derive"] }
tokio = { workspace = true, features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] }
libp2p-mplex = { workspace = true }
-libp2p-noise = { workspace = true }
libp2p-tcp = { workspace = true, features = ["tokio"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs
index 8931c5c4166..ac668e26b3f 100644
--- a/libp2p/src/bandwidth.rs
+++ b/libp2p/src/bandwidth.rs
@@ -20,13 +20,6 @@
#![allow(deprecated)]
-use crate::core::muxing::{StreamMuxer, StreamMuxerEvent};
-
-use futures::{
- io::{IoSlice, IoSliceMut},
- prelude::*,
- ready,
-};
use std::{
convert::TryFrom as _,
io,
@@ -38,6 +31,14 @@ use std::{
task::{Context, Poll},
};
+use futures::{
+ io::{IoSlice, IoSliceMut},
+ prelude::*,
+ ready,
+};
+
+use crate::core::muxing::{StreamMuxer, StreamMuxerEvent};
+
/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened
/// streams.
#[derive(Clone)]
@@ -123,7 +124,7 @@ impl BandwidthSinks {
/// Returns the total number of bytes that have been downloaded on all the streams.
///
/// > **Note**: This method is by design subject to race conditions. The returned value should
- /// > only ever be used for statistics purposes.
+ /// > only ever be used for statistics purposes.
pub fn total_inbound(&self) -> u64 {
self.inbound.load(Ordering::Relaxed)
}
@@ -131,7 +132,7 @@ impl BandwidthSinks {
/// Returns the total number of bytes that have been uploaded on all the streams.
///
/// > **Note**: This method is by design subject to race conditions. The returned value should
- /// > only ever be used for statistics purposes.
+ /// > only ever be used for statistics purposes.
pub fn total_outbound(&self) -> u64 {
self.outbound.load(Ordering::Relaxed)
}
diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs
index de003314cca..ae4d0b0d4e4 100644
--- a/libp2p/src/builder.rs
+++ b/libp2p/src/builder.rs
@@ -4,6 +4,10 @@ mod phase;
mod select_muxer;
mod select_security;
+#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
+pub use phase::WebsocketError;
+pub use phase::{BehaviourError, TransportError};
+
/// Build a [`Swarm`](libp2p_swarm::Swarm) by combining an identity, a set of
/// [`Transport`](libp2p_core::Transport)s and a
/// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour).
@@ -33,31 +37,31 @@ mod select_security;
/// # relay: libp2p_relay::client::Behaviour,
/// # }
///
-/// let swarm = SwarmBuilder::with_new_identity()
-/// .with_tokio()
-/// .with_tcp(
-/// Default::default(),
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )?
-/// .with_quic()
-/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())?
-/// .with_dns()?
-/// .with_websocket(
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )
-/// .await?
-/// .with_relay_client(
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )?
-/// .with_behaviour(|_key, relay| MyBehaviour { relay })?
-/// .with_swarm_config(|cfg| {
-/// // Edit cfg here.
-/// cfg
-/// })
-/// .build();
+/// let swarm = SwarmBuilder::with_new_identity()
+/// .with_tokio()
+/// .with_tcp(
+/// Default::default(),
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )?
+/// .with_quic()
+/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())?
+/// .with_dns()?
+/// .with_websocket(
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )
+/// .await?
+/// .with_relay_client(
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )?
+/// .with_behaviour(|_key, relay| MyBehaviour { relay })?
+/// .with_swarm_config(|cfg| {
+/// // Edit cfg here.
+/// cfg
+/// })
+/// .build();
/// #
/// # Ok(())
/// # }
pub struct SwarmBuilder<Provider, Phase> {
#[cfg(test)]
mod tests {
- use crate::SwarmBuilder;
use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport};
use libp2p_identity::PeerId;
use libp2p_swarm::NetworkBehaviour;
+ use crate::SwarmBuilder;
+
#[test]
#[cfg(all(
feature = "tokio",
diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs
index c9679a46767..f8f1672f952 100644
--- a/libp2p/src/builder/phase.rs
+++ b/libp2p/src/builder/phase.rs
@@ -16,23 +16,26 @@ mod websocket;
use bandwidth_logging::*;
use bandwidth_metrics::*;
+pub use behaviour::BehaviourError;
use behaviour::*;
use build::*;
use dns::*;
+use libp2p_core::{muxing::StreamMuxerBox, Transport};
+use libp2p_identity::Keypair;
+pub use other_transport::TransportError;
use other_transport::*;
use provider::*;
use quic::*;
use relay::*;
use swarm::*;
use tcp::*;
+#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
+pub use websocket::WebsocketError;
use websocket::*;
-use super::select_muxer::SelectMuxerUpgrade;
-use super::select_security::SelectSecurityUpgrade;
-use super::SwarmBuilder;
-
-use libp2p_core::{muxing::StreamMuxerBox, Transport};
-use libp2p_identity::Keypair;
+use super::{
+ select_muxer::SelectMuxerUpgrade, select_security::SelectSecurityUpgrade, SwarmBuilder,
+};
#[allow(unreachable_pub)]
pub trait IntoSecurityUpgrade {
diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs
index cee9498fcaa..f24df5f3df5 100644
--- a/libp2p/src/builder/phase/bandwidth_logging.rs
+++ b/libp2p/src/builder/phase/bandwidth_logging.rs
@@ -1,10 +1,9 @@
+use std::{marker::PhantomData, sync::Arc};
+
use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
-use crate::transport_ext::TransportExt;
-use crate::SwarmBuilder;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use crate::{transport_ext::TransportExt, SwarmBuilder};
pub struct BandwidthLoggingPhase {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs
index 52daa731ddd..ddd292c140e 100644
--- a/libp2p/src/builder/phase/bandwidth_metrics.rs
+++ b/libp2p/src/builder/phase/bandwidth_metrics.rs
@@ -1,10 +1,9 @@
+use std::{marker::PhantomData, sync::Arc};
+
use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
-use crate::transport_ext::TransportExt;
-use crate::SwarmBuilder;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use crate::{transport_ext::TransportExt, SwarmBuilder};
pub struct BandwidthMetricsPhase {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs
index 939db935c80..22f8c617051 100644
--- a/libp2p/src/builder/phase/behaviour.rs
+++ b/libp2p/src/builder/phase/behaviour.rs
@@ -1,8 +1,9 @@
+use std::{convert::Infallible, marker::PhantomData};
+
+use libp2p_swarm::NetworkBehaviour;
+
use super::*;
use crate::SwarmBuilder;
-use libp2p_swarm::NetworkBehaviour;
-use std::convert::Infallible;
-use std::marker::PhantomData;
pub struct BehaviourPhase {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs
index 80a83994eeb..d3138cb8b8d 100644
--- a/libp2p/src/builder/phase/build.rs
+++ b/libp2p/src/builder/phase/build.rs
@@ -1,28 +1,31 @@
+use std::time::Duration;
+
+use libp2p_core::{transport::timeout::TransportTimeout, Transport};
+use libp2p_swarm::Swarm;
+
#[allow(unused_imports)]
use super::*;
-
use crate::SwarmBuilder;
-use libp2p_core::Transport;
-use libp2p_swarm::Swarm;
pub struct BuildPhase {
pub(crate) behaviour: B,
pub(crate) transport: T,
pub(crate) swarm_config: libp2p_swarm::Config,
+ pub(crate) connection_timeout: Duration,
}
-const CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
-
impl
SwarmBuilder>
{
+ /// Timeout of the [`TransportTimeout`] wrapping the transport.
+ pub fn with_connection_timeout(mut self, connection_timeout: Duration) -> Self {
+ self.phase.connection_timeout = connection_timeout;
+ self
+ }
+
pub fn build(self) -> Swarm {
Swarm::new(
- libp2p_core::transport::timeout::TransportTimeout::new(
- self.phase.transport,
- CONNECTION_TIMEOUT,
- )
- .boxed(),
+ TransportTimeout::new(self.phase.transport, self.phase.connection_timeout).boxed(),
self.phase.behaviour,
self.keypair.public().to_peer_id(),
self.phase.swarm_config,
diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs
index 638064d58bb..83653836a34 100644
--- a/libp2p/src/builder/phase/dns.rs
+++ b/libp2p/src/builder/phase/dns.rs
@@ -1,6 +1,7 @@
+use std::marker::PhantomData;
+
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
pub struct DnsPhase {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs
index ceb86819dc7..e2511267cd3 100644
--- a/libp2p/src/builder/phase/identity.rs
+++ b/libp2p/src/builder/phase/identity.rs
@@ -1,6 +1,7 @@
+use std::marker::PhantomData;
+
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
pub struct IdentityPhase {}
diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs
index e04621b2e3f..c3b951c8c75 100644
--- a/libp2p/src/builder/phase/other_transport.rs
+++ b/libp2p/src/builder/phase/other_transport.rs
@@ -1,20 +1,19 @@
-use std::convert::Infallible;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use std::{convert::Infallible, marker::PhantomData, sync::Arc};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::Transport;
+use libp2p_core::{
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ Transport,
+};
#[cfg(feature = "relay")]
use libp2p_core::{Negotiated, UpgradeInfo};
#[cfg(feature = "relay")]
use libp2p_identity::PeerId;
+use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
use crate::SwarmBuilder;
-use super::*;
-
pub struct OtherTransportPhase {
pub(crate) transport: T,
}
diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs
index 2a9154cda74..00a79e14a30 100644
--- a/libp2p/src/builder/phase/provider.rs
+++ b/libp2p/src/builder/phase/provider.rs
@@ -1,13 +1,15 @@
+use std::marker::PhantomData;
+
#[allow(unused_imports)]
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
/// Represents the phase where a provider is not yet specified.
-/// This is a marker type used in the type-state pattern to ensure compile-time checks of the builder's state.
+/// This is a marker type used in the type-state pattern to ensure compile-time checks of the
+/// builder's state.
pub enum NoProviderSpecified {}
-// Define enums for each of the possible runtime environments. These are used as markers in the type-state pattern,
-// allowing compile-time checks for the appropriate environment configuration.
+// Define enums for each of the possible runtime environments. These are used as markers in the
+// type-state pattern, allowing compile-time checks for the appropriate environment configuration.
#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))]
/// Represents the AsyncStd runtime environment.
@@ -26,7 +28,8 @@ pub struct ProviderPhase {}
impl SwarmBuilder {
/// Configures the SwarmBuilder to use the AsyncStd runtime.
- /// This method is only available when compiling for non-Wasm targets with the `async-std` feature enabled.
+ /// This method is only available when compiling for non-Wasm
+ /// targets with the `async-std` feature enabled.
#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))]
pub fn with_async_std(self) -> SwarmBuilder {
SwarmBuilder {
@@ -37,7 +40,8 @@ impl SwarmBuilder {
}
/// Configures the SwarmBuilder to use the Tokio runtime.
- /// This method is only available when compiling for non-Wasm targets with the `tokio` feature enabled
+ /// This method is only available when compiling for non-Wasm
+ /// targets with the `tokio` feature enabled
#[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))]
pub fn with_tokio(self) -> SwarmBuilder {
SwarmBuilder {
diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs
index e030e9493bb..1b6329c1095 100644
--- a/libp2p/src/builder/phase/quic.rs
+++ b/libp2p/src/builder/phase/quic.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::{marker::PhantomData, sync::Arc};
+
#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
use libp2p_core::muxing::StreamMuxer;
use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
@@ -8,7 +8,9 @@ use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
all(not(target_arch = "wasm32"), feature = "websocket")
))]
use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo};
-use std::{marker::PhantomData, sync::Arc};
+
+use super::*;
+use crate::SwarmBuilder;
pub struct QuicPhase {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs
index f8305f9d246..33dbf1fb54c 100644
--- a/libp2p/src/builder/phase/relay.rs
+++ b/libp2p/src/builder/phase/relay.rs
@@ -10,9 +10,8 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, Upgr
#[cfg(feature = "relay")]
use libp2p_identity::PeerId;
-use crate::SwarmBuilder;
-
use super::*;
+use crate::SwarmBuilder;
pub struct RelayPhase {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/swarm.rs b/libp2p/src/builder/phase/swarm.rs
index ee456ced927..e751ad672e4 100644
--- a/libp2p/src/builder/phase/swarm.rs
+++ b/libp2p/src/builder/phase/swarm.rs
@@ -1,6 +1,9 @@
#[allow(unused_imports)]
use super::*;
+#[allow(unused)] // used below but due to feature flag combinations, clippy gives an unnecessary warning.
+const DEFAULT_CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
+
#[allow(dead_code)]
pub struct SwarmPhase {
pub(crate) behaviour: B,
@@ -20,6 +23,7 @@ macro_rules! impl_with_swarm_config {
behaviour: self.phase.behaviour,
transport: self.phase.transport,
swarm_config: constructor($config),
+ connection_timeout: DEFAULT_CONNECTION_TIMEOUT,
},
keypair: self.keypair,
phantom: std::marker::PhantomData,
diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs
index 4b7cf29b3d2..f38f52441e5 100644
--- a/libp2p/src/builder/phase/tcp.rs
+++ b/libp2p/src/builder/phase/tcp.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::marker::PhantomData;
+
#[cfg(all(
not(target_arch = "wasm32"),
any(feature = "tcp", feature = "websocket")
@@ -14,7 +14,9 @@ use libp2p_core::Transport;
use libp2p_core::{
upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo,
};
-use std::marker::PhantomData;
+
+use super::*;
+use crate::SwarmBuilder;
pub struct TcpPhase {}
diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs
index 68a85bb77b7..a23c6eca854 100644
--- a/libp2p/src/builder/phase/websocket.rs
+++ b/libp2p/src/builder/phase/websocket.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::marker::PhantomData;
+
#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox};
use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
@@ -15,7 +15,9 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo};
feature = "relay"
))]
use libp2p_identity::PeerId;
-use std::marker::PhantomData;
+
+use super::*;
+use crate::SwarmBuilder;
pub struct WebsocketPhase {
pub(crate) transport: T,
@@ -126,8 +128,8 @@ impl_websocket_builder!(
impl_websocket_builder!(
"tokio",
super::provider::Tokio,
- // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent
- // with above AsyncStd construction.
+ // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be
+ // consistent with above AsyncStd construction.
futures::future::ready(libp2p_dns::tokio::Transport::system(
libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default())
)),
diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs
index c93ba9d9991..93ae0547269 100644
--- a/libp2p/src/builder/select_muxer.rs
+++ b/libp2p/src/builder/select_muxer.rs
@@ -20,12 +20,15 @@
#![allow(unreachable_pub)]
+use std::iter::{Chain, Map};
+
use either::Either;
use futures::future;
-use libp2p_core::either::EitherFuture;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::UpgradeInfo;
-use std::iter::{Chain, Map};
+use libp2p_core::{
+ either::EitherFuture,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ UpgradeInfo,
+};
#[derive(Debug, Clone)]
pub struct SelectMuxerUpgrade(A, B);
diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs
index d6c7f8c172f..1ed760feb1b 100644
--- a/libp2p/src/builder/select_security.rs
+++ b/libp2p/src/builder/select_security.rs
@@ -21,13 +21,15 @@
#![allow(unreachable_pub)]
+use std::iter::{Chain, Map};
+
use either::Either;
-use futures::future::MapOk;
-use futures::{future, TryFutureExt};
-use libp2p_core::either::EitherFuture;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
+use futures::{future, future::MapOk, TryFutureExt};
+use libp2p_core::{
+ either::EitherFuture,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+};
use libp2p_identity::PeerId;
-use std::iter::{Chain, Map};
/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either
/// sub-upgrade.
diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs
index 58f911e9445..47e1142d0e9 100644
--- a/libp2p/src/lib.rs
+++ b/libp2p/src/lib.rs
@@ -34,11 +34,6 @@
pub use bytes;
pub use futures;
-#[doc(inline)]
-pub use libp2p_core::multihash;
-#[doc(inline)]
-pub use multiaddr;
-
#[doc(inline)]
pub use libp2p_allow_block_list as allow_block_list;
#[cfg(feature = "autonat")]
@@ -48,6 +43,8 @@ pub use libp2p_autonat as autonat;
pub use libp2p_connection_limits as connection_limits;
#[doc(inline)]
pub use libp2p_core as core;
+#[doc(inline)]
+pub use libp2p_core::multihash;
#[cfg(feature = "dcutr")]
#[doc(inline)]
pub use libp2p_dcutr as dcutr;
@@ -140,6 +137,8 @@ pub use libp2p_webtransport_websys as webtransport_websys;
#[cfg(feature = "yamux")]
#[doc(inline)]
pub use libp2p_yamux as yamux;
+#[doc(inline)]
+pub use multiaddr;
mod builder;
mod transport_ext;
@@ -149,15 +148,23 @@ pub mod bandwidth;
#[cfg(doc)]
pub mod tutorials;
-pub use self::builder::SwarmBuilder;
-pub use self::core::{
- transport::TransportError,
- upgrade::{InboundUpgrade, OutboundUpgrade},
- Transport,
-};
-pub use self::multiaddr::{multiaddr as build_multiaddr, Multiaddr};
-pub use self::swarm::Swarm;
-pub use self::transport_ext::TransportExt;
+#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
+pub use builder::WebsocketError as WebsocketBuilderError;
pub use libp2p_identity as identity;
pub use libp2p_identity::PeerId;
pub use libp2p_swarm::{Stream, StreamProtocol};
+
+pub use self::{
+ builder::{
+ BehaviourError as BehaviourBuilderError, SwarmBuilder,
+ TransportError as TransportBuilderError,
+ },
+ core::{
+ transport::TransportError,
+ upgrade::{InboundUpgrade, OutboundUpgrade},
+ Transport,
+ },
+ multiaddr::{multiaddr as build_multiaddr, Multiaddr},
+ swarm::Swarm,
+ transport_ext::TransportExt,
+};
diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs
index 4f07484fc1f..0ed5b816903 100644
--- a/libp2p/src/transport_ext.rs
+++ b/libp2p/src/transport_ext.rs
@@ -20,15 +20,19 @@
//! Provides the `TransportExt` trait.
+use std::sync::Arc;
+
+use libp2p_identity::PeerId;
+
#[allow(deprecated)]
use crate::bandwidth::{BandwidthLogging, BandwidthSinks};
-use crate::core::{
- muxing::{StreamMuxer, StreamMuxerBox},
- transport::Boxed,
+use crate::{
+ core::{
+ muxing::{StreamMuxer, StreamMuxerBox},
+ transport::Boxed,
+ },
+ Transport,
};
-use crate::Transport;
-use libp2p_identity::PeerId;
-use std::sync::Arc;
/// Trait automatically implemented on all objects that implement `Transport`. Provides some
/// additional utilities.
@@ -42,23 +46,17 @@ pub trait TransportExt: Transport {
/// # Example
///
/// ```
- /// use libp2p_yamux as yamux;
+ /// use libp2p::{core::upgrade, identity, Transport, TransportExt};
/// use libp2p_noise as noise;
/// use libp2p_tcp as tcp;
- /// use libp2p::{
- /// core::upgrade,
- /// identity,
- /// TransportExt,
- /// Transport,
- /// };
+ /// use libp2p_yamux as yamux;
///
/// let id_keys = identity::Keypair::generate_ed25519();
///
/// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true))
/// .upgrade(upgrade::Version::V1)
/// .authenticate(
- /// noise::Config::new(&id_keys)
- /// .expect("Signing libp2p-noise static DH keypair failed."),
+ /// noise::Config::new(&id_keys).expect("Signing libp2p-noise static DH keypair failed."),
/// )
/// .multiplex(yamux::Config::default())
/// .boxed();
diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs
index 0963c0ca59e..06a4dad4037 100644
--- a/libp2p/src/tutorials/hole_punching.rs
+++ b/libp2p/src/tutorials/hole_punching.rs
@@ -57,8 +57,8 @@
//! cargo build --bin relay-server-example
//! ```
//!
-//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy
-//! it to your server.
+//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally,
+//! copy it to your server.
//!
//! On your server, start the relay server binary:
//!
@@ -98,7 +98,8 @@
//!
//! ``` bash
//! $ libp2p-lookup direct --address /ip4/111.11.111.111/tcp/4001
-//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded.
+//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN")
+//! succeeded.
//!
//! Protocol version: "/TODO/0.0.1"
//! Agent version: "rust-libp2p/0.36.0"
@@ -163,12 +164,18 @@
//! [`Multiaddr`](crate::Multiaddr).
//!
//! ``` ignore
-//! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer }
+//! [2022-01-30T12:54:10Z INFO client] Established connection to
+//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address:
+//! "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/
+//! p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X",
+//! role_override: Dialer }
//! ```
//!
-//! 2. The direct connection upgrade, also known as hole punch, succeeding.
-//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection.
+//! 2. The direct connection upgrade, also known as hole punch, succeeding. Reported by
+//! [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with
+//! the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection.
//!
//! ``` ignore
-//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) }
+//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id:
+//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) }
//! ```
diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs
index 31bf5ba3a14..ebaea29f33a 100644
--- a/libp2p/src/tutorials/ping.rs
+++ b/libp2p/src/tutorials/ping.rs
@@ -72,6 +72,7 @@
//!
//! ```rust
//! use std::error::Error;
+//!
//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
@@ -98,8 +99,9 @@
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -139,12 +141,14 @@
//! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly
//! separate _how_ to send bytes from _what_ bytes and to _whom_ to send.
//!
-//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end:
+//! With the above in mind, let's extend our example, creating a
+//! [`ping::Behaviour`](crate::ping::Behaviour) at the end:
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -174,8 +178,9 @@
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -209,8 +214,9 @@
//!
//! ```rust
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -226,7 +232,6 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
//! .build();
//!
//! Ok(())
@@ -261,8 +266,9 @@
//!
//! ```rust
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux, Multiaddr};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -278,7 +284,6 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
//! .build();
//!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned
@@ -305,9 +310,10 @@
//!
//! ```no_run
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
-//! use libp2p::{noise, ping, tcp, yamux, Multiaddr, swarm::SwarmEvent};
+//!
//! use futures::prelude::*;
+//! use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box> {
@@ -323,7 +329,6 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
//! .build();
//!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned
diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md
index b5ffd7f0495..e7f68f6f8fe 100644
--- a/misc/allow-block-list/CHANGELOG.md
+++ b/misc/allow-block-list/CHANGELOG.md
@@ -1,13 +1,10 @@
-## 0.4.2
-
-- Deprecate `void` crate.
- See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676).
-
## 0.4.1
- Add getters & setters for the allowed/blocked peers.
Return a `bool` for every "insert/remove" function, informing if a change was performed.
See [PR 5572](https://github.com/libp2p/rust-libp2p/pull/5572).
+- Deprecate `void` crate.
+ See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676).
## 0.4.0
diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml
index 66ee3ef9124..c169be87056 100644
--- a/misc/allow-block-list/Cargo.toml
+++ b/misc/allow-block-list/Cargo.toml
@@ -3,7 +3,7 @@ name = "libp2p-allow-block-list"
edition = "2021"
rust-version = { workspace = true }
description = "Allow/block list connection management for libp2p."
-version = "0.4.2"
+version = "0.4.1"
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
keywords = ["peer-to-peer", "libp2p", "networking"]
diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs
index f93cf4ffefa..ea0d56b5a67 100644
--- a/misc/allow-block-list/src/lib.rs
+++ b/misc/allow-block-list/src/lib.rs
@@ -31,12 +31,12 @@
//! #[derive(NetworkBehaviour)]
//! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
//! struct MyBehaviour {
-//! allowed_peers: allow_block_list::Behaviour,
+//! allowed_peers: allow_block_list::Behaviour,
//! }
//!
//! # fn main() {
//! let behaviour = MyBehaviour {
-//! allowed_peers: allow_block_list::Behaviour::default()
+//! allowed_peers: allow_block_list::Behaviour::default(),
//! };
//! # }
//! ```
@@ -51,27 +51,29 @@
//! #[derive(NetworkBehaviour)]
//! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
//! struct MyBehaviour {
-//! blocked_peers: allow_block_list::Behaviour,
+//! blocked_peers: allow_block_list::Behaviour,
//! }
//!
//! # fn main() {
//! let behaviour = MyBehaviour {
-//! blocked_peers: allow_block_list::Behaviour::default()
+//! blocked_peers: allow_block_list::Behaviour::default(),
//! };
//! # }
//! ```
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use std::{
+ collections::{HashSet, VecDeque},
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll, Waker},
+};
+
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{HashSet, VecDeque};
-use std::convert::Infallible;
-use std::fmt;
-use std::task::{Context, Poll, Waker};
/// A [`NetworkBehaviour`] that can act as an allow or block list.
#[derive(Default, Debug)]
@@ -101,7 +103,8 @@ impl Behaviour {
/// Allow connections to the given peer.
///
- /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set.
+ /// Returns whether the peer was newly inserted. Does nothing if the peer
+ /// was already present in the set.
pub fn allow_peer(&mut self, peer: PeerId) -> bool {
let inserted = self.state.peers.insert(peer);
if inserted {
@@ -116,7 +119,8 @@ impl Behaviour {
///
/// All active connections to this peer will be closed immediately.
///
- /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set.
+ /// Returns whether the peer was present in the set. Does nothing if the peer
+ /// was not present in the set.
pub fn disallow_peer(&mut self, peer: PeerId) -> bool {
let removed = self.state.peers.remove(&peer);
if removed {
@@ -139,7 +143,8 @@ impl Behaviour {
///
/// All active connections to this peer will be closed immediately.
///
- /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set.
+ /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in
+ /// the set.
pub fn block_peer(&mut self, peer: PeerId) -> bool {
let inserted = self.state.peers.insert(peer);
if inserted {
@@ -153,7 +158,8 @@ impl Behaviour {
/// Unblock connections to a given peer.
///
- /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set.
+ /// Returns whether the peer was present in the set. Does nothing if the peer
+ /// was not present in the set.
pub fn unblock_peer(&mut self, peer: PeerId) -> bool {
let removed = self.state.peers.remove(&peer);
if removed {
@@ -294,10 +300,11 @@ where
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
+ use super::*;
+
#[async_std::test]
async fn cannot_dial_blocked_peer() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default());
diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs
index 016a7f2cfd4..c8df5be5653 100644
--- a/misc/connection-limits/src/lib.rs
+++ b/misc/connection-limits/src/lib.rs
@@ -18,6 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::{HashMap, HashSet},
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll},
+};
+
use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
@@ -25,22 +32,22 @@ use libp2p_swarm::{
dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{HashMap, HashSet};
-use std::convert::Infallible;
-use std::fmt;
-use std::task::{Context, Poll};
/// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`].
///
-/// For these limits to take effect, this needs to be composed into the behaviour tree of your application.
+/// For these limits to take effect, this needs to be composed
+/// into the behaviour tree of your application.
///
-/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
-/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted.
-/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
-/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this**
-/// behaviour denied the connection.
+/// If a connection is denied due to a limit, either a
+/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
+/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError)
+/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively
+/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
+/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if)
+/// **this** behaviour denied the connection.
///
-/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error.
+/// If you employ multiple [`NetworkBehaviour`]s that manage connections,
+/// it may also be a different error.
///
/// # Example
///
@@ -53,9 +60,9 @@ use std::task::{Context, Poll};
/// #[derive(NetworkBehaviour)]
/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
/// struct MyBehaviour {
-/// identify: identify::Behaviour,
-/// ping: ping::Behaviour,
-/// limits: connection_limits::Behaviour
+/// identify: identify::Behaviour,
+/// ping: ping::Behaviour,
+/// limits: connection_limits::Behaviour,
/// }
/// ```
pub struct Behaviour {
@@ -367,14 +374,16 @@ impl NetworkBehaviour for Behaviour {
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_swarm::{
- behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError,
- ListenError, Swarm, SwarmEvent,
+ behaviour::toggle::Toggle,
+ dial_opts::{DialOpts, PeerCondition},
+ DialError, ListenError, Swarm, SwarmEvent,
};
use libp2p_swarm_test::SwarmExt;
use quickcheck::*;
+ use super::*;
+
#[test]
fn max_outgoing() {
use rand::Rng;
diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml
index 003993a512c..c5e96553a5c 100644
--- a/misc/keygen/Cargo.toml
+++ b/misc/keygen/Cargo.toml
@@ -17,7 +17,6 @@ clap = { version = "4.5.6", features = ["derive"] }
zeroize = "1"
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.117"
-libp2p-core = { workspace = true }
base64 = "0.22.1"
libp2p-identity = { workspace = true }
diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs
index e6c563b3c32..7d46b1849bd 100644
--- a/misc/keygen/src/config.rs
+++ b/misc/keygen/src/config.rs
@@ -1,10 +1,8 @@
+use std::{error::Error, path::Path};
+
use base64::prelude::*;
+use libp2p_identity::{Keypair, PeerId};
use serde::{Deserialize, Serialize};
-use std::error::Error;
-use std::path::Path;
-
-use libp2p_identity::Keypair;
-use libp2p_identity::PeerId;
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs
index 64d98005369..4c4d3bfbf66 100644
--- a/misc/keygen/src/main.rs
+++ b/misc/keygen/src/main.rs
@@ -1,9 +1,12 @@
+use std::{
+ error::Error,
+ path::PathBuf,
+ str::{self, FromStr},
+ sync::mpsc,
+ thread,
+};
+
use base64::prelude::*;
-use std::error::Error;
-use std::path::PathBuf;
-use std::str::{self, FromStr};
-use std::sync::mpsc;
-use std::thread;
mod config;
diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml
index f18cb09d193..2d04b6cf2ac 100644
--- a/misc/memory-connection-limits/Cargo.toml
+++ b/misc/memory-connection-limits/Cargo.toml
@@ -14,15 +14,13 @@ memory-stats = { version = "1", features = ["always_use_statm"] }
libp2p-core = { workspace = true }
libp2p-swarm = { workspace = true }
libp2p-identity = { workspace = true, features = ["peerid"] }
-sysinfo = "0.30"
+sysinfo = "0.33"
tracing = { workspace = true }
[dev-dependencies]
-async-std = { version = "1.12.0", features = ["attributes"] }
libp2p-identify = { workspace = true }
libp2p-swarm-derive = { path = "../../swarm-derive" }
libp2p-swarm-test = { path = "../../swarm-test" }
-rand = "0.8.5"
[lints]
workspace = true
diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs
index e2a89977991..28fa5598481 100644
--- a/misc/memory-connection-limits/src/lib.rs
+++ b/misc/memory-connection-limits/src/lib.rs
@@ -18,35 +18,40 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll},
+ time::{Duration, Instant},
+};
+
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::convert::Infallible;
-
-use std::{
- fmt,
- task::{Context, Poll},
- time::{Duration, Instant},
-};
use sysinfo::MemoryRefreshKind;
/// A [`NetworkBehaviour`] that enforces a set of memory usage based limits.
///
-/// For these limits to take effect, this needs to be composed into the behaviour tree of your application.
+/// For these limits to take effect, this needs to be composed
+/// into the behaviour tree of your application.
///
-/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
-/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted.
-/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
-/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this**
-/// behaviour denied the connection.
+/// If a connection is denied due to a limit, either a
+/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
+/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError)
+/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively
+/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
+/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error
+/// if (and only if) **this** behaviour denied the connection.
///
-/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error.
+/// If you employ multiple [`NetworkBehaviour`]s that manage connections,
+/// it may also be a different error.
///
/// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive.
-/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour.
+/// If you need to employ both of them,
+/// compose two instances of [Behaviour] into your custom behaviour.
///
/// # Example
///
@@ -58,8 +63,8 @@ use sysinfo::MemoryRefreshKind;
/// #[derive(NetworkBehaviour)]
/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
/// struct MyBehaviour {
-/// identify: identify::Behaviour,
-/// limits: memory_connection_limits::Behaviour
+/// identify: identify::Behaviour,
+/// limits: memory_connection_limits::Behaviour,
/// }
/// ```
pub struct Behaviour {
@@ -68,7 +73,8 @@ pub struct Behaviour {
last_refreshed: Instant,
}
-/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale.
+/// The maximum duration for which the retrieved memory-stats
+/// of the process are allowed to be stale.
///
/// Once exceeded, we will retrieve new stats.
const MAX_STALE_DURATION: Duration = Duration::from_millis(100);
@@ -94,7 +100,7 @@ impl Behaviour {
use sysinfo::{RefreshKind, System};
let system_memory_bytes = System::new_with_specifics(
- RefreshKind::new().with_memory(MemoryRefreshKind::new().with_ram()),
+ RefreshKind::default().with_memory(MemoryRefreshKind::default().with_ram()),
)
.total_memory();
diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs
index 7f89e2c7a9a..e82ad67d076 100644
--- a/misc/memory-connection-limits/tests/max_bytes.rs
+++ b/misc/memory-connection-limits/tests/max_bytes.rs
@@ -20,14 +20,14 @@
mod util;
+use std::time::Duration;
+
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_memory_connection_limits::*;
-use std::time::Duration;
-use util::*;
-
use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm};
use libp2p_swarm_test::SwarmExt;
+use util::*;
#[test]
fn max_bytes() {
@@ -69,7 +69,8 @@ fn max_bytes() {
.expect("Unexpected connection limit.");
}
- std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it.
+ std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try
+ // to exceed it.
match network
.dial(
diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs
index bfb1b504af5..bdadad437b8 100644
--- a/misc/memory-connection-limits/tests/max_percentage.rs
+++ b/misc/memory-connection-limits/tests/max_percentage.rs
@@ -20,24 +20,24 @@
mod util;
+use std::time::Duration;
+
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_memory_connection_limits::*;
-use std::time::Duration;
-use sysinfo::{MemoryRefreshKind, RefreshKind};
-use util::*;
-
use libp2p_swarm::{
dial_opts::{DialOpts, PeerCondition},
DialError, Swarm,
};
use libp2p_swarm_test::SwarmExt;
+use sysinfo::{MemoryRefreshKind, RefreshKind};
+use util::*;
#[test]
fn max_percentage() {
const CONNECTION_LIMIT: usize = 20;
let system_info = sysinfo::System::new_with_specifics(
- RefreshKind::new().with_memory(MemoryRefreshKind::new().with_ram()),
+ RefreshKind::default().with_memory(MemoryRefreshKind::default().with_ram()),
);
let mut network = Swarm::new_ephemeral(|_| TestBehaviour {
@@ -76,7 +76,9 @@ fn max_percentage() {
.expect("Unexpected connection limit.");
}
- std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it.
+ // Memory stats are only updated every 100ms internally,
+ // ensure they are up-to-date when we try to exceed it.
+ std::thread::sleep(Duration::from_millis(100));
match network
.dial(
diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs
index 333b0ee135f..205f4d13bc4 100644
--- a/misc/memory-connection-limits/tests/util/mod.rs
+++ b/misc/memory-connection-limits/tests/util/mod.rs
@@ -18,7 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::task::{Context, Poll};
+use std::{
+ convert::Infallible,
+ task::{Context, Poll},
+};
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
@@ -26,7 +29,6 @@ use libp2p_swarm::{
dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::convert::Infallible;
#[derive(libp2p_swarm_derive::NetworkBehaviour)]
#[behaviour(prelude = "libp2p_swarm::derive_prelude")]
diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs
index 8a0f54e5b65..b6308ed1b51 100644
--- a/misc/metrics/src/bandwidth.rs
+++ b/misc/metrics/src/bandwidth.rs
@@ -1,4 +1,10 @@
-use crate::protocol_stack;
+use std::{
+ convert::TryFrom as _,
+ io,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
use futures::{
future::{MapOk, TryFutureExt},
io::{IoSlice, IoSliceMut},
@@ -16,12 +22,8 @@ use prometheus_client::{
metrics::{counter::Counter, family::Family},
registry::{Registry, Unit},
};
-use std::{
- convert::TryFrom as _,
- io,
- pin::Pin,
- task::{Context, Poll},
-};
+
+use crate::protocol_stack;
#[derive(Debug, Clone)]
#[pin_project::pin_project]
diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs
index 3e60dca2cab..6a0f27394e9 100644
--- a/misc/metrics/src/dcutr.rs
+++ b/misc/metrics/src/dcutr.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::registry::Registry;
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{counter::Counter, family::Family},
+ registry::Registry,
+};
pub(crate) struct Metrics {
events: Family,
diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs
index 2d90b92fbc6..b3e2e11f0b0 100644
--- a/misc/metrics/src/gossipsub.rs
+++ b/misc/metrics/src/gossipsub.rs
@@ -18,8 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::registry::Registry;
+use prometheus_client::{metrics::counter::Counter, registry::Registry};
pub(crate) struct Metrics {
messages: Counter,
diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs
index 03ac3f9634e..b16c6a56ccf 100644
--- a/misc/metrics/src/identify.rs
+++ b/misc/metrics/src/identify.rs
@@ -18,17 +18,21 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol_stack;
+use std::{
+ collections::HashMap,
+ sync::{Arc, Mutex},
+};
+
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
-use prometheus_client::collector::Collector;
-use prometheus_client::encoding::{DescriptorEncoder, EncodeMetric};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::gauge::ConstGauge;
-use prometheus_client::metrics::MetricType;
-use prometheus_client::registry::Registry;
-use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use prometheus_client::{
+ collector::Collector,
+ encoding::{DescriptorEncoder, EncodeMetric},
+ metrics::{counter::Counter, gauge::ConstGauge, MetricType},
+ registry::Registry,
+};
+
+use crate::protocol_stack;
const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[
#[cfg(feature = "dcutr")]
diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs
index bd5a6526737..0a2a8038511 100644
--- a/misc/metrics/src/kad.rs
+++ b/misc/metrics/src/kad.rs
@@ -18,11 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
pub(crate) struct Metrics {
query_result_get_record_ok: Counter,
diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs
index 74fd15e2181..1fd79e7846f 100644
--- a/misc/metrics/src/lib.rs
+++ b/misc/metrics/src/lib.rs
@@ -67,8 +67,8 @@ impl Metrics {
/// Create a new set of Swarm and protocol [`Metrics`].
///
/// ```
- /// use prometheus_client::registry::Registry;
/// use libp2p_metrics::Metrics;
+ /// use prometheus_client::registry::Registry;
/// let mut registry = Registry::default();
/// let metrics = Metrics::new(&mut registry);
/// ```
diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs
index afdd05134a6..ce653c72ea1 100644
--- a/misc/metrics/src/ping.rs
+++ b/misc/metrics/src/ping.rs
@@ -18,11 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
#[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)]
struct FailureLabels {
diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs
index 607daf3f1e1..d4c25b6eb3e 100644
--- a/misc/metrics/src/relay.rs
+++ b/misc/metrics/src/relay.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::registry::Registry;
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{counter::Counter, family::Family},
+ registry::Registry,
+};
pub(crate) struct Metrics {
events: Family,
diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs
index 51c0a0af253..6e95d082de6 100644
--- a/misc/metrics/src/swarm.rs
+++ b/misc/metrics/src/swarm.rs
@@ -18,18 +18,25 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use std::{
+ collections::HashMap,
+ sync::{Arc, Mutex},
+};
-use crate::protocol_stack;
use libp2p_swarm::{ConnectionId, DialError, SwarmEvent};
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
use web_time::Instant;
+use crate::protocol_stack;
+
pub(crate) struct Metrics {
connections_incoming: Family,
connections_incoming_error: Family,
diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml
index 1bbe3642477..66ab434b613 100644
--- a/misc/multistream-select/Cargo.toml
+++ b/misc/multistream-select/Cargo.toml
@@ -22,9 +22,8 @@ unsigned-varint = { workspace = true }
async-std = { version = "1.6.2", features = ["attributes"] }
futures_ringbuf = "0.4.0"
quickcheck = { workspace = true }
-rand = "0.8"
rw-stream-sink = { workspace = true }
-tracing-subscriber = { workspace = true, features = ["env-filter"] }
+libp2p-test-utils = { workspace = true }
# Passing arguments to the docsrs builder in order to properly document cfg's.
# More information: https://docs.rs/about/builds#cross-compiling
diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs
index 83bb4909041..bd537e7fc7b 100644
--- a/misc/multistream-select/src/dialer_select.rs
+++ b/misc/multistream-select/src/dialer_select.rs
@@ -20,10 +20,6 @@
//! Protocol negotiation strategies for the peer acting as the dialer.
-use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError};
-use crate::{Negotiated, NegotiationError, Version};
-
-use futures::prelude::*;
use std::{
convert::TryFrom as _,
iter, mem,
@@ -31,6 +27,13 @@ use std::{
task::{Context, Poll},
};
+use futures::prelude::*;
+
+use crate::{
+ protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError},
+ Negotiated, NegotiationError, Version,
+};
+
/// Returns a `Future` that negotiates a protocol on the given I/O stream
/// for a peer acting as the _dialer_ (or _initiator_).
///
@@ -84,8 +87,9 @@ enum State {
impl Future for DialerSelectFuture
where
- // The Unpin bound here is required because we produce a `Negotiated` as the output.
- // It also makes the implementation considerably easier to write.
+ // The Unpin bound here is required because we produce
+ // a `Negotiated` as the output. It also makes
+ // the implementation considerably easier to write.
R: AsyncRead + AsyncWrite + Unpin,
I: Iterator,
I::Item: AsRef,
@@ -204,14 +208,18 @@ where
#[cfg(test)]
mod tests {
- use super::*;
- use crate::listener_select_proto;
- use async_std::future::timeout;
- use async_std::net::{TcpListener, TcpStream};
- use quickcheck::{Arbitrary, Gen, GenRange};
use std::time::Duration;
+
+ use async_std::{
+ future::timeout,
+ net::{TcpListener, TcpStream},
+ };
+ use libp2p_test_utils::EnvFilter;
+ use quickcheck::{Arbitrary, Gen, GenRange};
use tracing::metadata::LevelFilter;
- use tracing_subscriber::EnvFilter;
+
+ use super::*;
+ use crate::listener_select_proto;
#[test]
fn select_proto_basic() {
@@ -267,13 +275,11 @@ mod tests {
ListenerProtos(listen_protos): ListenerProtos,
DialPayload(dial_payload): DialPayload,
) {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(
- EnvFilter::builder()
- .with_default_directive(LevelFilter::DEBUG.into())
- .from_env_lossy(),
- )
- .try_init();
+ libp2p_test_utils::with_env_filter(
+ EnvFilter::builder()
+ .with_default_directive(LevelFilter::DEBUG.into())
+ .from_env_lossy(),
+ );
async_std::task::block_on(async move {
let listener = TcpListener::bind("0.0.0.0:0").await.unwrap();
@@ -353,8 +359,8 @@ mod tests {
.unwrap();
assert_eq!(proto, "/proto1");
- // client can close the connection even though protocol negotiation is not yet done, i.e.
- // `_server_connection` had been untouched.
+ // client can close the connection even though protocol negotiation is not yet done,
+ // i.e. `_server_connection` had been untouched.
io.close().await.unwrap();
});
diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs
index 3a7988d0548..8062455de46 100644
--- a/misc/multistream-select/src/length_delimited.rs
+++ b/misc/multistream-select/src/length_delimited.rs
@@ -18,8 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::{Buf as _, BufMut as _, Bytes, BytesMut};
-use futures::{io::IoSlice, prelude::*};
use std::{
convert::TryFrom as _,
io,
@@ -27,6 +25,9 @@ use std::{
task::{Context, Poll},
};
+use bytes::{Buf as _, BufMut as _, Bytes, BytesMut};
+use futures::{io::IoSlice, prelude::*};
+
const MAX_LEN_BYTES: u16 = 2;
const MAX_FRAME_SIZE: u16 = (1 << (MAX_LEN_BYTES * 8 - MAX_LEN_BYTES)) - 1;
const DEFAULT_BUFFER_SIZE: usize = 64;
@@ -383,10 +384,12 @@ where
#[cfg(test)]
mod tests {
- use crate::length_delimited::LengthDelimited;
+ use std::io::ErrorKind;
+
use futures::{io::Cursor, prelude::*};
use quickcheck::*;
- use std::io::ErrorKind;
+
+ use crate::length_delimited::LengthDelimited;
#[test]
fn basic_read() {
diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs
index 5565623f25e..96432de6cb0 100644
--- a/misc/multistream-select/src/lib.rs
+++ b/misc/multistream-select/src/lib.rs
@@ -70,20 +70,21 @@
//!
//! ```no_run
//! use async_std::net::TcpStream;
-//! use multistream_select::{dialer_select_proto, Version};
//! use futures::prelude::*;
+//! use multistream_select::{dialer_select_proto, Version};
//!
//! async_std::task::block_on(async move {
//! let socket = TcpStream::connect("127.0.0.1:10333").await.unwrap();
//!
//! let protos = vec!["/echo/1.0.0", "/echo/2.5.0"];
-//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1).await.unwrap();
+//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1)
+//! .await
+//! .unwrap();
//!
//! println!("Negotiated protocol: {:?}", protocol);
//! // You can now use `_io` to communicate with the remote.
//! });
//! ```
-//!
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
@@ -93,10 +94,12 @@ mod listener_select;
mod negotiated;
mod protocol;
-pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture};
-pub use self::listener_select::{listener_select_proto, ListenerSelectFuture};
-pub use self::negotiated::{Negotiated, NegotiatedComplete, NegotiationError};
-pub use self::protocol::ProtocolError;
+pub use self::{
+ dialer_select::{dialer_select_proto, DialerSelectFuture},
+ listener_select::{listener_select_proto, ListenerSelectFuture},
+ negotiated::{Negotiated, NegotiatedComplete, NegotiationError},
+ protocol::ProtocolError,
+};
/// Supported multistream-select versions.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs
index b4236310a1d..cd5af72a9d0 100644
--- a/misc/multistream-select/src/listener_select.rs
+++ b/misc/multistream-select/src/listener_select.rs
@@ -21,11 +21,6 @@
//! Protocol negotiation strategies for the peer acting as the listener
//! in a multistream-select protocol negotiation.
-use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError};
-use crate::{Negotiated, NegotiationError};
-
-use futures::prelude::*;
-use smallvec::SmallVec;
use std::{
convert::TryFrom as _,
mem,
@@ -33,6 +28,14 @@ use std::{
task::{Context, Poll},
};
+use futures::prelude::*;
+use smallvec::SmallVec;
+
+use crate::{
+ protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError},
+ Negotiated, NegotiationError,
+};
+
/// Returns a `Future` that negotiates a protocol on the given I/O stream
/// for a peer acting as the _listener_ (or _responder_).
///
@@ -109,8 +112,10 @@ enum State {
impl Future for ListenerSelectFuture
where
- // The Unpin bound here is required because we produce a `Negotiated` as the output.
- // It also makes the implementation considerably easier to write.
+ // The Unpin bound here is required because
+ // we produce a `Negotiated` as the output.
+ // It also makes the implementation considerably
+ // easier to write.
R: AsyncRead + AsyncWrite + Unpin,
N: AsRef + Clone,
{
diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs
index a24014a4f5f..6693b3b5636 100644
--- a/misc/multistream-select/src/negotiated.rs
+++ b/misc/multistream-select/src/negotiated.rs
@@ -18,7 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError};
+use std::{
+ error::Error,
+ fmt, io, mem,
+ pin::Pin,
+ task::{Context, Poll},
+};
use futures::{
io::{IoSlice, IoSliceMut},
@@ -26,12 +31,8 @@ use futures::{
ready,
};
use pin_project::pin_project;
-use std::{
- error::Error,
- fmt, io, mem,
- pin::Pin,
- task::{Context, Poll},
-};
+
+use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError};
/// An I/O stream that has settled on an (application-layer) protocol to use.
///
@@ -59,8 +60,10 @@ pub struct NegotiatedComplete {
impl Future for NegotiatedComplete
where
- // `Unpin` is required not because of implementation details but because we produce the
- // `Negotiated` as the output of the future.
+ // `Unpin` is required not because of
+ // implementation details but because we produce
+ // the `Negotiated` as the output of the
+ // future.
TInner: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result, NegotiationError>;
@@ -250,13 +253,13 @@ where
}
// TODO: implement once method is stabilized in the futures crate
- /*unsafe fn initializer(&self) -> Initializer {
- match &self.state {
- State::Completed { io, .. } => io.initializer(),
- State::Expecting { io, .. } => io.inner_ref().initializer(),
- State::Invalid => panic!("Negotiated: Invalid state"),
- }
- }*/
+ // unsafe fn initializer(&self) -> Initializer {
+ // match &self.state {
+ // State::Completed { io, .. } => io.initializer(),
+ // State::Expecting { io, .. } => io.inner_ref().initializer(),
+ // State::Invalid => panic!("Negotiated: Invalid state"),
+ // }
+ // }
fn poll_read_vectored(
mut self: Pin<&mut Self>,
diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs
index 92b6acedaeb..93cd4ac02b5 100644
--- a/misc/multistream-select/src/protocol.rs
+++ b/misc/multistream-select/src/protocol.rs
@@ -25,19 +25,22 @@
//! `Stream` and `Sink` implementations of `MessageIO` and
//! `MessageReader`.
-use crate::length_delimited::{LengthDelimited, LengthDelimitedReader};
-use crate::Version;
-
-use bytes::{BufMut, Bytes, BytesMut};
-use futures::{io::IoSlice, prelude::*, ready};
use std::{
error::Error,
fmt, io,
pin::Pin,
task::{Context, Poll},
};
+
+use bytes::{BufMut, Bytes, BytesMut};
+use futures::{io::IoSlice, prelude::*, ready};
use unsigned_varint as uvi;
+use crate::{
+ length_delimited::{LengthDelimited, LengthDelimitedReader},
+ Version,
+};
+
/// The maximum number of supported protocols that can be processed.
const MAX_PROTOCOLS: usize = 1000;
@@ -461,10 +464,12 @@ impl fmt::Display for ProtocolError {
#[cfg(test)]
mod tests {
- use super::*;
- use quickcheck::*;
use std::iter;
+ use quickcheck::*;
+
+ use super::*;
+
impl Arbitrary for Protocol {
fn arbitrary(g: &mut Gen) -> Protocol {
let n = g.gen_range(1..g.size());
diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs
index c57b7da7db8..d49315a54c3 100644
--- a/misc/quick-protobuf-codec/src/lib.rs
+++ b/misc/quick-protobuf-codec/src/lib.rs
@@ -1,10 +1,10 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+use std::{io, marker::PhantomData};
+
use asynchronous_codec::{Decoder, Encoder};
use bytes::{Buf, BufMut, BytesMut};
use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend};
-use std::io;
-use std::marker::PhantomData;
mod generated;
@@ -182,12 +182,13 @@ impl From for io::Error {
#[cfg(test)]
mod tests {
- use super::*;
+ use std::error::Error;
+
use asynchronous_codec::FramedRead;
- use futures::io::Cursor;
- use futures::{FutureExt, StreamExt};
+ use futures::{io::Cursor, FutureExt, StreamExt};
use quickcheck::{Arbitrary, Gen, QuickCheck};
- use std::error::Error;
+
+ use super::*;
#[test]
fn honors_max_message_length() {
diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs
index 65dafe065d1..a434d3ce17f 100644
--- a/misc/quick-protobuf-codec/tests/large_message.rs
+++ b/misc/quick-protobuf-codec/tests/large_message.rs
@@ -1,7 +1,6 @@
use asynchronous_codec::Encoder;
use bytes::BytesMut;
-use quick_protobuf_codec::proto;
-use quick_protobuf_codec::Codec;
+use quick_protobuf_codec::{proto, Codec};
#[test]
fn encode_large_message() {
diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs
index 4ada7e73ba1..9c2deec8743 100644
--- a/misc/quickcheck-ext/src/lib.rs
+++ b/misc/quickcheck-ext/src/lib.rs
@@ -1,9 +1,9 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-pub use quickcheck::*;
-
use core::ops::Range;
+
use num_traits::sign::Unsigned;
+pub use quickcheck::*;
pub trait GenRange {
fn gen_range(&mut self, _range: Range) -> T;
diff --git a/misc/rw-stream-sink/src/lib.rs b/misc/rw-stream-sink/src/lib.rs
index f10e683ad33..5fdf1987252 100644
--- a/misc/rw-stream-sink/src/lib.rs
+++ b/misc/rw-stream-sink/src/lib.rs
@@ -27,7 +27,6 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use futures::{prelude::*, ready};
use std::{
io::{self, Read},
mem,
@@ -35,6 +34,8 @@ use std::{
task::{Context, Poll},
};
+use futures::{prelude::*, ready};
+
static_assertions::const_assert!(mem::size_of::() <= mem::size_of::());
/// Wraps a [`Stream`] and [`Sink`] whose items are buffers.
@@ -115,14 +116,16 @@ where
#[cfg(test)]
mod tests {
- use super::RwStreamSink;
- use async_std::task;
- use futures::{channel::mpsc, prelude::*};
use std::{
pin::Pin,
task::{Context, Poll},
};
+ use async_std::task;
+ use futures::{channel::mpsc, prelude::*};
+
+ use super::RwStreamSink;
+
// This struct merges a stream and a sink and is quite useful for tests.
struct Wrapper(St, Si);
diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md
index fe48de0f553..53341baa9ab 100644
--- a/misc/server/CHANGELOG.md
+++ b/misc/server/CHANGELOG.md
@@ -1,25 +1,15 @@
-## 0.12.8
-
-### Changed
-
-- Remove deprecated [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) from Dockerfile.
- See [PR 5610](https://github.com/libp2p/rust-libp2p/pull/5610).
-
-## 0.12.7
+## 0.12.6
### Changed
+- Stop using kad default protocol.
+ See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122).
- Use periodic and automatic bootstrap of Kademlia.
See [PR 4838](https://github.com/libp2p/rust-libp2p/pull/4838).
- Update to [`libp2p-identify` `v0.45.0`](protocols/identify/CHANGELOG.md#0450).
See [PR 4981](https://github.com/libp2p/rust-libp2p/pull/4981).
-
-## 0.12.6
-
-### Changed
-
-- Stop using kad default protocol.
- See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122)
+- Remove deprecated [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) from Dockerfile.
+ See [PR 5610](https://github.com/libp2p/rust-libp2p/pull/5610).
## 0.12.5
diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml
index 0954e2f38d8..b2b3d33ca1e 100644
--- a/misc/server/Cargo.toml
+++ b/misc/server/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "libp2p-server"
-version = "0.12.8"
+version = "0.12.6"
authors = ["Max Inden "]
edition = "2021"
repository = "https://github.com/libp2p/rust-libp2p"
@@ -14,7 +14,6 @@ license = "MIT"
base64 = "0.22"
clap = { version = "4.5.6", features = ["derive"] }
futures = { workspace = true }
-futures-timer = "3"
axum = "0.7"
libp2p = { workspace = true, features = [
"autonat",
@@ -34,8 +33,7 @@ libp2p = { workspace = true, features = [
"websocket",
] }
prometheus-client = { workspace = true }
-serde = "1.0.203"
-serde_derive = "1.0.125"
+serde = { version = "1", features = ["derive"] }
serde_json = "1.0"
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
tracing = { workspace = true }
diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile
index 12a8982eb3f..8b5aac2ae82 100644
--- a/misc/server/Dockerfile
+++ b/misc/server/Dockerfile
@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1.5-labs
-FROM rust:1.81.0 as chef
+FROM rust:1.83.0 as chef
RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin
WORKDIR /app
diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs
index 36b18c9798d..230d62a2ef3 100644
--- a/misc/server/src/behaviour.rs
+++ b/misc/server/src/behaviour.rs
@@ -1,13 +1,10 @@
-use libp2p::autonat;
-use libp2p::identify;
-use libp2p::kad;
-use libp2p::ping;
-use libp2p::relay;
-use libp2p::swarm::behaviour::toggle::Toggle;
-use libp2p::swarm::{NetworkBehaviour, StreamProtocol};
-use libp2p::{identity, Multiaddr, PeerId};
-use std::str::FromStr;
-use std::time::Duration;
+use std::{str::FromStr, time::Duration};
+
+use libp2p::{
+ autonat, identify, identity, kad, ping, relay,
+ swarm::{behaviour::toggle::Toggle, NetworkBehaviour, StreamProtocol},
+ Multiaddr, PeerId,
+};
const BOOTNODES: [&str; 4] = [
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs
index c3e3ec529c1..8f8c71369b2 100644
--- a/misc/server/src/config.rs
+++ b/misc/server/src/config.rs
@@ -1,7 +1,7 @@
+use std::{error::Error, path::Path};
+
use libp2p::Multiaddr;
-use serde_derive::Deserialize;
-use std::error::Error;
-use std::path::Path;
+use serde::Deserialize;
#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs
index cee1aa96e28..87a8adb94e0 100644
--- a/misc/server/src/http_service.rs
+++ b/misc/server/src/http_service.rs
@@ -18,15 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use axum::extract::State;
-use axum::http::StatusCode;
-use axum::response::IntoResponse;
-use axum::routing::get;
-use axum::Router;
-use prometheus_client::encoding::text::encode;
-use prometheus_client::registry::Registry;
-use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::{
+ net::SocketAddr,
+ sync::{Arc, Mutex},
+};
+
+use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router};
+use prometheus_client::{encoding::text::encode, registry::Registry};
use tokio::net::TcpListener;
const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0";
diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs
index 820921beaed..a633a80207e 100644
--- a/misc/server/src/main.rs
+++ b/misc/server/src/main.rs
@@ -1,18 +1,18 @@
+use std::{error::Error, path::PathBuf, str::FromStr};
+
use base64::Engine;
use clap::Parser;
use futures::stream::StreamExt;
-use libp2p::identity;
-use libp2p::identity::PeerId;
-use libp2p::kad;
-use libp2p::metrics::{Metrics, Recorder};
-use libp2p::swarm::SwarmEvent;
-use libp2p::tcp;
-use libp2p::{identify, noise, yamux};
-use prometheus_client::metrics::info::Info;
-use prometheus_client::registry::Registry;
-use std::error::Error;
-use std::path::PathBuf;
-use std::str::FromStr;
+use libp2p::{
+ identify, identity,
+ identity::PeerId,
+ kad,
+ metrics::{Metrics, Recorder},
+ noise,
+ swarm::SwarmEvent,
+ tcp, yamux,
+};
+use prometheus_client::{metrics::info::Info, registry::Registry};
use tracing_subscriber::EnvFilter;
use zeroize::Zeroizing;
diff --git a/misc/test-utils/CHANGELOG.md b/misc/test-utils/CHANGELOG.md
new file mode 100644
index 00000000000..0b8ed3ab931
--- /dev/null
+++ b/misc/test-utils/CHANGELOG.md
@@ -0,0 +1,4 @@
+## 0.1.0
+
+- Introduce `test-utils` crate.
+ See [PR 5725](https://github.com/libp2p/rust-libp2p/pull/5725).
\ No newline at end of file
diff --git a/misc/test-utils/Cargo.toml b/misc/test-utils/Cargo.toml
new file mode 100644
index 00000000000..438bcabcf2a
--- /dev/null
+++ b/misc/test-utils/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "libp2p-test-utils"
+version = "0.1.0"
+edition = "2021"
+authors = ["Krishang Shah "]
+license = "MIT"
+repository = "https://github.com/libp2p/rust-libp2p"
+publish = false
+
+[package.metadata.release]
+release = false
+
+[dependencies]
+tracing-subscriber = { workspace = true, features = ["env-filter"] }
+
+[lints]
+workspace = true
diff --git a/misc/test-utils/src/lib.rs b/misc/test-utils/src/lib.rs
new file mode 100644
index 00000000000..1155c79b614
--- /dev/null
+++ b/misc/test-utils/src/lib.rs
@@ -0,0 +1,15 @@
+pub use tracing_subscriber::EnvFilter;
+
+/// Initializes logging with the default environment filter (`RUST_LOG`).
+pub fn with_default_env_filter() {
+ with_env_filter(EnvFilter::from_default_env());
+}
+
+/// Initializes logging with a custom environment filter.
+/// Logs are written to standard error (`stderr`).
+pub fn with_env_filter(filter: impl Into<EnvFilter>) {
+ let _ = tracing_subscriber::fmt()
+ .with_env_filter(filter)
+ .with_writer(std::io::stderr)
+ .try_init();
+}
diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml
index 287388a49e7..2c50a2f8ab7 100644
--- a/misc/webrtc-utils/Cargo.toml
+++ b/misc/webrtc-utils/Cargo.toml
@@ -23,7 +23,6 @@ quick-protobuf-codec = { workspace = true }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
sha2 = "0.10.8"
-thiserror = { workspace = true }
tinytemplate = "1.2"
tracing = { workspace = true }
diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs
index a02c4d1116d..c32d33d5bab 100644
--- a/misc/webrtc-utils/src/fingerprint.rs
+++ b/misc/webrtc-utils/src/fingerprint.rs
@@ -19,9 +19,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::fmt;
+
use libp2p_core::multihash;
use sha2::Digest as _;
-use std::fmt;
pub const SHA256: &str = "sha-256";
const MULTIHASH_SHA256_CODE: u64 = 0x12;
diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs
index 9180acfc1ca..705db7f4697 100644
--- a/misc/webrtc-utils/src/noise.rs
+++ b/misc/webrtc-utils/src/noise.rs
@@ -19,16 +19,17 @@
// DEALINGS IN THE SOFTWARE.
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::UpgradeInfo;
+use libp2p_core::{
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ UpgradeInfo,
+};
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_noise as noise;
+pub use noise::Error;
use crate::fingerprint::Fingerprint;
-pub use noise::Error;
-
pub async fn inbound<T>(
id_keys: identity::Keypair,
stream: T,
@@ -89,9 +90,10 @@ pub(crate) fn noise_prologue(
#[cfg(test)]
mod tests {
- use super::*;
use hex_literal::hex;
+ use super::*;
+
#[test]
fn noise_prologue_tests() {
let a = Fingerprint::raw(hex!(
diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs
index 0796548f449..96a07f5db95 100644
--- a/misc/webrtc-utils/src/sdp.rs
+++ b/misc/webrtc-utils/src/sdp.rs
@@ -18,13 +18,13 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::fingerprint::Fingerprint;
-use serde::Serialize;
use std::net::{IpAddr, SocketAddr};
+
+use rand::{distributions::Alphanumeric, thread_rng, Rng};
+use serde::Serialize;
use tinytemplate::TinyTemplate;
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use crate::fingerprint::Fingerprint;
pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String {
let answer = render_description(
@@ -71,7 +71,8 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &
// the answerer is received, which adds additional latency. setup:active allows the answer and
// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED.
//
-// a=candidate:
+// a=candidate:
+//
//
// A transport address for a candidate that can be used for connectivity checks (RFC8839).
//
diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs
index 17f746a92a1..0ec420a103a 100644
--- a/misc/webrtc-utils/src/stream.rs
+++ b/misc/webrtc-utils/src/stream.rs
@@ -19,20 +19,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::Bytes;
-use futures::{channel::oneshot, prelude::*, ready};
-
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
-use crate::proto::{Flag, Message};
+use bytes::Bytes;
+use futures::{channel::oneshot, prelude::*, ready};
+
use crate::{
- stream::drop_listener::GracefullyClosed,
- stream::framed_dc::FramedDc,
- stream::state::{Closing, State},
+ proto::{Flag, Message},
+ stream::{
+ drop_listener::GracefullyClosed,
+ framed_dc::FramedDc,
+ state::{Closing, State},
+ },
};
mod drop_listener;
@@ -69,7 +71,8 @@ impl Stream
where
T: AsyncRead + AsyncWrite + Unpin + Clone,
{
- /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped.
+ /// Returns a new [`Stream`] and a [`DropListener`],
+ /// which will notify the receiver when/if the stream is dropped.
pub fn new(data_channel: T) -> (Self, DropListener) {
let (sender, receiver) = oneshot::channel();
@@ -175,8 +178,9 @@ where
buf: &[u8],
) -> Poll<io::Result<usize>> {
while self.state.read_flags_in_async_write() {
- // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we will poll the
- // underlying I/O resource once more. Is that allowed? How about introducing a state IoReadClosed?
+ // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we
+ // will poll the underlying I/O resource once more. Is that allowed? How
+ // about introducing a state IoReadClosed?
let Self {
read_buffer,
@@ -265,11 +269,12 @@ where
#[cfg(test)]
mod tests {
- use super::*;
- use crate::stream::framed_dc::codec;
use asynchronous_codec::Encoder;
use bytes::BytesMut;
+ use super::*;
+ use crate::stream::framed_dc::codec;
+
#[test]
fn max_data_len() {
// Largest possible message.
diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs
index 9745e3d4364..ea3f19d2f57 100644
--- a/misc/webrtc-utils/src/stream/drop_listener.rs
+++ b/misc/webrtc-utils/src/stream/drop_listener.rs
@@ -18,17 +18,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::channel::oneshot;
-use futures::channel::oneshot::Canceled;
-use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt};
+use std::{
+ future::Future,
+ io,
+ pin::Pin,
+ task::{Context, Poll},
+};
-use std::future::Future;
-use std::io;
-use std::pin::Pin;
-use std::task::{Context, Poll};
+use futures::{
+ channel::{oneshot, oneshot::Canceled},
+ AsyncRead, AsyncWrite, FutureExt, SinkExt,
+};
-use crate::proto::{Flag, Message};
-use crate::stream::framed_dc::FramedDc;
+use crate::{
+ proto::{Flag, Message},
+ stream::framed_dc::FramedDc,
+};
#[must_use]
pub struct DropListener {
diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs
index 721178fdcd3..a7b9b6214e0 100644
--- a/misc/webrtc-utils/src/stream/framed_dc.rs
+++ b/misc/webrtc-utils/src/stream/framed_dc.rs
@@ -21,8 +21,10 @@
use asynchronous_codec::Framed;
use futures::{AsyncRead, AsyncWrite};
-use crate::proto::Message;
-use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN};
+use crate::{
+ proto::Message,
+ stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN},
+};
pub(crate) type FramedDc<T> = Framed<T, quick_protobuf_codec::Codec<Message>>;
pub(crate) fn new<T>(inner: T) -> FramedDc<T>
diff --git a/misc/webrtc-utils/src/stream/state.rs b/misc/webrtc-utils/src/stream/state.rs
index 082325e4d47..006c1610d00 100644
--- a/misc/webrtc-utils/src/stream/state.rs
+++ b/misc/webrtc-utils/src/stream/state.rs
@@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::Bytes;
-
use std::io;
+use bytes::Bytes;
+
use crate::proto::Flag;
#[derive(Debug, Copy, Clone)]
@@ -46,8 +46,8 @@ pub(crate) enum State {
/// Represents the state of closing one half (either read or write) of the connection.
///
-/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag respectively
-/// and flushing the underlying connection.
+/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag
+/// respectively and flushing the underlying connection.
#[derive(Debug, Copy, Clone)]
pub(crate) enum Closing {
Requested,
@@ -181,8 +181,8 @@ impl State {
/// Whether we should read from the stream in the [`futures::AsyncWrite`] implementation.
///
- /// This is necessary for read-closed streams because we would otherwise not read any more flags from
- /// the socket.
+ /// This is necessary for read-closed streams because we would otherwise
+ /// not read any more flags from the socket.
pub(crate) fn read_flags_in_async_write(&self) -> bool {
matches!(self, Self::ReadClosed)
}
@@ -324,9 +324,10 @@ impl State {
#[cfg(test)]
mod tests {
- use super::*;
use std::io::ErrorKind;
+ use super::*;
+
#[test]
fn cannot_read_after_receiving_fin() {
let mut open = State::Open;
diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs
index 440ad73ed02..60b1934082f 100644
--- a/misc/webrtc-utils/src/transport.rs
+++ b/misc/webrtc-utils/src/transport.rs
@@ -1,7 +1,9 @@
-use crate::fingerprint::Fingerprint;
-use libp2p_core::{multiaddr::Protocol, Multiaddr};
use std::net::{IpAddr, SocketAddr};
+use libp2p_core::{multiaddr::Protocol, Multiaddr};
+
+use crate::fingerprint::Fingerprint;
+
/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing.
pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> {
let mut iter = addr.iter();
@@ -38,9 +40,10 @@ pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerpri
#[cfg(test)]
mod tests {
- use super::*;
use std::net::{Ipv4Addr, Ipv6Addr};
+ use super::*;
+
#[test]
fn parse_valid_address_with_certhash_and_p2p() {
let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS"
diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml
index 7f887c8b3b8..78650218f4b 100644
--- a/muxers/mplex/Cargo.toml
+++ b/muxers/mplex/Cargo.toml
@@ -32,7 +32,7 @@ libp2p-muxer-test-harness = { path = "../test-harness" }
libp2p-plaintext = { workspace = true }
libp2p-tcp = { workspace = true, features = ["async-io"] }
quickcheck = { workspace = true }
-tracing-subscriber = { workspace = true, features = ["env-filter"] }
+libp2p-test-utils = { workspace = true }
[[bench]]
name = "split_send_size"
diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs
index 44eafa884ac..7a0e9780ca7 100644
--- a/muxers/mplex/benches/split_send_size.rs
+++ b/muxers/mplex/benches/split_send_size.rs
@@ -21,22 +21,23 @@
//! A benchmark for the `split_send_size` configuration option
//! using different transports.
+use std::{pin::Pin, time::Duration};
+
use async_std::task;
use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};
-use futures::future::poll_fn;
-use futures::prelude::*;
-use futures::{channel::oneshot, future::join};
-use libp2p_core::muxing::StreamMuxerExt;
-use libp2p_core::transport::ListenerId;
-use libp2p_core::Endpoint;
-use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport};
+use futures::{
+ channel::oneshot,
+ future::{join, poll_fn},
+ prelude::*,
+};
+use libp2p_core::{
+ multiaddr::multiaddr, muxing, muxing::StreamMuxerExt, transport, transport::ListenerId,
+ upgrade, Endpoint, Multiaddr, Transport,
+};
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_mplex as mplex;
use libp2p_plaintext as plaintext;
-use std::pin::Pin;
-use std::time::Duration;
-use tracing_subscriber::EnvFilter;
type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>;
@@ -53,9 +54,7 @@ const BENCH_SIZES: [usize; 8] = [
];
fn prepare(c: &mut Criterion) {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let payload: Vec<u8> = vec![1; 1024 * 1024];
@@ -120,7 +119,8 @@ fn run(
}
transport::TransportEvent::Incoming { upgrade, .. } => {
let (_peer, mut conn) = upgrade.await.unwrap();
- // Just calling `poll_inbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though.
+ // Just calling `poll_inbound` without `poll` is fine here because mplex makes
+ // progress through all `poll_` functions. It is hacky though.
let mut s = poll_fn(|cx| conn.poll_inbound_unpin(cx))
.await
.expect("unexpected error");
@@ -158,7 +158,8 @@ fn run(
.unwrap()
.await
.unwrap();
- // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though.
+ // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress
+ // through all `poll_` functions. It is hacky though.
let mut stream = poll_fn(|cx| conn.poll_outbound_unpin(cx)).await.unwrap();
let mut off = 0;
loop {
diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs
index 014ee899280..20ee6bb4ed6 100644
--- a/muxers/mplex/src/codec.rs
+++ b/muxers/mplex/src/codec.rs
@@ -18,14 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use asynchronous_codec::{Decoder, Encoder};
-use bytes::{BufMut, Bytes, BytesMut};
-use libp2p_core::Endpoint;
use std::{
fmt,
hash::{Hash, Hasher},
io, mem,
};
+
+use asynchronous_codec::{Decoder, Encoder};
+use bytes::{BufMut, Bytes, BytesMut};
+use libp2p_core::Endpoint;
use unsigned_varint::{codec, encode};
// Maximum size for a packet: 1MB as per the spec.
@@ -298,7 +299,7 @@ impl Encoder for Codec {
role: Endpoint::Listener,
},
data,
- } => (num << 3 | 1, data),
+ } => ((num << 3) | 1, data),
Frame::Data {
stream_id:
LocalStreamId {
@@ -306,35 +307,35 @@ impl Encoder for Codec {
role: Endpoint::Dialer,
},
data,
- } => (num << 3 | 2, data),
+ } => ((num << 3) | 2, data),
Frame::Close {
stream_id:
LocalStreamId {
num,
role: Endpoint::Listener,
},
- } => (num << 3 | 3, Bytes::new()),
+ } => ((num << 3) | 3, Bytes::new()),
Frame::Close {
stream_id:
LocalStreamId {
num,
role: Endpoint::Dialer,
},
- } => (num << 3 | 4, Bytes::new()),
+ } => ((num << 3) | 4, Bytes::new()),
Frame::Reset {
stream_id:
LocalStreamId {
num,
role: Endpoint::Listener,
},
- } => (num << 3 | 5, Bytes::new()),
+ } => ((num << 3) | 5, Bytes::new()),
Frame::Reset {
stream_id:
LocalStreamId {
num,
role: Endpoint::Dialer,
},
- } => (num << 3 | 6, Bytes::new()),
+ } => ((num << 3) | 6, Bytes::new()),
};
let mut header_buf = encode::u64_buffer();
diff --git a/muxers/mplex/src/config.rs b/muxers/mplex/src/config.rs
index 3bf5e703a18..45bb05b2240 100644
--- a/muxers/mplex/src/config.rs
+++ b/muxers/mplex/src/config.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::MAX_FRAME_SIZE;
use std::cmp;
+use crate::codec::MAX_FRAME_SIZE;
+
pub(crate) const DEFAULT_MPLEX_PROTOCOL_NAME: &str = "/mplex/6.7.0";
/// Configuration for the multiplexer.
diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs
index 50fc0fc1d3f..eeea4ce734f 100644
--- a/muxers/mplex/src/io.rs
+++ b/muxers/mplex/src/io.rs
@@ -18,23 +18,31 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId};
-use crate::{MaxBufferBehaviour, MplexConfig};
+pub(crate) use std::io::{Error, Result};
+use std::{
+ cmp,
+ collections::VecDeque,
+ fmt, io, mem,
+ sync::Arc,
+ task::{Context, Poll, Waker},
+};
+
use asynchronous_codec::Framed;
use bytes::Bytes;
-use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef};
-use futures::{prelude::*, ready, stream::Fuse};
+use futures::{
+ prelude::*,
+ ready,
+ stream::Fuse,
+ task::{waker_ref, ArcWake, AtomicWaker, WakerRef},
+};
use nohash_hasher::{IntMap, IntSet};
use parking_lot::Mutex;
use smallvec::SmallVec;
-use std::collections::VecDeque;
-use std::{
- cmp, fmt, io, mem,
- sync::Arc,
- task::{Context, Poll, Waker},
-};
-pub(crate) use std::io::{Error, Result};
+use crate::{
+ codec::{Codec, Frame, LocalStreamId, RemoteStreamId},
+ MaxBufferBehaviour, MplexConfig,
+};
/// A connection identifier.
///
/// Randomly generated and mainly intended to improve log output
@@ -302,13 +310,11 @@ where
/// reading and writing immediately. The remote is informed
/// based on the current state of the substream:
///
- /// * If the substream was open, a `Reset` frame is sent at
- /// the next opportunity.
- /// * If the substream was half-closed, i.e. a `Close` frame
- /// has already been sent, nothing further happens.
- /// * If the substream was half-closed by the remote, i.e.
- /// a `Close` frame has already been received, a `Close`
- /// frame is sent at the next opportunity.
+ /// * If the substream was open, a `Reset` frame is sent at the next opportunity.
+ /// * If the substream was half-closed, i.e. a `Close` frame has already been sent, nothing
+ /// further happens.
+ /// * If the substream was half-closed by the remote, i.e. a `Close` frame has already been
+ /// received, a `Close` frame is sent at the next opportunity.
///
/// If the multiplexed stream is closed or encountered
/// an error earlier, or there is no known substream with
@@ -1146,15 +1152,14 @@ const EXTRA_PENDING_FRAMES: usize = 1000;
#[cfg(test)]
mod tests {
- use super::*;
+ use std::{collections::HashSet, num::NonZeroU8, ops::DerefMut, pin::Pin};
+
use async_std::task;
use asynchronous_codec::{Decoder, Encoder};
use bytes::BytesMut;
use quickcheck::*;
- use std::collections::HashSet;
- use std::num::NonZeroU8;
- use std::ops::DerefMut;
- use std::pin::Pin;
+
+ use super::*;
impl Arbitrary for MaxBufferBehaviour {
fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour {
@@ -1226,10 +1231,7 @@ mod tests {
#[test]
fn max_buffer_behaviour() {
- use tracing_subscriber::EnvFilter;
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
fn prop(cfg: MplexConfig, overflow: NonZeroU8) {
let mut r_buf = BytesMut::new();
@@ -1364,10 +1366,7 @@ mod tests {
#[test]
fn close_on_error() {
- use tracing_subscriber::EnvFilter;
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
fn prop(cfg: MplexConfig, num_streams: NonZeroU8) {
let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize);
diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs
index 17ca9ad46f6..1ef89dc283a 100644
--- a/muxers/mplex/src/lib.rs
+++ b/muxers/mplex/src/lib.rs
@@ -26,15 +26,22 @@ mod codec;
mod config;
mod io;
-pub use config::{MaxBufferBehaviour, MplexConfig};
+use std::{
+ cmp, iter,
+ pin::Pin,
+ sync::Arc,
+ task::{Context, Poll},
+};
use bytes::Bytes;
use codec::LocalStreamId;
+pub use config::{MaxBufferBehaviour, MplexConfig};
use futures::{prelude::*, ready};
-use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
+use libp2p_core::{
+ muxing::{StreamMuxer, StreamMuxerEvent},
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+};
use parking_lot::Mutex;
-use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll};
impl UpgradeInfo for MplexConfig {
type Info = &'static str;
diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs
index d03bdbdfed7..489d476f158 100644
--- a/muxers/test-harness/src/lib.rs
+++ b/muxers/test-harness/src/lib.rs
@@ -1,15 +1,20 @@
+use std::{
+ fmt,
+ future::Future,
+ mem,
+ pin::Pin,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use futures::{future, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Stream, StreamExt};
+use libp2p_core::{
+ muxing::StreamMuxerExt,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ StreamMuxer, UpgradeInfo,
+};
+
use crate::future::{BoxFuture, Either, FutureExt};
-use futures::{future, AsyncRead, AsyncWrite};
-use futures::{AsyncReadExt, Stream};
-use futures::{AsyncWriteExt, StreamExt};
-use libp2p_core::muxing::StreamMuxerExt;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::{StreamMuxer, UpgradeInfo};
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::time::Duration;
-use std::{fmt, mem};
pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M)
where
@@ -41,7 +46,8 @@ where
.unwrap()
}
-/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can use `read_to_end` to read the entire message.
+/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can
+/// use `read_to_end` to read the entire message.
pub async fn close_implies_flush(alice: A, bob: B)
where
A: StreamMuxer + Unpin,
@@ -99,7 +105,8 @@ where
.await;
}
-/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can be the dialer and listener.
+/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can
+/// be the dialer and listener.
async fn run_commutative(
mut alice: A,
mut bob: B,
@@ -120,7 +127,8 @@ async fn run_commutative(
/// Runs a given protocol between the two parties.
///
/// The first party will open a new substream and the second party will wait for this.
-/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the underlying connection can make progress at all times.
+/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the
+/// underlying connection can make progress at all times.
async fn run(
dialer: &mut A,
listener: &mut B,
diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs
index bcfeb62fccf..001eb6b0348 100644
--- a/muxers/yamux/src/lib.rs
+++ b/muxers/yamux/src/lib.rs
@@ -22,17 +22,20 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use either::Either;
-use futures::{prelude::*, ready};
-use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
-use std::collections::VecDeque;
-use std::io::{IoSlice, IoSliceMut};
-use std::task::Waker;
use std::{
- io, iter,
+ collections::VecDeque,
+ io,
+ io::{IoSlice, IoSliceMut},
+ iter,
pin::Pin,
- task::{Context, Poll},
+ task::{Context, Poll, Waker},
+};
+
+use either::Either;
+use futures::{prelude::*, ready};
+use libp2p_core::{
+ muxing::{StreamMuxer, StreamMuxerEvent},
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
};
use thiserror::Error;
@@ -40,10 +43,12 @@ use thiserror::Error;
#[derive(Debug)]
pub struct Muxer<C> {
    connection: Either<yamux012::Connection<C>, yamux013::Connection<C>>,
- /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote.
+ /// Temporarily buffers inbound streams in case our node is
+ /// performing backpressure on the remote.
///
- /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the
- /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via
+ /// The only way how yamux can make progress is by calling
+ /// [`yamux013::Connection::poll_next_inbound`]. However, the [`StreamMuxer`] interface is
+ /// designed to allow a caller to selectively make progress via
/// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general
/// [`StreamMuxer::poll`] is designed to make progress on existing streams etc.
///
@@ -57,7 +62,8 @@ pub struct Muxer {
/// How many streams to buffer before we start resetting them.
///
/// This is equal to the ACK BACKLOG in `rust-yamux`.
-/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog.
+/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset
+/// streams because they'll voluntarily stop opening them once they hit the ACK backlog.
const MAX_BUFFERED_INBOUND_STREAMS: usize = 256;
impl Muxer
diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md
index 9b2bc4cb2ea..75a40b8c5ad 100644
--- a/protocols/autonat/CHANGELOG.md
+++ b/protocols/autonat/CHANGELOG.md
@@ -1,9 +1,9 @@
## 0.13.1
- Verify that an incoming AutoNAT dial comes from a connected peer. See [PR 5597](https://github.com/libp2p/rust-libp2p/pull/5597).
-
- Deprecate `void` crate.
See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676).
+- Update to `libp2p-request-response` `v0.28.0`.
## 0.13.0
diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml
index 92ca163d8ec..8ad4492fbff 100644
--- a/protocols/autonat/Cargo.toml
+++ b/protocols/autonat/Cargo.toml
@@ -18,7 +18,6 @@ categories = ["network-programming", "asynchronous"]
[dependencies]
async-trait = { version = "0.1", optional = true }
asynchronous-codec = { workspace = true }
-bytes = { version = "1", optional = true }
either = { version = "1.9.0", optional = true }
futures = { workspace = true }
futures-bounded = { workspace = true, optional = true }
@@ -38,14 +37,14 @@ thiserror = { workspace = true, optional = true }
[dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt", "sync"] }
libp2p-swarm-test = { path = "../../swarm-test" }
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+libp2p-test-utils = { workspace = true }
libp2p-identify = { workspace = true }
libp2p-swarm = { workspace = true, features = ["macros"] }
[features]
default = ["v1", "v2"]
v1 = ["dep:libp2p-request-response", "dep:web-time", "dep:async-trait"]
-v2 = ["dep:bytes", "dep:either", "dep:futures-bounded", "dep:thiserror", "dep:rand_core"]
+v2 = ["dep:either", "dep:futures-bounded", "dep:thiserror", "dep:rand_core"]
# Passing arguments to the docsrs builder in order to properly document cfg's.
# More information: https://docs.rs/about/builds#cross-compiling
diff --git a/protocols/autonat/src/v1.rs b/protocols/autonat/src/v1.rs
index c60e4805f40..4de601c5df5 100644
--- a/protocols/autonat/src/v1.rs
+++ b/protocols/autonat/src/v1.rs
@@ -29,6 +29,8 @@
pub(crate) mod behaviour;
pub(crate) mod protocol;
+pub use libp2p_request_response::{InboundFailure, OutboundFailure};
+
pub use self::{
behaviour::{
Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, NatStatus,
@@ -36,7 +38,6 @@ pub use self::{
},
protocol::{ResponseError, DEFAULT_PROTOCOL_NAME},
};
-pub use libp2p_request_response::{InboundFailure, OutboundFailure};
pub(crate) mod proto {
#![allow(unreachable_pub)]
diff --git a/protocols/autonat/src/v1/behaviour.rs b/protocols/autonat/src/v1/behaviour.rs
index 7a717baed8d..24ec1b13be7 100644
--- a/protocols/autonat/src/v1/behaviour.rs
+++ b/protocols/autonat/src/v1/behaviour.rs
@@ -21,15 +21,19 @@
mod as_client;
mod as_server;
-use crate::protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError};
-use crate::DEFAULT_PROTOCOL_NAME;
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ iter,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use as_client::AsClient;
pub use as_client::{OutboundProbeError, OutboundProbeEvent};
use as_server::AsServer;
pub use as_server::{InboundProbeError, InboundProbeEvent};
use futures_timer::Delay;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr};
+use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_request_response::{
self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel,
@@ -39,14 +43,13 @@ use libp2p_swarm::{
ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- iter,
- task::{Context, Poll},
- time::Duration,
-};
use web_time::Instant;
+use crate::{
+ protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError},
+ DEFAULT_PROTOCOL_NAME,
+};
+
/// Config for the [`Behaviour`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
@@ -148,17 +151,18 @@ pub enum Event {
/// [`NetworkBehaviour`] for AutoNAT.
///
-/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a firewall, or
-/// publicly reachable.
-/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of fixed servers and
-/// connected peers. Upon receiving a dial-back request, the remote tries to dial the included addresses. When a
-/// first address was successfully dialed, a status Ok will be send back together with the dialed address. If no address
-/// can be reached a dial-error is send back.
+/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a
+/// firewall, or publicly reachable.
+/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of
+/// fixed servers and connected peers. Upon receiving a dial-back request, the remote tries to dial
+/// the included addresses. When a first address was successfully dialed, a status Ok will be sent
+/// back together with the dialed address. If no address can be reached a dial-error is sent back.
/// Based on the received response, the sender assumes themselves to be public or private.
-/// The status is retried in a frequency of [`Config::retry_interval`] or [`Config::retry_interval`], depending on whether
-/// enough confidence in the assumed NAT status was reached or not.
-/// The confidence increases each time a probe confirms the assumed status, and decreases if a different status is reported.
-/// If the confidence is 0, the status is flipped and the Behaviour will report the new status in an `OutEvent`.
+/// The status is retried in a frequency of [`Config::retry_interval`] or
+/// [`Config::refresh_interval`], depending on whether enough confidence in the assumed NAT status was
+/// reached or not. The confidence increases each time a probe confirms the assumed status, and
+/// decreases if a different status is reported. If the confidence is 0, the status is flipped and
+/// the Behaviour will report the new status in an `OutEvent`.
pub struct Behaviour {
// Local peer id
local_peer_id: PeerId,
@@ -195,11 +199,12 @@ pub struct Behaviour {
ongoing_outbound: HashMap,
// Connected peers with the observed address of each connection.
- // If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips),
- // the observed address is `None`.
+ // If the endpoint of a connection is relayed or not global (in case of
+ // Config::only_global_ips), the observed address is `None`.
connected: HashMap>>,
- // Used servers in recent outbound probes that are throttled through Config::throttle_server_period.
+ // Used servers in recent outbound probes that are throttled through
+ // Config::throttle_server_period.
throttled_servers: Vec<(PeerId, Instant)>,
// Recent probes done for clients
@@ -264,8 +269,8 @@ impl Behaviour {
}
/// Add a peer to the list over servers that may be used for probes.
- /// These peers are used for dial-request even if they are currently not connection, in which case a connection will be
- /// establish before sending the dial-request.
+    /// These peers are used for dial-request even if they are currently not connected, in which
+    /// case a connection will be established before sending the dial-request.
pub fn add_server(&mut self, peer: PeerId, address: Option) {
self.servers.insert(peer);
if let Some(addr) = address {
@@ -564,7 +569,8 @@ impl NetworkBehaviour for Behaviour {
type Action = ToSwarm<::ToSwarm, THandlerInEvent>;
-// Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol.
+// Trait implemented for `AsClient` and `AsServer` to handle events from the inner
+// [`request_response::Behaviour`] Protocol.
trait HandleInnerEvent {
fn handle_event(
&mut self,
@@ -671,7 +677,8 @@ impl GlobalIp for std::net::Ipv6Addr {
// Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that instead of the
// `Ipv6MulticastScope` just returns if the scope is global or not.
- // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, Ipv6MulticastScope::Global))`.
+ // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope,
+ // Ipv6MulticastScope::Global))`.
fn is_multicast_scope_global(addr: &std::net::Ipv6Addr) -> Option {
match addr.segments()[0] & 0x000f {
14 => Some(true), // Global multicast scope.
diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs
index 385dee50ee1..ca8daf6e1ac 100644
--- a/protocols/autonat/src/v1/behaviour/as_client.rs
+++ b/protocols/autonat/src/v1/behaviour/as_client.rs
@@ -18,12 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::ResponseError;
-
-use super::{
- Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus,
- ProbeId,
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ task::{Context, Poll},
+ time::Duration,
};
+
use futures::FutureExt;
use futures_timer::Delay;
use libp2p_core::Multiaddr;
@@ -31,13 +31,14 @@ use libp2p_identity::PeerId;
use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId};
use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm};
use rand::{seq::SliceRandom, thread_rng};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- task::{Context, Poll},
- time::Duration,
-};
use web_time::Instant;
+use super::{
+ Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus,
+ ProbeId,
+};
+use crate::ResponseError;
+
/// Outbound probe failed or was aborted.
#[derive(Debug)]
pub enum OutboundProbeError {
@@ -111,6 +112,7 @@ impl HandleInnerEvent for AsClient<'_> {
request_id,
response,
},
+ ..
} => {
tracing::debug!(?response, "Outbound dial-back request returned response");
@@ -153,6 +155,7 @@ impl HandleInnerEvent for AsClient<'_> {
peer,
error,
request_id,
+ ..
} => {
tracing::debug!(
%peer,
diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs
index 01148add6e8..32b4120c552 100644
--- a/protocols/autonat/src/v1/behaviour/as_server.rs
+++ b/protocols/autonat/src/v1/behaviour/as_server.rs
@@ -17,10 +17,11 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::{
- Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId,
- ResponseError,
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ num::NonZeroU8,
};
+
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_request_response::{
@@ -30,12 +31,13 @@ use libp2p_swarm::{
dial_opts::{DialOpts, PeerCondition},
ConnectionId, DialError, ToSwarm,
};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- num::NonZeroU8,
-};
use web_time::Instant;
+use super::{
+ Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId,
+ ResponseError,
+};
+
/// Inbound probe failed.
#[derive(Debug)]
pub enum InboundProbeError {
@@ -105,6 +107,7 @@ impl HandleInnerEvent for AsServer<'_> {
request,
channel,
},
+ ..
} => {
let probe_id = self.probe_id.next();
if !self.connected.contains_key(&peer) {
@@ -181,6 +184,7 @@ impl HandleInnerEvent for AsServer<'_> {
peer,
error,
request_id,
+ ..
} => {
tracing::debug!(
%peer,
@@ -379,10 +383,10 @@ impl AsServer<'_> {
#[cfg(test)]
mod test {
- use super::*;
-
use std::net::Ipv4Addr;
+ use super::*;
+
fn random_ip<'a>() -> Protocol<'a> {
Protocol::Ip4(Ipv4Addr::new(
rand::random(),
diff --git a/protocols/autonat/src/v1/protocol.rs b/protocols/autonat/src/v1/protocol.rs
index 2ce538fddf4..6aa0c99167b 100644
--- a/protocols/autonat/src/v1/protocol.rs
+++ b/protocols/autonat/src/v1/protocol.rs
@@ -18,16 +18,20 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
+use std::io;
+
use async_trait::async_trait;
use asynchronous_codec::{FramedRead, FramedWrite};
-use futures::io::{AsyncRead, AsyncWrite};
-use futures::{SinkExt, StreamExt};
+use futures::{
+ io::{AsyncRead, AsyncWrite},
+ SinkExt, StreamExt,
+};
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_request_response::{self as request_response};
use libp2p_swarm::StreamProtocol;
-use std::io;
+
+use crate::proto;
/// The protocol name used for negotiating with multistream-select.
pub const DEFAULT_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/autonat/1.0.0");
diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs
index 48e9f25f890..94decf50a55 100644
--- a/protocols/autonat/src/v2.rs
+++ b/protocols/autonat/src/v2.rs
@@ -4,17 +4,17 @@
//!
//! The new version fixes the issues of the first version:
//! - The server now always dials back over a newly allocated port. This greatly reduces the risk of
-//! false positives that often occurred in the first version, when the clinet-server connection
-//! occurred over a hole-punched port.
+//!   false positives that often occurred in the first version, when the client-server connection
+//! occurred over a hole-punched port.
//! - The server protects against DoS attacks by requiring the client to send more data to the
-//! server then the dial back puts on the client, thus making the protocol unatractive for an
-//! attacker.
+//!   server than the dial back puts on the client, thus making the protocol unattractive for an
+//! attacker.
//!
//! The protocol is separated into two parts:
//! - The client part, which is implemented in the `client` module. (The client is the party that
-//! wants to check if it is reachable from the outside.)
+//! wants to check if it is reachable from the outside.)
//! - The server part, which is implemented in the `server` module. (The server is the party
-//! performing reachability checks on behalf of the client.)
+//! performing reachability checks on behalf of the client.)
//!
//! The two can be used together.
diff --git a/protocols/autonat/src/v2/client.rs b/protocols/autonat/src/v2/client.rs
index d3272512f35..11ddb792839 100644
--- a/protocols/autonat/src/v2/client.rs
+++ b/protocols/autonat/src/v2/client.rs
@@ -1,5 +1,4 @@
mod behaviour;
mod handler;
-pub use behaviour::Event;
-pub use behaviour::{Behaviour, Config};
+pub use behaviour::{Behaviour, Config, Event};
diff --git a/protocols/autonat/src/v2/client/behaviour.rs b/protocols/autonat/src/v2/client/behaviour.rs
index 97509c05443..8e238fc9be4 100644
--- a/protocols/autonat/src/v2/client/behaviour.rs
+++ b/protocols/autonat/src/v2/client/behaviour.rs
@@ -1,5 +1,6 @@
use std::{
collections::{HashMap, VecDeque},
+ fmt::{Debug, Display, Formatter},
task::{Context, Poll},
time::Duration,
};
@@ -15,14 +16,12 @@ use libp2p_swarm::{
};
use rand::prelude::*;
use rand_core::OsRng;
-use std::fmt::{Debug, Display, Formatter};
-
-use crate::v2::{protocol::DialRequest, Nonce};
use super::handler::{
dial_back::{self, IncomingNonce},
dial_request,
};
+use crate::v2::{protocol::DialRequest, Nonce};
#[derive(Debug, Clone, Copy)]
pub struct Config {
@@ -281,10 +280,12 @@ where
}
}
- /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested candidates.
+ /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested
+ /// candidates.
///
/// In the current implementation, we only send a single address to each AutoNAT server.
- /// This spreads our candidates out across all servers we are connected to which should give us pretty fast feedback on all of them.
+ /// This spreads our candidates out across all servers we are connected to which should give us
+ /// pretty fast feedback on all of them.
fn issue_dial_requests_for_untested_candidates(&mut self) {
for addr in self.untested_candidates() {
let Some((conn_id, peer_id)) = self.random_autonat_server() else {
@@ -311,7 +312,8 @@ where
/// Returns all untested candidates, sorted by the frequency they were reported at.
///
- /// More frequently reported candidates are considered to more likely be external addresses and thus tested first.
+ /// More frequently reported candidates are considered to more likely be external addresses and
+ /// thus tested first.
fn untested_candidates(&self) -> impl Iterator- {
let mut entries = self
.address_candidates
@@ -333,7 +335,8 @@ where
.map(|(addr, _)| addr)
}
- /// Chooses an active connection to one of our peers that reported support for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol.
+ /// Chooses an active connection to one of our peers that reported support for the
+ /// [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol.
fn random_autonat_server(&mut self) -> Option<(ConnectionId, PeerId)> {
let (conn_id, info) = self
.peer_info
diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs
index b3b3a59c02d..7cdf194343a 100644
--- a/protocols/autonat/src/v2/client/handler/dial_back.rs
+++ b/protocols/autonat/src/v2/client/handler/dial_back.rs
@@ -1,4 +1,5 @@
use std::{
+ convert::Infallible,
io,
task::{Context, Poll},
time::Duration,
@@ -11,7 +12,6 @@ use libp2p_swarm::{
handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError},
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol,
};
-use std::convert::Infallible;
use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL};
@@ -35,16 +35,14 @@ impl ConnectionHandler for Handler {
type InboundOpenInfo = ();
type OutboundOpenInfo = ();
- fn listen_protocol(&self) -> SubstreamProtocol {
+ fn listen_protocol(&self) -> SubstreamProtocol {
SubstreamProtocol::new(ReadyUpgrade::new(DIAL_BACK_PROTOCOL), ())
}
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent,
- > {
+ ) -> Poll> {
loop {
match self.inbound.poll_next_unpin(cx) {
Poll::Pending => return Poll::Pending,
@@ -68,12 +66,7 @@ impl ConnectionHandler for Handler {
fn on_connection_event(
&mut self,
- event: ConnectionEvent<
- Self::InboundProtocol,
- Self::OutboundProtocol,
- Self::InboundOpenInfo,
- Self::OutboundOpenInfo,
- >,
+ event: ConnectionEvent,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
@@ -83,7 +76,7 @@ impl ConnectionHandler for Handler {
tracing::warn!("Dial back request dropped, too many requests in flight");
}
}
- // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs
+ // TODO: remove when Rust 1.82 is MSRV
#[allow(unreachable_patterns)]
ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. }) => {
libp2p_core::util::unreachable(error);
diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs
index 0f303167523..61f564505eb 100644
--- a/protocols/autonat/src/v2/client/handler/dial_request.rs
+++ b/protocols/autonat/src/v2/client/handler/dial_request.rs
@@ -1,10 +1,18 @@
+use std::{
+ collections::VecDeque,
+ convert::Infallible,
+ io,
+ iter::{once, repeat},
+ task::{Context, Poll},
+ time::Duration,
+};
+
use futures::{channel::oneshot, AsyncWrite};
use futures_bounded::FuturesMap;
use libp2p_core::{
upgrade::{DeniedUpgrade, ReadyUpgrade},
Multiaddr,
};
-
use libp2p_swarm::{
handler::{
ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound, OutboundUpgradeSend,
@@ -13,14 +21,6 @@ use libp2p_swarm::{
ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError,
SubstreamProtocol,
};
-use std::{
- collections::VecDeque,
- convert::Infallible,
- io,
- iter::{once, repeat},
- task::{Context, Poll},
- time::Duration,
-};
use crate::v2::{
generated::structs::{mod_DialResponse::ResponseStatus, DialStatus},
@@ -72,7 +72,7 @@ pub struct Handler {
queued_events: VecDeque<
ConnectionHandlerEvent<
::OutboundProtocol,
- ::OutboundOpenInfo,
+ (),
::ToBehaviour,
>,
>,
@@ -121,16 +121,14 @@ impl ConnectionHandler for Handler {
type InboundOpenInfo = ();
type OutboundOpenInfo = ();
- fn listen_protocol(&self) -> SubstreamProtocol {
+ fn listen_protocol(&self) -> SubstreamProtocol {
SubstreamProtocol::new(DeniedUpgrade, ())
}
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent,
- > {
+ ) -> Poll> {
if let Some(event) = self.queued_events.pop_front() {
return Poll::Ready(event);
}
@@ -161,12 +159,7 @@ impl ConnectionHandler for Handler {
fn on_connection_event(
&mut self,
- event: ConnectionEvent<
- Self::InboundProtocol,
- Self::OutboundProtocol,
- Self::InboundOpenInfo,
- Self::OutboundOpenInfo,
- >,
+ event: ConnectionEvent,
) {
match event {
ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => {
@@ -261,7 +254,9 @@ async fn start_stream_handle(
Ok(_) => {}
Err(err) => {
if err.kind() == io::ErrorKind::ConnectionReset {
- // The AutoNAT server may have already closed the stream (this is normal because the probe is finished), in this case we have this error:
+ // The AutoNAT server may have already closed the stream
+ // (this is normal because the probe is finished),
+ // in this case we have this error:
// Err(Custom { kind: ConnectionReset, error: Stopped(0) })
// so we silently ignore this error
} else {
diff --git a/protocols/autonat/src/v2/protocol.rs b/protocols/autonat/src/v2/protocol.rs
index 4077fd65f5d..70f9f8c37af 100644
--- a/protocols/autonat/src/v2/protocol.rs
+++ b/protocols/autonat/src/v2/protocol.rs
@@ -1,13 +1,10 @@
// change to quick-protobuf-codec
-use std::io;
-use std::io::ErrorKind;
+use std::{io, io::ErrorKind};
use asynchronous_codec::{Framed, FramedRead, FramedWrite};
-
use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
use libp2p_core::Multiaddr;
-
use quick_protobuf_codec::Codec;
use rand::Rng;
@@ -103,7 +100,10 @@ impl From for proto::Message {
);
proto::Message {
msg: proto::mod_Message::OneOfmsg::dialDataResponse(proto::DialDataResponse {
- data: vec![0; val.data_count], // One could use Cow::Borrowed here, but it will require a modification of the generated code and that will fail the CI
+ // One could use Cow::Borrowed here, but it will
+ // require a modification of the generated code
+ // and that will fail the CI
+ data: vec![0; val.data_count],
}),
}
}
diff --git a/protocols/autonat/src/v2/server.rs b/protocols/autonat/src/v2/server.rs
index 25819307784..cd9b1e46b18 100644
--- a/protocols/autonat/src/v2/server.rs
+++ b/protocols/autonat/src/v2/server.rs
@@ -1,5 +1,4 @@
mod behaviour;
mod handler;
-pub use behaviour::Behaviour;
-pub use behaviour::Event;
+pub use behaviour::{Behaviour, Event};
diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs
index 027cfff7c13..125955cb53a 100644
--- a/protocols/autonat/src/v2/server/behaviour.rs
+++ b/protocols/autonat/src/v2/server/behaviour.rs
@@ -4,20 +4,19 @@ use std::{
task::{Context, Poll},
};
-use crate::v2::server::handler::dial_request::DialBackStatus;
use either::Either;
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::dial_opts::PeerCondition;
use libp2p_swarm::{
- dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure,
- FromSwarm, NetworkBehaviour, ToSwarm,
+ dial_opts::{DialOpts, PeerCondition},
+ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, FromSwarm,
+ NetworkBehaviour, ToSwarm,
};
use rand_core::{OsRng, RngCore};
use crate::v2::server::handler::{
dial_back,
- dial_request::{self, DialBackCommand},
+ dial_request::{self, DialBackCommand, DialBackStatus},
Handler,
};
diff --git a/protocols/autonat/src/v2/server/handler/dial_back.rs b/protocols/autonat/src/v2/server/handler/dial_back.rs
index 3cacd4ff32b..8adb33509ef 100644
--- a/protocols/autonat/src/v2/server/handler/dial_back.rs
+++ b/protocols/autonat/src/v2/server/handler/dial_back.rs
@@ -14,13 +14,12 @@ use libp2p_swarm::{
SubstreamProtocol,
};
+use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes};
use crate::v2::{
protocol::{dial_back, recv_dial_back_response},
DIAL_BACK_PROTOCOL,
};
-use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes};
-
pub(crate) type ToBehaviour = io::Result<()>;
pub struct Handler {
@@ -47,16 +46,14 @@ impl ConnectionHandler for Handler {
type InboundOpenInfo = ();
type OutboundOpenInfo = ();
- fn listen_protocol(&self) -> SubstreamProtocol {
+ fn listen_protocol(&self) -> SubstreamProtocol {
SubstreamProtocol::new(DeniedUpgrade, ())
}
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent,
- > {
+ ) -> Poll> {
if let Poll::Ready(result) = self.outbound.poll_unpin(cx) {
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
result
@@ -77,12 +74,7 @@ impl ConnectionHandler for Handler {
fn on_connection_event(
&mut self,
- event: ConnectionEvent<
- Self::InboundProtocol,
- Self::OutboundProtocol,
- Self::InboundOpenInfo,
- Self::OutboundOpenInfo,
- >,
+ event: ConnectionEvent,
) {
match event {
ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound {
diff --git a/protocols/autonat/src/v2/server/handler/dial_request.rs b/protocols/autonat/src/v2/server/handler/dial_request.rs
index 5058e0f3f42..22cab2b9cab 100644
--- a/protocols/autonat/src/v2/server/handler/dial_request.rs
+++ b/protocols/autonat/src/v2/server/handler/dial_request.rs
@@ -81,16 +81,14 @@ where
type InboundOpenInfo = ();
type OutboundOpenInfo = ();
- fn listen_protocol(&self) -> SubstreamProtocol {
+ fn listen_protocol(&self) -> SubstreamProtocol {
SubstreamProtocol::new(ReadyUpgrade::new(DIAL_REQUEST_PROTOCOL), ())
}
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent,
- > {
+ ) -> Poll> {
loop {
match self.inbound.poll_unpin(cx) {
Poll::Ready(Ok(event)) => {
@@ -117,12 +115,7 @@ where
fn on_connection_event(
&mut self,
- event: ConnectionEvent<
- Self::InboundProtocol,
- Self::OutboundProtocol,
- Self::InboundOpenInfo,
- Self::OutboundOpenInfo,
- >,
+ event: ConnectionEvent,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs
index f22a2e51470..1e278f5554f 100644
--- a/protocols/autonat/tests/autonatv2.rs
+++ b/protocols/autonat/tests/autonatv2.rs
@@ -1,23 +1,20 @@
-use libp2p_autonat::v2::client::{self, Config};
-use libp2p_autonat::v2::server;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::TransportError;
-use libp2p_core::Multiaddr;
+use std::{sync::Arc, time::Duration};
+
+use libp2p_autonat::v2::{
+ client::{self, Config},
+ server,
+};
+use libp2p_core::{multiaddr::Protocol, transport::TransportError, Multiaddr};
use libp2p_swarm::{
DialError, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, Swarm, SwarmEvent,
};
use libp2p_swarm_test::SwarmExt;
use rand_core::OsRng;
-use std::sync::Arc;
-use std::time::Duration;
use tokio::sync::oneshot;
-use tracing_subscriber::EnvFilter;
#[tokio::test]
async fn confirm_successful() {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let (mut alice, mut bob) = start_and_connect().await;
let cor_server_peer = *alice.local_peer_id();
@@ -128,9 +125,7 @@ async fn confirm_successful() {
#[tokio::test]
async fn dial_back_to_unsupported_protocol() {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let (mut alice, mut bob) = bootstrap().await;
let alice_peer_id = *alice.local_peer_id();
@@ -226,9 +221,7 @@ async fn dial_back_to_unsupported_protocol() {
#[tokio::test]
async fn dial_back_to_non_libp2p() {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let (mut alice, mut bob) = bootstrap().await;
let alice_peer_id = *alice.local_peer_id();
@@ -314,9 +307,7 @@ async fn dial_back_to_non_libp2p() {
#[tokio::test]
async fn dial_back_to_not_supporting() {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let (mut alice, mut bob) = bootstrap().await;
let alice_peer_id = *alice.local_peer_id();
diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs
index f5c18e3f34e..49c6c483514 100644
--- a/protocols/autonat/tests/test_client.rs
+++ b/protocols/autonat/tests/test_client.rs
@@ -18,6 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use libp2p_autonat::{
Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError,
};
@@ -25,7 +27,6 @@ use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
use tokio::task::JoinHandle;
const MAX_CONFIDENCE: usize = 3;
@@ -116,7 +117,8 @@ async fn test_auto_probe() {
// It can happen that the server observed the established connection and
// returned a response before the inbound established connection was reported at the client.
- // In this (rare) case the `ConnectionEstablished` event occurs after the `OutboundProbeEvent::Response`.
+ // In this (rare) case the `ConnectionEstablished` event
+ // occurs after the `OutboundProbeEvent::Response`.
if !had_connection_event {
match client.next_swarm_event().await {
SwarmEvent::ConnectionEstablished {
diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs
index d43d14198d4..944c4301b20 100644
--- a/protocols/autonat/tests/test_server.rs
+++ b/protocols/autonat/tests/test_server.rs
@@ -18,15 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{num::NonZeroU32, time::Duration};
+
use libp2p_autonat::{
Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError,
};
use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::DialError;
-use libp2p_swarm::{Swarm, SwarmEvent};
+use libp2p_swarm::{DialError, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::{num::NonZeroU32, time::Duration};
#[tokio::test]
async fn test_dial_back() {
@@ -340,7 +340,8 @@ async fn test_global_ips_config() {
client.listen().await;
tokio::spawn(client.loop_on_next());
- // Expect the probe to be refused as both peers run on the same machine and thus in the same local network.
+ // Expect the probe to be refused as both peers run
+ // on the same machine and thus in the same local network.
match server.next_behaviour_event().await {
Event::InboundProbe(InboundProbeEvent::Error { error, .. }) => assert!(matches!(
error,
diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml
index a47f5400488..7bc05671aa2 100644
--- a/protocols/dcutr/Cargo.toml
+++ b/protocols/dcutr/Cargo.toml
@@ -27,19 +27,14 @@ lru = "0.12.3"
futures-bounded = { workspace = true }
[dev-dependencies]
-clap = { version = "4.5.6", features = ["derive"] }
-libp2p-dns = { workspace = true, features = ["async-std"] }
libp2p-identify = { workspace = true }
-libp2p-noise = { workspace = true }
-libp2p-ping = { workspace = true }
libp2p-plaintext = { workspace = true }
libp2p-relay = { workspace = true }
libp2p-swarm = { workspace = true, features = ["macros"] }
libp2p-swarm-test = { path = "../../swarm-test" }
libp2p-tcp = { workspace = true, features = ["async-io"] }
libp2p-yamux = { workspace = true }
-rand = "0.8"
-tracing-subscriber = { workspace = true, features = ["env-filter"] }
+libp2p-test-utils = { workspace = true }
tokio = { workspace = true, features = ["rt", "macros"] }
# Passing arguments to the docsrs builder in order to properly document cfg's.
diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs
index 7d0366c98bc..989635c02ba 100644
--- a/protocols/dcutr/src/behaviour.rs
+++ b/protocols/dcutr/src/behaviour.rs
@@ -20,27 +20,29 @@
//! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node.
-use crate::{handler, protocol};
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ convert::Infallible,
+ num::NonZeroUsize,
+ task::{Context, Poll},
+};
+
use either::Either;
-use libp2p_core::connection::ConnectedPoint;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{
+ connection::ConnectedPoint, multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr,
+};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm};
-use libp2p_swarm::dial_opts::{self, DialOpts};
use libp2p_swarm::{
- dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler,
- THandlerOutEvent,
+ behaviour::{ConnectionClosed, DialFailure, FromSwarm},
+ dial_opts::{self, DialOpts},
+ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour,
+ NewExternalAddrCandidate, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm};
use lru::LruCache;
-use std::collections::{HashMap, HashSet, VecDeque};
-use std::convert::Infallible;
-use std::num::NonZeroUsize;
-use std::task::{Context, Poll};
use thiserror::Error;
+use crate::{handler, protocol};
+
pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3;
/// The events produced by the [`Behaviour`].
@@ -184,7 +186,8 @@ impl NetworkBehaviour for Behaviour {
handler::relayed::Handler::new(connected_point, self.observed_addresses());
handler.on_behaviour_event(handler::relayed::Command::Connect);
- return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ return Ok(Either::Left(handler));
}
self.direct_connections
.entry(peer)
@@ -217,7 +220,8 @@ impl NetworkBehaviour for Behaviour {
port_use,
},
self.observed_addresses(),
- ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one
+ // outbound.
}
self.direct_connections
@@ -255,7 +259,8 @@ impl NetworkBehaviour for Behaviour {
Either::Left(_) => connection_id,
Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) {
None => {
- // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it.
+ // If the connection ID is unknown to us, it means we didn't create it so ignore
+ // any event coming from it.
return;
}
Some(relayed_connection_id) => *relayed_connection_id,
@@ -347,8 +352,9 @@ impl NetworkBehaviour for Behaviour {
///
/// We use an [`LruCache`] to favor addresses that are reported more often.
/// When attempting a hole-punch, we will try more frequent addresses first.
-/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol).
-/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch.
+/// Most of these addresses will come from observations by other nodes (via e.g. the identify
+/// protocol). More common observations mean a more likely stable port-mapping and thus a higher
+/// chance of a successful hole-punch.
struct Candidates {
inner: LruCache,
me: PeerId,
diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs
index ad12a196cb9..47813493e9e 100644
--- a/protocols/dcutr/src/handler/relayed.rs
+++ b/protocols/dcutr/src/handler/relayed.rs
@@ -20,26 +20,31 @@
//! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection.
-use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS;
-use crate::{protocol, PROTOCOL_NAME};
+use std::{
+ collections::VecDeque,
+ io,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use either::Either;
use futures::future;
-use libp2p_core::multiaddr::Multiaddr;
-use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade};
-use libp2p_core::ConnectedPoint;
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
- ListenUpgradeError,
+use libp2p_core::{
+ multiaddr::Multiaddr,
+ upgrade::{DeniedUpgrade, ReadyUpgrade},
+ ConnectedPoint,
};
use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
+ ListenUpgradeError,
+ },
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError,
SubstreamProtocol,
};
use protocol::{inbound, outbound};
-use std::collections::VecDeque;
-use std::io;
-use std::task::{Context, Poll};
-use std::time::Duration;
+
+use crate::{behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS, protocol, PROTOCOL_NAME};
#[derive(Debug)]
pub enum Command {
@@ -60,7 +65,7 @@ pub struct Handler {
queued_events: VecDeque<
ConnectionHandlerEvent<
::OutboundProtocol,
- ::OutboundOpenInfo,
+ (),
::ToBehaviour,
>,
>,
@@ -93,10 +98,7 @@ impl Handler {
&mut self,
FullyNegotiatedInbound {
protocol: output, ..
- }: FullyNegotiatedInbound<
- ::InboundProtocol,
- ::InboundOpenInfo,
- >,
+ }: FullyNegotiatedInbound<::InboundProtocol>,
) {
match output {
future::Either::Left(stream) => {
@@ -114,8 +116,8 @@ impl Handler {
}
self.attempts += 1;
}
- // A connection listener denies all incoming substreams, thus none can ever be fully negotiated.
- // TODO: remove when Rust 1.82 is MSRV
+ // A connection listener denies all incoming substreams, thus none can ever be fully
+ // negotiated. TODO: remove when Rust 1.82 is MSRV
#[allow(unreachable_patterns)]
future::Either::Right(output) => libp2p_core::util::unreachable(output),
}
@@ -125,10 +127,7 @@ impl Handler {
&mut self,
FullyNegotiatedOutbound {
protocol: stream, ..
- }: FullyNegotiatedOutbound<
- ::OutboundProtocol,
- ::OutboundOpenInfo,
- >,
+ }: FullyNegotiatedOutbound<::OutboundProtocol>,
) {
assert!(
self.endpoint.is_listener(),
@@ -151,7 +150,7 @@ impl Handler {
fn on_listen_upgrade_error(
&mut self,
ListenUpgradeError { error, .. }: ListenUpgradeError<
- ::InboundOpenInfo,
+ (),
::InboundProtocol,
>,
) {
@@ -163,7 +162,7 @@ impl Handler {
fn on_dial_upgrade_error(
&mut self,
DialUpgradeError { error, .. }: DialUpgradeError<
- ::OutboundOpenInfo,
+ (),
::OutboundProtocol,
>,
) {
@@ -191,7 +190,7 @@ impl ConnectionHandler for Handler {
type OutboundOpenInfo = ();
type InboundOpenInfo = ();
- fn listen_protocol(&self) -> SubstreamProtocol {
+ fn listen_protocol(&self) -> SubstreamProtocol {
match self.endpoint {
ConnectedPoint::Dialer { .. } => {
SubstreamProtocol::new(Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), ())
@@ -231,9 +230,7 @@ impl ConnectionHandler for Handler {
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent,
- > {
+ ) -> Poll> {
// Return queued events.
if let Some(event) = self.queued_events.pop_front() {
return Poll::Ready(event);
@@ -290,12 +287,7 @@ impl ConnectionHandler for Handler {
fn on_connection_event(
&mut self,
- event: ConnectionEvent<
- Self::InboundProtocol,
- Self::OutboundProtocol,
- Self::InboundOpenInfo,
- Self::OutboundOpenInfo,
- >,
+ event: ConnectionEvent,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => {
diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs
index 005d8394f5e..c5209930ca2 100644
--- a/protocols/dcutr/src/protocol/inbound.rs
+++ b/protocols/dcutr/src/protocol/inbound.rs
@@ -18,14 +18,16 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
+use std::io;
+
use asynchronous_codec::Framed;
use futures::prelude::*;
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_swarm::Stream;
-use std::io;
use thiserror::Error;
+use crate::proto;
+
pub(crate) async fn handshake(
stream: Stream,
candidates: Vec,
diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs
index 8639ff4f053..cdd3d5fbf0b 100644
--- a/protocols/dcutr/src/protocol/outbound.rs
+++ b/protocols/dcutr/src/protocol/outbound.rs
@@ -18,17 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use crate::PROTOCOL_NAME;
+use std::io;
+
use asynchronous_codec::Framed;
use futures::prelude::*;
use futures_timer::Delay;
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_swarm::Stream;
-use std::io;
use thiserror::Error;
use web_time::Instant;
+use crate::{proto, PROTOCOL_NAME};
+
pub(crate) async fn handshake(
stream: Stream,
candidates: Vec,
diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs
index 36f168fb04a..ce7119cebcf 100644
--- a/protocols/dcutr/tests/lib.rs
+++ b/protocols/dcutr/tests/lib.rs
@@ -18,9 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use libp2p_core::multiaddr::{Multiaddr, Protocol};
-use libp2p_core::transport::upgrade::Version;
-use libp2p_core::transport::{MemoryTransport, Transport};
+use std::time::Duration;
+
+use libp2p_core::{
+ multiaddr::{Multiaddr, Protocol},
+ transport::{upgrade::Version, MemoryTransport, Transport},
+};
use libp2p_dcutr as dcutr;
use libp2p_identify as identify;
use libp2p_identity as identity;
@@ -29,14 +32,10 @@ use libp2p_plaintext as plaintext;
use libp2p_relay as relay;
use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
-use tracing_subscriber::EnvFilter;
#[tokio::test]
async fn connect() {
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
+ libp2p_test_utils::with_default_env_filter();
let mut relay = build_relay();
let mut dst = build_client();
diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs
index 1a70d2213b2..477172b42c0 100644
--- a/protocols/floodsub/src/layer.rs
+++ b/protocols/floodsub/src/layer.rs
@@ -18,27 +18,36 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{
- FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription,
- FloodsubSubscriptionAction,
+use std::{
+ collections::{
+ hash_map::{DefaultHasher, HashMap},
+ VecDeque,
+ },
+ iter,
+ task::{Context, Poll},
};
-use crate::topic::Topic;
-use crate::FloodsubConfig;
+
use bytes::Bytes;
use cuckoofilter::{CuckooError, CuckooFilter};
use fnv::FnvHashSet;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm};
use libp2p_swarm::{
- dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour,
- NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
+ behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm},
+ dial_opts::DialOpts,
+ CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler,
+ OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
use smallvec::SmallVec;
-use std::collections::hash_map::{DefaultHasher, HashMap};
-use std::task::{Context, Poll};
-use std::{collections::VecDeque, iter};
+
+use crate::{
+ protocol::{
+ FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription,
+ FloodsubSubscriptionAction,
+ },
+ topic::Topic,
+ FloodsubConfig,
+};
/// Network behaviour that handles the floodsub protocol.
pub struct Floodsub {
@@ -192,7 +201,8 @@ impl Floodsub {
self.publish_many_inner(topic, data, true)
}
- /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics.
+ /// Publishes a message with multiple topics to the network, even if we're not subscribed to any
+ /// of the topics.
pub fn publish_many_any(
&mut self,
topic: impl IntoIterator
- >,
diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs
index 94766d5fdca..d43b0c88788 100644
--- a/protocols/floodsub/src/lib.rs
+++ b/protocols/floodsub/src/lib.rs
@@ -35,9 +35,11 @@ mod proto {
pub(crate) use self::floodsub::pb::{mod_RPC::SubOpts, Message, RPC};
}
-pub use self::layer::{Floodsub, FloodsubEvent};
-pub use self::protocol::{FloodsubMessage, FloodsubRpc};
-pub use self::topic::Topic;
+pub use self::{
+ layer::{Floodsub, FloodsubEvent},
+ protocol::{FloodsubMessage, FloodsubRpc},
+ topic::Topic,
+};
/// Configuration options for the Floodsub protocol.
#[derive(Debug, Clone)]
diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs
index edc842be8ce..69cfcbd9dc7 100644
--- a/protocols/floodsub/src/protocol.rs
+++ b/protocols/floodsub/src/protocol.rs
@@ -18,19 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use crate::topic::Topic;
+use std::{io, iter, pin::Pin};
+
use asynchronous_codec::Framed;
use bytes::Bytes;
use futures::{
io::{AsyncRead, AsyncWrite},
- Future,
+ Future, SinkExt, StreamExt,
};
-use futures::{SinkExt, StreamExt};
use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
-use std::{io, iter, pin::Pin};
+
+use crate::{proto, topic::Topic};
const MAX_MESSAGE_LEN_BYTES: usize = 2048;
diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index ddbbc7fb552..94b9b922973 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -1,5 +1,14 @@
## 0.48.0
+- Allow broadcasting `IDONTWANT` messages when publishing to avoid downloading data that is already available.
+ See [PR 5773](https://github.com/libp2p/rust-libp2p/pull/5773)
+
+- Add configurable `idontwant_message_size_threshold` parameter.
+ See [PR 5770](https://github.com/libp2p/rust-libp2p/pull/5770)
+
+- Introduce Gossipsub v1.2 [spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md).
+ See [PR 5697](https://github.com/libp2p/rust-libp2p/pull/5697)
+
- Correct state inconsistencies with the mesh and fanout when unsubscribing.
See [PR 5690](https://github.com/libp2p/rust-libp2p/pull/5690)
@@ -18,10 +27,17 @@
- Introduce back pressure and penalize slow peers. Drop stale messages that timeout before being
delivered.
See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595).
+
- Change `Behaviour::unsubscribe` and `Behaviour::report_message_validation_result`
to `bool` they don't need to be a `Result`.
See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595).
+- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`.
+ See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700).
+
+- Fix an issue where an `InsufficientPeers` error could occur under certain conditions, despite having peers subscribed to a topic.
+ See [PR 5793](https://github.com/libp2p/rust-libp2p/pull/5793).
+
## 0.47.0
@@ -242,7 +258,7 @@
- Move from `open-metrics-client` to `prometheus-client` (see [PR 2442]).
-- Emit gossip of all non empty topics (see [PR 2481]).
+- Emit gossip of all non-empty topics (see [PR 2481]).
- Merge NetworkBehaviour's inject_\* paired methods (see [PR 2445]).
diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml
index c09286c8aa0..328d4367204 100644
--- a/protocols/gossipsub/Cargo.toml
+++ b/protocols/gossipsub/Cargo.toml
@@ -23,7 +23,8 @@ either = "1.11"
fnv = "1.0.7"
futures = { workspace = true }
futures-timer = "3.0.2"
-getrandom = "0.2.15"
+getrandom = { workspace = true }
+hashlink = { workspace = true }
hex_fmt = "0.3.0"
web-time = { workspace = true }
libp2p-core = { workspace = true }
@@ -35,20 +36,16 @@ rand = "0.8"
regex = "1.10.5"
serde = { version = "1", optional = true, features = ["derive"] }
sha2 = "0.10.8"
-smallvec = "1.13.2"
tracing = { workspace = true }
# Metrics dependencies
prometheus-client = { workspace = true }
[dev-dependencies]
-hex = "0.4.2"
libp2p-core = { workspace = true }
-libp2p-yamux = { workspace = true }
-libp2p-noise = { workspace = true }
libp2p-swarm-test = { path = "../../swarm-test" }
quickcheck = { workspace = true }
-tracing-subscriber = { workspace = true, features = ["env-filter"] }
+libp2p-test-utils = { workspace = true }
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] }
# Passing arguments to the docsrs builder in order to properly document cfg's.
diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs
index 4414ffb00e6..ee600d22098 100644
--- a/protocols/gossipsub/src/backoff.rs
+++ b/protocols/gossipsub/src/backoff.rs
@@ -19,15 +19,19 @@
// DEALINGS IN THE SOFTWARE.
//! Data structure for efficiently storing known back-off's when pruning peers.
-use crate::topic::TopicHash;
-use libp2p_identity::PeerId;
-use std::collections::{
- hash_map::{Entry, HashMap},
- HashSet,
+use std::{
+ collections::{
+ hash_map::{Entry, HashMap},
+ HashSet,
+ },
+ time::Duration,
};
-use std::time::Duration;
+
+use libp2p_identity::PeerId;
use web_time::Instant;
+use crate::topic::TopicHash;
+
#[derive(Copy, Clone)]
struct HeartbeatIndex(usize);
@@ -68,8 +72,8 @@ impl BackoffStorage {
}
}
- /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call
- /// doesn't change anything).
+ /// Updates the backoff for a peer (if there is already a more restrictive backoff then this
+ /// call doesn't change anything).
pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
let instant = Instant::now() + time;
let insert_into_backoffs_by_heartbeat =
@@ -124,7 +128,7 @@ impl BackoffStorage {
pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool {
self.backoffs
.get(topic)
- .map_or(false, |m| m.contains_key(peer))
+ .is_some_and(|m| m.contains_key(peer))
}
pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option {
@@ -155,7 +159,7 @@ impl BackoffStorage {
None => false,
};
if !keep {
- //remove from backoffs
+ // remove from backoffs
if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) {
if m.get_mut().remove(peer).is_some() && m.get().is_empty() {
m.remove();
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 075a881db48..356f1d6cd77 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -19,11 +19,10 @@
// DEALINGS IN THE SOFTWARE.
use std::{
- cmp::{max, Ordering},
- collections::HashSet,
- collections::VecDeque,
- collections::{BTreeSet, HashMap},
+ cmp::{max, Ordering, Ordering::Equal},
+ collections::{BTreeSet, HashMap, HashSet, VecDeque},
fmt,
+ fmt::Debug,
net::IpAddr,
task::{Context, Poll},
time::Duration,
@@ -31,56 +30,56 @@ use std::{
use futures::FutureExt;
use futures_timer::Delay;
-use prometheus_client::registry::Registry;
-use rand::{seq::SliceRandom, thread_rng};
-
+use hashlink::LinkedHashMap;
use libp2p_core::{
- multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr,
+ multiaddr::Protocol::{Ip4, Ip6},
+ transport::PortUse,
+ Endpoint, Multiaddr,
};
-use libp2p_identity::Keypair;
-use libp2p_identity::PeerId;
+use libp2p_identity::{Keypair, PeerId};
use libp2p_swarm::{
behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm},
dial_opts::DialOpts,
ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
+use prometheus_client::registry::Registry;
+use quick_protobuf::{MessageWrite, Writer};
+use rand::{seq::SliceRandom, thread_rng};
use web_time::{Instant, SystemTime};
-use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason};
-use crate::protocol::SIGNING_PREFIX;
-use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter};
-use crate::time_cache::DuplicateCache;
-use crate::topic::{Hasher, Topic, TopicHash};
-use crate::transform::{DataTransform, IdentityTransform};
-use crate::types::{
- ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription,
- SubscriptionAction,
-};
-use crate::types::{PeerConnections, PeerKind, RpcOut};
-use crate::{backoff::BackoffStorage, FailedMessages};
use crate::{
+ backoff::BackoffStorage,
config::{Config, ValidationMode},
- types::Graft,
-};
-use crate::{gossip_promises::GossipPromises, types::Prune};
-use crate::{
+ gossip_promises::GossipPromises,
handler::{Handler, HandlerEvent, HandlerIn},
- types::IWant,
-};
-use crate::{mcache::MessageCache, types::IHave};
-use crate::{
+ mcache::MessageCache,
metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty},
+ peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason},
+ protocol::SIGNING_PREFIX,
rpc::Sender,
+ rpc_proto::proto,
+ subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter},
+ time_cache::DuplicateCache,
+ topic::{Hasher, Topic, TopicHash},
+ transform::{DataTransform, IdentityTransform},
+ types::{
+ ControlAction, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, MessageId,
+ PeerConnections, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription,
+ SubscriptionAction,
+ },
+ FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError,
};
-use crate::{rpc_proto::proto, TopicScoreParams};
-use crate::{PublishError, SubscriptionError, ValidationError};
-use quick_protobuf::{MessageWrite, Writer};
-use std::{cmp::Ordering::Equal, fmt::Debug};
#[cfg(test)]
mod tests;
+/// IDONTWANT cache capacity.
+const IDONTWANT_CAP: usize = 10_000;
+
+/// IDONTWANT timeout before removal.
+const IDONTWANT_TIMEOUT: Duration = Duration::new(3, 0);
+
/// Determines if published messages should be signed or not.
///
/// Without signing, a number of privacy preserving modes can be selected.
@@ -221,8 +220,9 @@ impl From for PublishConfig {
let public_key = keypair.public();
let key_enc = public_key.encode_protobuf();
let key = if key_enc.len() <= 42 {
- // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it
- // specifically in the [`rpc_proto::proto::Message::key`] field.
+ // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we
+ // don't include it specifically in the
+ // [`rpc_proto::proto::Message::key`] field.
None
} else {
// Include the protobuf encoding of the public key in the message.
@@ -289,7 +289,7 @@ pub struct Behaviour {
/// The last publish time for fanout topics.
fanout_last_pub: HashMap,
- ///Storage for backoffs
+ /// Storage for backoffs
backoffs: BackoffStorage,
/// Message cache for the last few heartbeats.
@@ -314,7 +314,7 @@ pub struct Behaviour {
/// Stores optional peer score data together with thresholds, decay interval and gossip
/// promises.
- peer_score: Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>,
+ peer_score: Option<(PeerScore, PeerScoreThresholds, Delay)>,
/// Counts the number of `IHAVE` received from each peer since the last heartbeat.
count_received_ihave: HashMap,
@@ -339,6 +339,9 @@ pub struct Behaviour {
/// Tracks the numbers of failed messages per peer-id.
failed_messages: HashMap,
+
+ /// Tracks recently sent `IWANT` messages and checks if peers respond to them.
+ gossip_promises: GossipPromises,
}
impl Behaviour
@@ -472,6 +475,7 @@ where
subscription_filter,
data_transform,
failed_messages: Default::default(),
+ gossip_promises: Default::default(),
})
}
}
@@ -671,9 +675,14 @@ where
// Gossipsub peers
None => {
tracing::debug!(topic=%topic_hash, "Topic not in the mesh");
+ // `fanout_peers` is always non-empty if it's `Some`.
+ let fanout_peers = self
+ .fanout
+ .get(&topic_hash)
+ .filter(|peers| !peers.is_empty());
// If we have fanout peers add them to the map.
- if self.fanout.contains_key(&topic_hash) {
- for peer in self.fanout.get(&topic_hash).expect("Topic must exist") {
+ if let Some(peers) = fanout_peers {
+ for peer in peers {
recipient_peers.insert(*peer);
}
} else {
@@ -753,6 +762,13 @@ where
return Err(PublishError::AllQueuesFull(recipient_peers.len()));
}
+ // Broadcast IDONTWANT messages
+ if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold()
+ && self.config.idontwant_on_publish()
+ {
+ self.send_idontwant(&raw_message, &msg_id, raw_message.source.as_ref());
+ }
+
tracing::debug!(message=%msg_id, "Published message");
if let Some(metrics) = self.metrics.as_mut() {
@@ -905,7 +921,7 @@ where
let interval = Delay::new(params.decay_interval);
let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback);
- self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default()));
+ self.peer_score = Some((peer_score, threshold, interval));
Ok(())
}
@@ -1169,7 +1185,7 @@ where
}
fn score_below_threshold_from_scores(
- peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>,
+ peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay)>,
peer_id: &PeerId,
threshold: impl Fn(&PeerScoreThresholds) -> f64,
) -> (bool, f64) {
@@ -1230,10 +1246,7 @@ where
return false;
}
- self.peer_score
- .as_ref()
- .map(|(_, _, _, promises)| !promises.contains(id))
- .unwrap_or(true)
+ !self.gossip_promises.contains(id)
};
for (topic, ids) in ihave_msgs {
@@ -1280,13 +1293,11 @@ where
iwant_ids_vec.truncate(iask);
*iasked += iask;
- if let Some((_, _, _, gossip_promises)) = &mut self.peer_score {
- gossip_promises.add_promise(
- *peer_id,
- &iwant_ids_vec,
- Instant::now() + self.config.iwant_followup_time(),
- );
- }
+ self.gossip_promises.add_promise(
+ *peer_id,
+ &iwant_ids_vec,
+ Instant::now() + self.config.iwant_followup_time(),
+ );
tracing::trace!(
peer=%peer_id,
"IHAVE: Asking for the following messages from peer: {:?}",
@@ -1415,7 +1426,7 @@ where
+ self.config.graft_flood_threshold())
- self.config.prune_backoff();
if flood_cutoff > now {
- //extra penalty
+ // extra penalty
peer_score.add_penalty(peer_id, 1);
}
}
@@ -1436,15 +1447,16 @@ where
topic=%topic_hash,
"GRAFT: ignoring peer with negative score"
);
- // we do send them PRUNE however, because it's a matter of protocol correctness
+ // we do send them PRUNE however, because it's a matter of protocol
+ // correctness
to_prune_topics.insert(topic_hash.clone());
// but we won't PX to them
do_px = false;
continue;
}
- // check mesh upper bound and only allow graft if the upper bound is not reached or
- // if it is an outbound peer
+ // check mesh upper bound and only allow graft if the upper bound is not reached
+ // or if it is an outbound peer
if peers.len() >= self.config.mesh_n_high()
&& !self.outbound_peers.contains(peer_id)
{
@@ -1572,7 +1584,7 @@ where
self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune);
if self.mesh.contains_key(&topic_hash) {
- //connect to px peers
+ // connect to px peers
if !px.is_empty() {
// we ignore PX from peers with insufficient score
if below_threshold {
@@ -1604,7 +1616,7 @@ where
let n = self.config.prune_peers();
// Ignore peerInfo with no ID
//
- //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
+ // TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
// signed peer record?
px.retain(|p| p.peer_id.is_some());
if px.len() > n {
@@ -1649,14 +1661,15 @@ where
peer=%propagation_source,
"Rejecting message from blacklisted peer"
);
- if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
+ self.gossip_promises
+ .reject_message(msg_id, &RejectReason::BlackListedPeer);
+ if let Some((peer_score, ..)) = &mut self.peer_score {
peer_score.reject_message(
propagation_source,
msg_id,
&raw_message.topic,
RejectReason::BlackListedPeer,
);
- gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer);
}
return false;
}
@@ -1689,7 +1702,7 @@ where
let self_published = !self.config.allow_self_origin()
&& if let Some(own_id) = self.publish_config.get_own_id() {
own_id != propagation_source
- && raw_message.source.as_ref().map_or(false, |s| s == own_id)
+ && raw_message.source.as_ref().is_some_and(|s| s == own_id)
} else {
self.published_message_ids.contains(msg_id)
};
@@ -1738,6 +1751,11 @@ where
// Calculate the message id on the transformed data.
let msg_id = self.config.message_id(&message);
+ // Broadcast IDONTWANT messages
+ if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() {
+ self.send_idontwant(&raw_message, &msg_id, Some(propagation_source));
+ }
+
// Check the validity of the message
// Peers get penalized if this message is invalid. We don't add it to the duplicate cache
// and instead continually penalize peers that repeatedly send this message.
@@ -1753,6 +1771,7 @@ where
self.mcache.observe_duplicate(&msg_id, propagation_source);
return;
}
+
tracing::debug!(
message=%msg_id,
"Put message in duplicate_cache and resolve promises"
@@ -1765,9 +1784,11 @@ where
// Tells score that message arrived (but is maybe not fully validated yet).
// Consider the message as delivered for gossip promises.
- if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
+ self.gossip_promises.message_delivered(&msg_id);
+
+ // Tells score that message arrived (but is maybe not fully validated yet).
+ if let Some((peer_score, ..)) = &mut self.peer_score {
peer_score.validate_message(propagation_source, &msg_id, &message.topic);
- gossip_promises.message_delivered(&msg_id);
}
// Add the message to our memcache
@@ -1809,12 +1830,14 @@ where
raw_message: &RawMessage,
reject_reason: RejectReason,
) {
- if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
- if let Some(metrics) = self.metrics.as_mut() {
- metrics.register_invalid_message(&raw_message.topic);
- }
+ if let Some(metrics) = self.metrics.as_mut() {
+ metrics.register_invalid_message(&raw_message.topic);
+ }
- if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) {
+ let message = self.data_transform.inbound_transform(raw_message.clone());
+
+ match (&mut self.peer_score, message) {
+ (Some((peer_score, ..)), Ok(message)) => {
let message_id = self.config.message_id(&message);
peer_score.reject_message(
@@ -1824,13 +1847,22 @@ where
reject_reason,
);
- gossip_promises.reject_message(&message_id, &reject_reason);
- } else {
+ self.gossip_promises
+ .reject_message(&message_id, &reject_reason);
+ }
+ (Some((peer_score, ..)), Err(_)) => {
// The message is invalid, we reject it ignoring any gossip promises. If a peer is
// advertising this message via an IHAVE and it's invalid it will be double
// penalized, one for sending us an invalid and again for breaking a promise.
peer_score.reject_invalid_message(propagation_source, &raw_message.topic);
}
+ (None, Ok(message)) => {
+ // Valid transformation without peer scoring
+ let message_id = self.config.message_id(&message);
+ self.gossip_promises
+ .reject_message(&message_id, &reject_reason);
+ }
+ (None, Err(_)) => {}
}
}
@@ -1897,7 +1929,7 @@ where
// if the mesh needs peers add the peer to the mesh
if !self.explicit_peers.contains(propagation_source)
- && matches!(peer.kind, PeerKind::Gossipsubv1_1 | PeerKind::Gossipsub)
+ && peer.kind.is_gossipsub()
&& !Self::score_below_threshold_from_scores(
&self.peer_score,
propagation_source,
@@ -2001,8 +2033,8 @@ where
/// Applies penalties to peers that did not respond to our IWANT requests.
fn apply_iwant_penalties(&mut self) {
- if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
- for (peer, count) in gossip_promises.get_broken_promises() {
+ if let Some((peer_score, ..)) = &mut self.peer_score {
+ for (peer, count) in self.gossip_promises.get_broken_promises() {
peer_score.add_penalty(&peer, count);
if let Some(metrics) = self.metrics.as_mut() {
metrics.register_score_penalty(Penalty::BrokenPromise);
@@ -2223,7 +2255,7 @@ where
&& peers.len() > 1
&& self.peer_score.is_some()
{
- if let Some((_, thresholds, _, _)) = &self.peer_score {
+ if let Some((_, thresholds, _)) = &self.peer_score {
// Opportunistic grafting works as follows: we check the median score of peers
// in the mesh; if this score is below the opportunisticGraftThreshold, we
// select a few peers at random with score over the median.
@@ -2316,7 +2348,7 @@ where
for (topic_hash, peers) in self.fanout.iter_mut() {
let mut to_remove_peers = Vec::new();
let publish_threshold = match &self.peer_score {
- Some((_, thresholds, _, _)) => thresholds.publish_threshold,
+ Some((_, thresholds, _)) => thresholds.publish_threshold,
_ => 0.0,
};
for peer_id in peers.iter() {
@@ -2409,6 +2441,17 @@ where
}
self.failed_messages.shrink_to_fit();
+ // Flush stale IDONTWANTs.
+ for peer in self.connected_peers.values_mut() {
+ while let Some((_front, instant)) = peer.dont_send.front() {
+ if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() {
+ break;
+ } else {
+ peer.dont_send.pop_front();
+ }
+ }
+ }
+
tracing::debug!("Completed Heartbeat");
if let Some(metrics) = self.metrics.as_mut() {
let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
@@ -2564,6 +2607,49 @@ where
}
}
+ /// Helper function which sends an IDONTWANT message to mesh\[topic\] peers.
+ fn send_idontwant(
+ &mut self,
+ message: &RawMessage,
+ msg_id: &MessageId,
+ propagation_source: Option<&PeerId>,
+ ) {
+ let Some(mesh_peers) = self.mesh.get(&message.topic) else {
+ return;
+ };
+
+ let iwant_peers = self.gossip_promises.peers_for_message(msg_id);
+
+ let recipient_peers: Vec = mesh_peers
+ .iter()
+ .chain(iwant_peers.iter())
+ .filter(|&peer_id| {
+ Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref()
+ })
+ .cloned()
+ .collect();
+
+ for peer_id in recipient_peers {
+ let Some(peer) = self.connected_peers.get_mut(&peer_id) else {
+ tracing::error!(peer = %peer_id,
+ "Could not IDONTWANT, peer doesn't exist in connected peer list");
+ continue;
+ };
+
+ // Only gossipsub 1.2 peers support IDONTWANT.
+ if peer.kind != PeerKind::Gossipsubv1_2 {
+ continue;
+ }
+
+ self.send_message(
+ peer_id,
+ RpcOut::IDontWant(IDontWant {
+ message_ids: vec![msg_id.clone()],
+ }),
+ );
+ }
+ }
+
/// Helper function which forwards a message to mesh\[topic\] peers.
///
/// Returns true if at least one peer was messaged.
@@ -2619,13 +2705,23 @@ where
}
// forward the message to peers
- for peer in recipient_peers.iter() {
- let event = RpcOut::Forward {
- message: message.clone(),
- timeout: Delay::new(self.config.forward_queue_duration()),
- };
- tracing::debug!(%peer, message=%msg_id, "Sending message to peer");
- self.send_message(*peer, event);
+ for peer_id in recipient_peers.iter() {
+ if let Some(peer) = self.connected_peers.get_mut(peer_id) {
+ if peer.dont_send.contains_key(msg_id) {
+ tracing::debug!(%peer_id, message=%msg_id, "Peer doesn't want message");
+ continue;
+ }
+
+ tracing::debug!(%peer_id, message=%msg_id, "Sending message to peer");
+
+ self.send_message(
+ *peer_id,
+ RpcOut::Forward {
+ message: message.clone(),
+ timeout: Delay::new(self.config.forward_queue_duration()),
+ },
+ );
+ }
}
tracing::debug!("Completed forwarding message");
true
@@ -2761,14 +2857,14 @@ where
failed_messages.non_priority += 1;
failed_messages.forward += 1;
}
- RpcOut::IWant(_) | RpcOut::IHave(_) => {
+ RpcOut::IWant(_) | RpcOut::IHave(_) | RpcOut::IDontWant(_) => {
failed_messages.non_priority += 1;
}
RpcOut::Graft(_)
| RpcOut::Prune(_)
| RpcOut::Subscribe(_)
| RpcOut::Unsubscribe(_) => {
- unreachable!("Channel for highpriority contorl messages is unbounded and should always be open.")
+ unreachable!("Channel for highpriority control messages is unbounded and should always be open.")
}
}
@@ -2867,8 +2963,8 @@ where
.expect("Previously established connection to peer must be present");
peer.connections.remove(index);
- // If there are more connections and this peer is in a mesh, inform the first connection
- // handler.
+ // If there are more connections and this peer is in a mesh, inform the first
+ // connection handler.
if !peer.connections.is_empty() {
for topic in &peer.topics {
if let Some(mesh_peers) = self.mesh.get(topic) {
@@ -2921,7 +3017,7 @@ where
// If metrics are enabled, register the disconnection of a peer based on its protocol.
if let Some(metrics) = self.metrics.as_mut() {
- metrics.peer_protocol_disconnected(connected_peer.kind.clone());
+ metrics.peer_protocol_disconnected(connected_peer.kind);
}
self.connected_peers.remove(&peer_id);
@@ -3001,6 +3097,7 @@ where
connections: vec![],
sender: Sender::new(self.config.connection_handler_queue_len()),
topics: Default::default(),
+ dont_send: LinkedHashMap::new(),
});
// Add the new connection
connected_peer.connections.push(connection_id);
@@ -3027,6 +3124,7 @@ where
connections: vec![],
sender: Sender::new(self.config.connection_handler_queue_len()),
topics: Default::default(),
+ dont_send: LinkedHashMap::new(),
});
// Add the new connection
connected_peer.connections.push(connection_id);
@@ -3048,7 +3146,7 @@ where
// We have identified the protocol this peer is using
if let Some(metrics) = self.metrics.as_mut() {
- metrics.peer_protocol_connected(kind.clone());
+ metrics.peer_protocol_connected(kind);
}
if let PeerKind::NotSupported = kind {
@@ -3076,7 +3174,7 @@ where
}
HandlerEvent::MessageDropped(rpc) => {
// Account for this in the scoring logic
- if let Some((peer_score, _, _, _)) = &mut self.peer_score {
+ if let Some((peer_score, _, _)) = &mut self.peer_score {
peer_score.failed_message_slow_peer(&propagation_source);
}
@@ -3162,7 +3260,8 @@ where
}
// Handle control messages
- // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however)
+ // group some control messages, this minimises SendEvents (code is simplified to
+ // handle each event at a time however)
let mut ihave_msgs = vec![];
let mut graft_msgs = vec![];
let mut prune_msgs = vec![];
@@ -3183,6 +3282,24 @@ where
peers,
backoff,
}) => prune_msgs.push((topic_hash, peers, backoff)),
+ ControlAction::IDontWant(IDontWant { message_ids }) => {
+ let Some(peer) = self.connected_peers.get_mut(&propagation_source)
+ else {
+ tracing::error!(peer = %propagation_source,
+ "Could not handle IDONTWANT, peer doesn't exist in connected peer list");
+ continue;
+ };
+ if let Some(metrics) = self.metrics.as_mut() {
+ metrics.register_idontwant(message_ids.len());
+ }
+ for message_id in message_ids {
+ peer.dont_send.insert(message_id, Instant::now());
+ // Don't exceed capacity.
+ if peer.dont_send.len() > IDONTWANT_CAP {
+ peer.dont_send.pop_front();
+ }
+ }
+ }
}
}
if !ihave_msgs.is_empty() {
@@ -3208,7 +3325,7 @@ where
}
// update scores
- if let Some((peer_score, _, delay, _)) = &mut self.peer_score {
+ if let Some((peer_score, _, delay)) = &mut self.peer_score {
if delay.poll_unpin(cx).is_ready() {
peer_score.refresh_scores();
delay.reset(peer_score.params.decay_interval);
@@ -3335,7 +3452,7 @@ fn get_random_peers_dynamic(
.iter()
.filter(|(_, p)| p.topics.contains(topic_hash))
.filter(|(peer_id, _)| f(peer_id))
- .filter(|(_, p)| p.kind == PeerKind::Gossipsub || p.kind == PeerKind::Gossipsubv1_1)
+ .filter(|(_, p)| p.kind.is_gossipsub())
.map(|(peer_id, _)| *peer_id)
.collect::>();
diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs
index 9567150382a..bed74ecdce7 100644
--- a/protocols/gossipsub/src/behaviour/tests.rs
+++ b/protocols/gossipsub/src/behaviour/tests.rs
@@ -20,25 +20,20 @@
// Collection of tests for the gossipsub network behaviour
-use super::*;
-use crate::rpc::Receiver;
-use crate::subscription_filter::WhitelistSubscriptionFilter;
-use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic};
+use std::{future, net::Ipv4Addr, thread::sleep};
+
use byteorder::{BigEndian, ByteOrder};
use libp2p_core::ConnectedPoint;
use rand::Rng;
-use std::future;
-use std::net::Ipv4Addr;
-use std::thread::sleep;
+
+use super::*;
+use crate::{
+ config::ConfigBuilder, rpc::Receiver, subscription_filter::WhitelistSubscriptionFilter,
+ types::Rpc, IdentTopic as Topic,
+};
#[derive(Default, Debug)]
-struct InjectNodes
-// TODO: remove trait bound Default when this issue is fixed:
-// https://github.com/colin-kiegel/rust-derive-builder/issues/93
-where
- D: DataTransform + Default + Clone + Send + 'static,
- F: TopicSubscriptionFilter + Clone + Default + Send + 'static,
-{
+struct InjectNodes {
peer_no: usize,
topics: Vec,
to_subscribe: bool,
@@ -48,6 +43,7 @@ where
scoring: Option<(PeerScoreParams, PeerScoreThresholds)>,
data_transform: D,
subscription_filter: F,
+ peer_kind: Option,
}
impl InjectNodes
@@ -95,7 +91,7 @@ where
let empty = vec![];
for i in 0..self.peer_no {
- let (peer, receiver) = add_peer(
+ let (peer, receiver) = add_peer_with_addr_and_kind(
&mut gs,
if self.to_subscribe {
&topic_hashes
@@ -104,6 +100,8 @@ where
},
i < self.outbound,
i < self.explicit,
+ Multiaddr::empty(),
+ self.peer_kind.or(Some(PeerKind::Gossipsubv1_1)),
);
peers.push(peer);
receivers.insert(peer, receiver);
@@ -152,6 +150,11 @@ where
self.subscription_filter = subscription_filter;
self
}
+
+ fn peer_kind(mut self, peer_kind: PeerKind) -> Self {
+ self.peer_kind = Some(peer_kind);
+ self
+ }
}
fn inject_nodes() -> InjectNodes
@@ -234,10 +237,11 @@ where
gs.connected_peers.insert(
peer,
PeerConnections {
- kind: kind.clone().unwrap_or(PeerKind::Floodsub),
+ kind: kind.unwrap_or(PeerKind::Floodsub),
connections: vec![connection_id],
topics: Default::default(),
sender,
+ dont_send: LinkedHashMap::new(),
},
);
@@ -311,7 +315,8 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc {
messages.push(RawMessage {
source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()),
data: message.data.unwrap_or_default(),
- sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application
+ sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), /* don't inform the
+ * application */
topic: TopicHash::from_raw(message.topic),
signature: message.signature, // don't inform the application
key: None,
@@ -623,6 +628,7 @@ fn test_join() {
connections: vec![connection_id],
topics: Default::default(),
sender,
+ dont_send: LinkedHashMap::new(),
},
);
receivers.insert(random_peer, receiver);
@@ -677,7 +683,7 @@ fn test_publish_without_flood_publishing() {
// - Send publish message to all peers
// - Insert message into gs.mcache and gs.received
- //turn off flood publish to test old behaviour
+ // turn off flood publish to test old behaviour
let config = ConfigBuilder::default()
.flood_publish(false)
.build()
@@ -757,7 +763,7 @@ fn test_fanout() {
// - Send publish message to fanout peers
// - Insert message into gs.mcache and gs.received
- //turn off flood publish to test fanout behaviour
+ // turn off flood publish to test fanout behaviour
let config = ConfigBuilder::default()
.flood_publish(false)
.build()
@@ -1018,6 +1024,7 @@ fn test_get_random_peers() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
}
@@ -1447,10 +1454,10 @@ fn test_explicit_peer_gets_connected() {
.to_subscribe(true)
.create_network();
- //create new peer
+ // create new peer
let peer = PeerId::random();
- //add peer as explicit peer
+ // add peer as explicit peer
gs.add_explicit_peer(&peer);
let num_events = gs
@@ -1483,17 +1490,17 @@ fn test_explicit_peer_reconnects() {
let peer = others.first().unwrap();
- //add peer as explicit peer
+ // add peer as explicit peer
gs.add_explicit_peer(peer);
flush_events(&mut gs, receivers);
- //disconnect peer
+ // disconnect peer
disconnect_peer(&mut gs, peer);
gs.heartbeat();
- //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2`
+ // check that no reconnect after first heartbeat since `explicit_peer_ticks == 2`
assert_eq!(
gs.events
.iter()
@@ -1508,7 +1515,7 @@ fn test_explicit_peer_reconnects() {
gs.heartbeat();
- //check that there is a reconnect after second heartbeat
+ // check that there is a reconnect after second heartbeat
assert!(
gs.events
.iter()
@@ -1536,11 +1543,11 @@ fn test_handle_graft_explicit_peer() {
gs.handle_graft(peer, topic_hashes.clone());
- //peer got not added to mesh
+ // peer did not get added to mesh
assert!(gs.mesh[&topic_hashes[0]].is_empty());
assert!(gs.mesh[&topic_hashes[1]].is_empty());
- //check prunes
+ // check prunes
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == peer
&& match m {
@@ -1566,13 +1573,13 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
.explicit(1)
.create_network();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(
gs.mesh[&topic_hashes[0]],
vec![peers[1]].into_iter().collect()
);
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1581,7 +1588,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1603,10 +1610,10 @@ fn do_not_graft_explicit_peer() {
gs.heartbeat();
- //mesh stays empty
+ // mesh stays empty
assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new());
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &others[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1663,7 +1670,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
.explicit(1)
.create_network();
- //create new topic, both peers subscribing to it but we do not subscribe to it
+ // create new topic, both peers subscribing to it but we do not subscribe to it
let topic = Topic::new(String::from("t"));
let topic_hash = topic.hash();
for peer in peers.iter().take(2) {
@@ -1676,13 +1683,13 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
);
}
- //subscribe now to topic
+ // subscribe now to topic
gs.subscribe(&topic).unwrap();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect());
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1691,7 +1698,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1711,7 +1718,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
.explicit(1)
.create_network();
- //create new topic, both peers subscribing to it but we do not subscribe to it
+ // create new topic, both peers subscribing to it but we do not subscribe to it
let topic = Topic::new(String::from("t"));
let topic_hash = topic.hash();
for peer in peers.iter().take(2) {
@@ -1724,16 +1731,16 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
);
}
- //we send a message for this topic => this will initialize the fanout
+ // we send a message for this topic => this will initialize the fanout
gs.publish(topic.clone(), vec![1, 2, 3]).unwrap();
- //subscribe now to topic
+ // subscribe now to topic
gs.subscribe(&topic).unwrap();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect());
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1742,7 +1749,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1774,15 +1781,15 @@ fn no_gossip_gets_sent_to_explicit_peers() {
validated: true,
};
- //forward the message
+ // forward the message
gs.handle_received_message(message, &local_id);
- //simulate multiple gossip calls (for randomness)
+ // simulate multiple gossip calls (for randomness)
for _ in 0..3 {
gs.emit_gossip();
}
- //assert that no gossip gets sent to explicit peer
+ // assert that no gossip gets sent to explicit peer
let receiver = receivers.remove(&peers[0]).unwrap();
let mut gossips = 0;
let non_priority = receiver.non_priority.get_ref();
@@ -1835,7 +1842,7 @@ fn test_mesh_subtraction() {
// Adds mesh_low peers and PRUNE 2 giving us a deficit.
let n = config.mesh_n_high() + 10;
- //make all outbound connections so that we allow grafting to all
+ // make all outbound connections so that we allow grafting to all
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
.topics(vec!["test".into()])
@@ -1866,10 +1873,10 @@ fn test_connect_to_px_peers_on_handle_prune() {
.to_subscribe(true)
.create_network();
- //handle prune from single peer with px peers
+ // handle prune from single peer with px peers
let mut px = Vec::new();
- //propose more px peers than config.prune_peers()
+ // propose more px peers than config.prune_peers()
for _ in 0..config.prune_peers() + 5 {
px.push(PeerInfo {
peer_id: Some(PeerId::random()),
@@ -1885,7 +1892,7 @@ fn test_connect_to_px_peers_on_handle_prune() {
)],
);
- //Check DialPeer events for px peers
+ // Check DialPeer events for px peers
let dials: Vec<_> = gs
.events
.iter()
@@ -1903,7 +1910,7 @@ fn test_connect_to_px_peers_on_handle_prune() {
// No duplicates
assert_eq!(dials_set.len(), config.prune_peers());
- //all dial peers must be in px
+ // all dial peers must be in px
assert!(dials_set.is_subset(
&px.iter()
.map(|i| *i.peer_id.as_ref().unwrap())
@@ -1915,14 +1922,14 @@ fn test_connect_to_px_peers_on_handle_prune() {
fn test_send_px_and_backoff_in_prune() {
let config: Config = Config::default();
- //build mesh with enough peers for px
+ // build mesh with enough peers for px
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(config.prune_peers() + 1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //send prune to peer
+ // send prune to peer
gs.send_graft_prune(
HashMap::new(),
vec![(peers[0], vec![topics[0].clone()])]
@@ -1931,7 +1938,7 @@ fn test_send_px_and_backoff_in_prune() {
HashSet::new(),
);
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -1957,14 +1964,14 @@ fn test_send_px_and_backoff_in_prune() {
fn test_prune_backoffed_peer_on_graft() {
let config: Config = Config::default();
- //build mesh with enough peers for px
+ // build mesh with enough peers for px
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(config.prune_peers() + 1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //remove peer from mesh and send prune to peer => this adds a backoff for this peer
+ // remove peer from mesh and send prune to peer => this adds a backoff for this peer
gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]);
gs.send_graft_prune(
HashMap::new(),
@@ -1974,13 +1981,13 @@ fn test_prune_backoffed_peer_on_graft() {
HashSet::new(),
);
- //ignore all messages until now
+ // ignore all messages until now
let receivers = flush_events(&mut gs, receivers);
- //handle graft
+ // handle graft
gs.handle_graft(&peers[0], vec![topics[0].clone()]);
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -2007,7 +2014,7 @@ fn test_do_not_graft_within_backoff_period() {
.heartbeat_interval(Duration::from_millis(100))
.build()
.unwrap();
- //only one peer => mesh too small and will try to regraft as early as possible
+ // only one peer => mesh too small and will try to regraft as early as possible
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2015,22 +2022,22 @@ fn test_do_not_graft_within_backoff_period() {
.gs_config(config)
.create_network();
- //handle prune from peer with backoff of one second
+ // handle prune from peer with backoff of one second
gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]);
- //forget all events until now
+ // forget all events until now
let receivers = flush_events(&mut gs, receivers);
- //call heartbeat
+ // call heartbeat
gs.heartbeat();
- //Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
+ // Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
for _ in 0..10 {
sleep(Duration::from_millis(100));
gs.heartbeat();
}
- //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
// is needed).
let (control_msgs, receivers) =
count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
@@ -2039,11 +2046,11 @@ fn test_do_not_graft_within_backoff_period() {
"Graft message created too early within backoff period"
);
- //Heartbeat one more time this should graft now
+ // Heartbeat one more time this should graft now
sleep(Duration::from_millis(100));
gs.heartbeat();
- //check that graft got created
+ // check that graft got created
let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
assert!(
control_msgs > 0,
@@ -2053,14 +2060,14 @@ fn test_do_not_graft_within_backoff_period() {
#[test]
fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() {
- //set default backoff period to 1 second
+ // set default backoff period to 1 second
let config = ConfigBuilder::default()
.prune_backoff(Duration::from_millis(90))
.backoff_slack(1)
.heartbeat_interval(Duration::from_millis(100))
.build()
.unwrap();
- //only one peer => mesh too small and will try to regraft as early as possible
+ // only one peer => mesh too small and will try to regraft as early as possible
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2068,20 +2075,20 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without
.gs_config(config)
.create_network();
- //handle prune from peer without a specified backoff
+ // handle prune from peer without a specified backoff
gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]);
- //forget all events until now
+ // forget all events until now
let receivers = flush_events(&mut gs, receivers);
- //call heartbeat
+ // call heartbeat
gs.heartbeat();
- //Apply one more heartbeat
+ // Apply one more heartbeat
sleep(Duration::from_millis(100));
gs.heartbeat();
- //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
// is needed).
let (control_msgs, receivers) =
count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
@@ -2090,11 +2097,11 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without
"Graft message created too early within backoff period"
);
- //Heartbeat one more time this should graft now
+ // Heartbeat one more time this should graft now
sleep(Duration::from_millis(100));
gs.heartbeat();
- //check that graft got created
+ // check that graft got created
let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
assert!(
control_msgs > 0,
@@ -2181,7 +2188,7 @@ fn test_flood_publish() {
.to_subscribe(true)
.create_network();
- //publish message
+ // publish message
let publish_data = vec![0; 42];
gs.publish(Topic::new(topic), publish_data).unwrap();
@@ -2228,15 +2235,15 @@ fn test_flood_publish() {
fn test_gossip_to_at_least_gossip_lazy_peers() {
let config: Config = Config::default();
- //add more peers than in mesh to test gossipping
- //by default only mesh_n_low peers will get added to mesh
+ // add more peers than in mesh to test gossiping
+ // by default only mesh_n_low peers will get added to mesh
let (mut gs, _, receivers, topic_hashes) = inject_nodes1()
.peer_no(config.mesh_n_low() + config.gossip_lazy() + 1)
.topics(vec!["topic".into()])
.to_subscribe(true)
.create_network();
- //receive message
+ // receive message
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2248,7 +2255,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
};
gs.handle_received_message(raw_message.clone(), &PeerId::random());
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// Transform the inbound message
@@ -2256,7 +2263,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
let msg_id = gs.config.message_id(message);
- //check that exactly config.gossip_lazy() many gossip messages were sent.
+ // check that exactly config.gossip_lazy() many gossip messages were sent.
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
RpcOut::IHave(IHave {
topic_hash,
@@ -2271,7 +2278,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
fn test_gossip_to_at_most_gossip_factor_peers() {
let config: Config = Config::default();
- //add a lot of peers
+ // add a lot of peers
let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize;
let (mut gs, _, receivers, topic_hashes) = inject_nodes1()
.peer_no(m)
@@ -2279,7 +2286,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
.to_subscribe(true)
.create_network();
- //receive message
+ // receive message
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2291,14 +2298,14 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
};
gs.handle_received_message(raw_message.clone(), &PeerId::random());
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// Transform the inbound message
let message = &gs.data_transform.inbound_transform(raw_message).unwrap();
let msg_id = gs.config.message_id(message);
- //check that exactly config.gossip_lazy() many gossip messages were sent.
+ // check that exactly config.gossip_lazy() many gossip messages were sent.
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
RpcOut::IHave(IHave {
topic_hash,
@@ -2316,7 +2323,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
let config: Config = Config::default();
- //enough peers to fill the mesh
+ // enough peers to fill the mesh
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2328,30 +2335,30 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
gs.handle_graft(&peer, topics.clone());
}
- //assert current mesh size
+ // assert current mesh size
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high());
- //create an outbound and an inbound peer
+ // create an outbound and an inbound peer
let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false);
let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false);
- //send grafts
+ // send grafts
gs.handle_graft(&inbound, vec![topics[0].clone()]);
gs.handle_graft(&outbound, vec![topics[0].clone()]);
- //assert mesh size
+ // assert mesh size
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1);
- //inbound is not in mesh
+ // inbound is not in mesh
assert!(!gs.mesh[&topics[0]].contains(&inbound));
- //outbound is in mesh
+ // outbound is in mesh
assert!(gs.mesh[&topics[0]].contains(&outbound));
}
#[test]
fn test_do_not_remove_too_many_outbound_peers() {
- //use an extreme case to catch errors with high probability
+ // use an extreme case to catch errors with high probability
let m = 50;
let n = 2 * m;
let config = ConfigBuilder::default()
@@ -2362,7 +2369,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
.build()
.unwrap();
- //fill the mesh with inbound connections
+ // fill the mesh with inbound connections
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
.topics(vec!["test".into()])
@@ -2375,7 +2382,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
gs.handle_graft(&peer, topics.clone());
}
- //create m outbound connections and graft (we will accept the graft)
+ // create m outbound connections and graft (we will accept the graft)
let mut outbound = HashSet::new();
for _ in 0..m {
let (peer, _) = add_peer(&mut gs, &topics, true, false);
@@ -2383,7 +2390,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
gs.handle_graft(&peer, topics.clone());
}
- //mesh is overly full
+ // mesh is overly full
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m);
// run a heartbeat
@@ -2392,7 +2399,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
// Peers should be removed to reach n
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n);
- //all outbound peers are still in the mesh
+ // all outbound peers are still in the mesh
assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p)));
}
@@ -2412,7 +2419,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() {
gs.handle_graft(&peer, topics.clone());
}
- //create config.mesh_outbound_min() many outbound connections without grafting
+ // create config.mesh_outbound_min() many outbound connections without grafting
let mut peers = vec![];
for _ in 0..config.mesh_outbound_min() {
peers.push(add_peer(&mut gs, &topics, true, false));
@@ -2435,7 +2442,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() {
fn test_prune_negative_scored_peers() {
let config = Config::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2449,16 +2456,16 @@ fn test_prune_negative_scored_peers() {
)))
.create_network();
- //add penalty to peer
+ // add penalty to peer
gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1);
- //execute heartbeat
+ // execute heartbeat
gs.heartbeat();
- //peer should not be in mesh anymore
+ // peer should not be in mesh anymore
assert!(gs.mesh[&topics[0]].is_empty());
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -2481,7 +2488,7 @@ fn test_prune_negative_scored_peers() {
#[test]
fn test_dont_graft_to_negative_scored_peers() {
let config = Config::default();
- //init full mesh
+ // init full mesh
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2493,34 +2500,34 @@ fn test_dont_graft_to_negative_scored_peers() {
)))
.create_network();
- //add two additional peers that will not be part of the mesh
+ // add two additional peers that will not be part of the mesh
let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false);
let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false);
- //reduce score of p1 to negative
+ // reduce score of p1 to negative
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1);
- //handle prunes of all other peers
+ // handle prunes of all other peers
for p in peers {
gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]);
}
- //heartbeat
+ // heartbeat
gs.heartbeat();
- //assert that mesh only contains p2
+ // assert that mesh only contains p2
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1);
assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2));
}
-///Note that in this test also without a penalty the px would be ignored because of the
+/// Note that in this test also without a penalty the px would be ignored because of the
/// acceptPXThreshold, but the spec still explicitly states the rule that px from negative
/// peers should get ignored, therefore we test it here.
#[test]
fn test_ignore_px_from_negative_scored_peer() {
let config = Config::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2532,10 +2539,10 @@ fn test_ignore_px_from_negative_scored_peer() {
)))
.create_network();
- //penalize peer
+ // penalize peer
gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1);
- //handle prune from single peer with px peers
+ // handle prune from single peer with px peers
let px = vec![PeerInfo {
peer_id: Some(PeerId::random()),
}];
@@ -2549,7 +2556,7 @@ fn test_ignore_px_from_negative_scored_peer() {
)],
);
- //assert no dials
+ // assert no dials
assert_eq!(
gs.events
.iter()
@@ -2760,7 +2767,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() {
collected_messages
});
- //the message got sent to p2
+ // the message got sent to p2
assert!(sent_messages
.iter()
.map(|(peer_id, msg)| (
@@ -2768,7 +2775,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() {
gs.data_transform.inbound_transform(msg.clone()).unwrap()
))
.any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id));
- //the message got not sent to p1
+ // the message got not sent to p1
assert!(sent_messages
.iter()
.map(|(peer_id, msg)| (
@@ -2786,7 +2793,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() {
gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight,
..PeerScoreThresholds::default()
};
- //build full mesh
+ // build full mesh
let (mut gs, peers, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2802,21 +2809,21 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() {
gs.handle_graft(&peer, topics.clone());
}
- //add two additional peers that will not be part of the mesh
+ // add two additional peers that will not be part of the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.gossip_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.gossip_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //message that other peers have
+ // message that other peers have
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2863,31 +2870,31 @@ fn test_do_not_publish_to_peer_below_publish_threshold() {
..PeerScoreThresholds::default()
};
- //build mesh with no peers and no subscribed topics
+ // build mesh with no peers and no subscribed topics
let (mut gs, _, mut receivers, _) = inject_nodes1()
.gs_config(config)
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //create a new topic for which we are not subscribed
+ // create a new topic for which we are not subscribed
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.publish_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.publish_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //a heartbeat will remove the peers from the mesh
+ // a heartbeat will remove the peers from the mesh
gs.heartbeat();
// publish on topic
@@ -2907,7 +2914,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() {
collected_publish
});
- //assert only published to p2
+ // assert only published to p2
assert_eq!(publishes.len(), 1);
assert_eq!(publishes[0].0, p2);
}
@@ -2921,28 +2928,28 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() {
publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight,
..PeerScoreThresholds::default()
};
- //build mesh with no peers
+ // build mesh with no peers
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.topics(vec!["test".into()])
.gs_config(config)
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.publish_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.publish_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //a heartbeat will remove the peers from the mesh
+ // a heartbeat will remove the peers from the mesh
gs.heartbeat();
// publish on topic
@@ -2962,7 +2969,7 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() {
collected_publish
});
- //assert only published to p2
+ // assert only published to p2
assert_eq!(publishes.len(), 1);
assert!(publishes[0].0 == p2);
}
@@ -2978,23 +2985,23 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
..PeerScoreThresholds::default()
};
- //build mesh with no peers
+ // build mesh with no peers
let (mut gs, _, _, topics) = inject_nodes1()
.topics(vec!["test".into()])
.gs_config(config.clone())
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false);
let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false);
- //reduce score of p1 below peer_score_thresholds.graylist_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.graylist_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below publish_threshold but not below graylist_threshold
+ // reduce score of p2 below publish_threshold but not below graylist_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
let raw_message1 = RawMessage {
@@ -3053,10 +3060,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
message_ids: vec![config.message_id(message2)],
});
- //clear events
+ // clear events
gs.events.clear();
- //receive from p1
+ // receive from p1
gs.on_connection_handler_event(
p1,
ConnectionId::new_unchecked(0),
@@ -3070,7 +3077,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
},
);
- //only the subscription event gets processed, the rest is dropped
+ // only the subscription event gets processed, the rest is dropped
assert_eq!(gs.events.len(), 1);
assert!(matches!(
gs.events[0],
@@ -3082,7 +3089,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
message_ids: vec![config.message_id(message4)],
});
- //receive from p2
+ // receive from p2
gs.on_connection_handler_event(
p2,
ConnectionId::new_unchecked(0),
@@ -3096,7 +3103,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
},
);
- //events got processed
+ // events got processed
assert!(gs.events.len() > 1);
}
@@ -3145,7 +3152,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() {
0
);
- //handle prune from peer peers[1] with px peers
+ // handle prune from peer peers[1] with px peers
let px = vec![PeerInfo {
peer_id: Some(PeerId::random()),
}];
@@ -3158,7 +3165,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() {
)],
);
- //assert there are dials now
+ // assert there are dials now
assert!(
gs.events
.iter()
@@ -3178,7 +3185,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() {
.build()
.unwrap();
- //build mesh with more peers than mesh can hold
+ // build mesh with more peers than mesh can hold
let n = config.mesh_n_high() + 1;
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
@@ -3198,21 +3205,21 @@ fn test_keep_best_scoring_peers_on_oversubscription() {
gs.handle_graft(peer, topics.clone());
}
- //assign scores to peers equalling their index
+ // assign scores to peers equalling their index
- //set random positive scores
+ // set random positive scores
for (index, peer) in peers.iter().enumerate() {
gs.set_application_score(peer, index as f64);
}
assert_eq!(gs.mesh[&topics[0]].len(), n);
- //heartbeat to prune some peers
+ // heartbeat to prune some peers
gs.heartbeat();
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n());
- //mesh contains retain_scores best peers
+ // mesh contains retain_scores best peers
assert!(gs.mesh[&topics[0]].is_superset(
&peers[(n - config.retain_scores())..]
.iter()
@@ -3239,7 +3246,7 @@ fn test_scoring_p1() {
.insert(topic_hash, topic_params.clone());
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, _) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3250,9 +3257,9 @@ fn test_scoring_p1() {
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //sleep for 2 times the mesh_quantum
+ // sleep for 2 times the mesh_quantum
sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0])
@@ -3265,9 +3272,9 @@ fn test_scoring_p1() {
"score should be less than 3 * time_in_mesh_weight * topic_weight"
);
- //sleep again for 2 times the mesh_quantum
+ // sleep again for 2 times the mesh_quantum
sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0])
@@ -3275,9 +3282,9 @@ fn test_scoring_p1() {
"score should be at least 4 * time_in_mesh_weight * topic_weight"
);
- //sleep for enough periods to reach maximum
+ // sleep for enough periods to reach maximum
sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
@@ -3309,7 +3316,7 @@ fn test_scoring_p2() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
first_message_deliveries_weight: 2.0,
first_message_deliveries_cap: 10.0,
first_message_deliveries_decay: 0.9,
@@ -3321,7 +3328,7 @@ fn test_scoring_p2() {
.insert(topic_hash, topic_params.clone());
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3338,9 +3345,9 @@ fn test_scoring_p2() {
};
let m1 = random_message(&mut seq, &topics);
- //peer 0 delivers message first
+ // peer 0 delivers message first
deliver_message(&mut gs, 0, m1.clone());
- //peer 1 delivers message second
+ // peer 1 delivers message second
deliver_message(&mut gs, 1, m1);
assert_eq!(
@@ -3355,7 +3362,7 @@ fn test_scoring_p2() {
"there should be no score for second message deliveries * topic_weight"
);
- //peer 2 delivers two new messages
+ // peer 2 delivers two new messages
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
assert_eq!(
@@ -3364,7 +3371,7 @@ fn test_scoring_p2() {
"score should be exactly 2 * first_message_deliveries_weight * topic_weight"
);
- //test decaying
+ // test decaying
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
@@ -3385,7 +3392,7 @@ fn test_scoring_p2() {
first_message_deliveries_weight * topic_weight"
);
- //test cap
+ // test cap
for _ in 0..topic_params.first_message_deliveries_cap as u64 {
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
}
@@ -3407,8 +3414,8 @@ fn test_scoring_p3() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
+ first_message_deliveries_weight: 0.0, // deactivate first time deliveries
mesh_message_deliveries_weight: -2.0,
mesh_message_deliveries_decay: 0.9,
mesh_message_deliveries_cap: 10.0,
@@ -3421,7 +3428,7 @@ fn test_scoring_p3() {
peer_score_params.topics.insert(topic_hash, topic_params);
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3439,35 +3446,35 @@ fn test_scoring_p3() {
let mut expected_message_deliveries = 0.0;
- //messages used to test window
+ // messages used to test window
let m1 = random_message(&mut seq, &topics);
let m2 = random_message(&mut seq, &topics);
- //peer 1 delivers m1
+ // peer 1 delivers m1
deliver_message(&mut gs, 1, m1.clone());
- //peer 0 delivers two message
+ // peer 0 delivers two message
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 2.0;
sleep(Duration::from_millis(60));
- //peer 1 delivers m2
+ // peer 1 delivers m2
deliver_message(&mut gs, 1, m2.clone());
sleep(Duration::from_millis(70));
- //peer 0 delivers m1 and m2 only m2 gets counted
+ // peer 0 delivers m1 and m2 only m2 gets counted
deliver_message(&mut gs, 0, m1);
deliver_message(&mut gs, 0, m2);
expected_message_deliveries += 1.0;
sleep(Duration::from_millis(900));
- //message deliveries penalties get activated, peer 0 has only delivered 3 messages and
+ // message deliveries penalties get activated, peer 0 has only delivered 3 messages and
// therefore gets a penalty
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
@@ -3483,10 +3490,10 @@ fn test_scoring_p3() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //apply 10 decays
+ // apply 10 decays
for _ in 0..10 {
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
}
assert_eq!(
@@ -3505,8 +3512,8 @@ fn test_scoring_p3b() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
+ first_message_deliveries_weight: 0.0, // deactivate first time deliveries
mesh_message_deliveries_weight: -2.0,
mesh_message_deliveries_decay: 0.9,
mesh_message_deliveries_cap: 10.0,
@@ -3522,7 +3529,7 @@ fn test_scoring_p3b() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3540,49 +3547,49 @@ fn test_scoring_p3b() {
let mut expected_message_deliveries = 0.0;
- //add some positive score
+ // add some positive score
gs.peer_score
.as_mut()
.unwrap()
.0
.set_application_score(&peers[0], 100.0);
- //peer 0 delivers two message
+ // peer 0 delivers two message
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 2.0;
sleep(Duration::from_millis(1050));
- //activation kicks in
+ // activation kicks in
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
- //prune peer
+ // prune peer
gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]);
- //wait backoff
+ // wait backoff
sleep(Duration::from_millis(130));
- //regraft peer
+ // regraft peer
gs.handle_graft(&peers[0], topics.clone());
- //the score should now consider p3b
+ // the score should now consider p3b
let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2);
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
100.0 + expected_b3 * -3.0 * 0.7
);
- //we can also add a new p3 to the score
+ // we can also add a new p3 to the score
- //peer 0 delivers one message
+ // peer 0 delivers one message
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 1.0;
sleep(Duration::from_millis(1050));
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
expected_b3 *= 0.95;
assert_eq!(
@@ -3601,10 +3608,14 @@ fn test_scoring_p4_valid_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3614,7 +3625,7 @@ fn test_scoring_p4_valid_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3630,7 +3641,7 @@ fn test_scoring_p4_valid_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers valid message
+ // peer 0 delivers valid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3639,7 +3650,7 @@ fn test_scoring_p4_valid_message() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //message m1 gets validated
+ // message m1 gets validated
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3659,10 +3670,14 @@ fn test_scoring_p4_invalid_signature() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3672,7 +3687,7 @@ fn test_scoring_p4_invalid_signature() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3685,7 +3700,7 @@ fn test_scoring_p4_invalid_signature() {
let mut seq = 0;
- //peer 0 delivers message with invalid signature
+ // peer 0 delivers message with invalid signature
let m = random_message(&mut seq, &topics);
gs.on_connection_handler_event(
@@ -3717,10 +3732,14 @@ fn test_scoring_p4_message_from_self() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3730,7 +3749,7 @@ fn test_scoring_p4_message_from_self() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3746,7 +3765,7 @@ fn test_scoring_p4_message_from_self() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message from self
+ // peer 0 delivers invalid message from self
let mut m = random_message(&mut seq, &topics);
m.source = Some(*gs.publish_config.get_own_id().unwrap());
@@ -3767,10 +3786,14 @@ fn test_scoring_p4_ignored_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3780,7 +3803,7 @@ fn test_scoring_p4_ignored_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3796,7 +3819,7 @@ fn test_scoring_p4_ignored_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers ignored message
+ // peer 0 delivers ignored message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3805,7 +3828,7 @@ fn test_scoring_p4_ignored_message() {
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
- //message m1 gets ignored
+ // message m1 gets ignored
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3825,10 +3848,14 @@ fn test_scoring_p4_application_invalidated_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3838,7 +3865,7 @@ fn test_scoring_p4_application_invalidated_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3854,7 +3881,7 @@ fn test_scoring_p4_application_invalidated_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3863,7 +3890,7 @@ fn test_scoring_p4_application_invalidated_message() {
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3886,10 +3913,14 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3899,7 +3930,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3915,20 +3946,20 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap();
- //peer 1 delivers same message
+ // peer 1 delivers same message
deliver_message(&mut gs, 1, m1);
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0);
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3955,10 +3986,14 @@ fn test_scoring_p4_three_application_invalid_messages() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3968,7 +4003,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3984,7 +4019,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers two invalid message
+ // peer 0 delivers two invalid message
let m1 = random_message(&mut seq, &topics);
let m2 = random_message(&mut seq, &topics);
let m3 = random_message(&mut seq, &topics);
@@ -4002,7 +4037,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //messages gets rejected
+ // messages gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -4021,7 +4056,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
MessageAcceptance::Reject,
);
- //number of invalid messages gets squared
+ // number of invalid messages gets squared
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
9.0 * -2.0 * 0.7
@@ -4038,10 +4073,14 @@ fn test_scoring_p4_decay() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -4051,7 +4090,7 @@ fn test_scoring_p4_decay() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -4067,7 +4106,7 @@ fn test_scoring_p4_decay() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -4075,7 +4114,7 @@ fn test_scoring_p4_decay() {
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -4087,7 +4126,7 @@ fn test_scoring_p4_decay() {
-2.0 * 0.7
);
- //we decay
+ // we decay
gs.peer_score.as_mut().unwrap().0.refresh_scores();
// the number of invalids gets decayed to 0.9 and then squared in the score
@@ -4104,7 +4143,7 @@ fn test_scoring_p5() {
..PeerScoreParams::default()
};
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, _) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -4141,7 +4180,7 @@ fn test_scoring_p6() {
.scoring(Some((peer_score_params, PeerScoreThresholds::default())))
.create_network();
- //create 5 peers with the same ip
+ // create 5 peers with the same ip
let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3));
let peers = vec![
add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0,
@@ -4151,7 +4190,7 @@ fn test_scoring_p6() {
add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0,
];
- //create 4 other peers with other ip
+ // create 4 other peers with other ip
let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4));
let others = vec![
add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0,
@@ -4160,12 +4199,12 @@ fn test_scoring_p6() {
add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0,
];
- //no penalties yet
+ // no penalties yet
for peer in peers.iter().chain(others.iter()) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0);
}
- //add additional connection for 3 others with addr
+ // add additional connection for 3 others with addr
for id in others.iter().take(3) {
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: *id,
@@ -4180,14 +4219,14 @@ fn test_scoring_p6() {
}));
}
- //penalties apply squared
+ // penalties apply squared
for peer in peers.iter().chain(others.iter().take(3)) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
- //fourth other peer still no penalty
+ // fourth other peer still no penalty
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0);
- //add additional connection for 3 of the peers to addr2
+ // add additional connection for 3 of the peers to addr2
for peer in peers.iter().take(3) {
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: *peer,
@@ -4202,7 +4241,7 @@ fn test_scoring_p6() {
}));
}
- //double penalties for the first three of each
+ // double penalties for the first three of each
for peer in peers.iter().take(3).chain(others.iter().take(3)) {
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(peer),
@@ -4210,7 +4249,7 @@ fn test_scoring_p6() {
);
}
- //single penalties for the rest
+ // single penalties for the rest
for peer in peers.iter().skip(3) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
@@ -4219,7 +4258,7 @@ fn test_scoring_p6() {
4.0 * -2.0
);
- //two times same ip doesn't count twice
+ // two times same ip doesn't count twice
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: peers[0],
connection_id: ConnectionId::new_unchecked(0),
@@ -4232,8 +4271,8 @@ fn test_scoring_p6() {
other_established: 2,
}));
- //nothing changed
- //double penalties for the first three of each
+ // nothing changed
+ // double penalties for the first three of each
for peer in peers.iter().take(3).chain(others.iter().take(3)) {
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(peer),
@@ -4241,7 +4280,7 @@ fn test_scoring_p6() {
);
}
- //single penalties for the rest
+ // single penalties for the rest
for peer in peers.iter().skip(3) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
@@ -4274,7 +4313,7 @@ fn test_scoring_p7_grafts_before_backoff() {
.scoring(Some((peer_score_params, PeerScoreThresholds::default())))
.create_network();
- //remove peers from mesh and send prune to them => this adds a backoff for the peers
+ // remove peers from mesh and send prune to them => this adds a backoff for the peers
for peer in peers.iter().take(2) {
gs.mesh.get_mut(&topics[0]).unwrap().remove(peer);
gs.send_graft_prune(
@@ -4284,31 +4323,31 @@ fn test_scoring_p7_grafts_before_backoff() {
);
}
- //wait 50 millisecs
+ // wait 50 millisecs
sleep(Duration::from_millis(50));
- //first peer tries to graft
+ // first peer tries to graft
gs.handle_graft(&peers[0], vec![topics[0].clone()]);
- //double behaviour penalty for first peer (squared)
+ // double behaviour penalty for first peer (squared)
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
4.0 * -2.0
);
- //wait 100 millisecs
+ // wait 100 millisecs
sleep(Duration::from_millis(100));
- //second peer tries to graft
+ // second peer tries to graft
gs.handle_graft(&peers[1], vec![topics[0].clone()]);
- //single behaviour penalty for second peer
+ // single behaviour penalty for second peer
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
1.0 * -2.0
);
- //test decay
+ // test decay
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
@@ -4327,7 +4366,7 @@ fn test_opportunistic_grafting() {
.mesh_n_low(3)
.mesh_n(5)
.mesh_n_high(7)
- .mesh_outbound_min(0) //deactivate outbound handling
+ .mesh_outbound_min(0) // deactivate outbound handling
.opportunistic_graft_ticks(2)
.opportunistic_graft_peers(2)
.build()
@@ -4351,30 +4390,30 @@ fn test_opportunistic_grafting() {
.scoring(Some((peer_score_params, thresholds)))
.create_network();
- //fill mesh with 5 peers
+ // fill mesh with 5 peers
for peer in &peers {
gs.handle_graft(peer, topics.clone());
}
- //add additional 5 peers
+ // add additional 5 peers
let others: Vec<_> = (0..5)
.map(|_| add_peer(&mut gs, &topics, false, false))
.collect();
- //currently mesh equals peers
+ // currently mesh equals peers
assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect());
- //give others high scores (but the first two have not high enough scores)
+ // give others high scores (but the first two have not high enough scores)
for (i, peer) in peers.iter().enumerate().take(5) {
gs.set_application_score(peer, 0.0 + i as f64);
}
- //set scores for peers in the mesh
+ // set scores for peers in the mesh
for (i, (peer, _receiver)) in others.iter().enumerate().take(5) {
gs.set_application_score(peer, 0.0 + i as f64);
}
- //this gives a median of exactly 2.0 => should not apply opportunistic grafting
+ // this gives a median of exactly 2.0 => should not apply opportunistic grafting
gs.heartbeat();
gs.heartbeat();
@@ -4384,10 +4423,10 @@ fn test_opportunistic_grafting() {
"should not apply opportunistic grafting"
);
- //reduce middle score to 1.0 giving a median of 1.0
+ // reduce middle score to 1.0 giving a median of 1.0
gs.set_application_score(&peers[2], 1.0);
- //opportunistic grafting after two heartbeats
+ // opportunistic grafting after two heartbeats
gs.heartbeat();
assert_eq!(
@@ -4417,17 +4456,17 @@ fn test_opportunistic_grafting() {
#[test]
fn test_ignore_graft_from_unknown_topic() {
- //build gossipsub without subscribing to any topics
+ // build gossipsub without subscribing to any topics
let (mut gs, peers, receivers, _) = inject_nodes1()
.peer_no(1)
.topics(vec![])
.to_subscribe(false)
.create_network();
- //handle an incoming graft for some topic
+ // handle an incoming graft for some topic
gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]);
- //assert that no prune got created
+ // assert that no prune got created
let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. }));
assert_eq!(
control_msgs, 0,
@@ -4438,18 +4477,18 @@ fn test_ignore_graft_from_unknown_topic() {
#[test]
fn test_ignore_too_many_iwants_from_same_peer_for_same_message() {
let config = Config::default();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
.to_subscribe(false)
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //receive a message
+ // receive a message
let mut seq = 0;
let m1 = random_message(&mut seq, &topics);
@@ -4460,10 +4499,10 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() {
gs.handle_received_message(m1, &PeerId::random());
- //clear events
+ // clear events
let receivers = flush_events(&mut gs, receivers);
- //the first gossip_retransimission many iwants return the valid message, all others are
+ // the first gossip_retransimission many iwants return the valid message, all others are
// ignored.
for _ in 0..(2 * config.gossip_retransimission() + 10) {
gs.handle_iwant(&peer, vec![id.clone()]);
@@ -4490,7 +4529,7 @@ fn test_ignore_too_many_ihaves() {
.max_ihave_messages(10)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4498,15 +4537,15 @@ fn test_ignore_too_many_ihaves() {
.gs_config(config.clone())
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //peer has 20 messages
+ // peer has 20 messages
let mut seq = 0;
let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect();
- //peer sends us one ihave for each message in order
+ // peer sends us one ihave for each message in order
for raw_message in &messages {
// Transform the inbound message
let message = &gs
@@ -4527,7 +4566,7 @@ fn test_ignore_too_many_ihaves() {
.map(|m| config.message_id(&m))
.collect();
- //we send iwant only for the first 10 messages
+ // we send iwant only for the first 10 messages
let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| {
p == &peer
&& matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))
@@ -4537,7 +4576,7 @@ fn test_ignore_too_many_ihaves() {
"exactly the first ten ihaves should be processed and one iwant for each created"
);
- //after a heartbeat everything is forgotten
+ // after a heartbeat everything is forgotten
gs.heartbeat();
for raw_message in messages[10..].iter() {
@@ -4553,7 +4592,7 @@ fn test_ignore_too_many_ihaves() {
);
}
- //we sent iwant for all 10 messages
+ // we sent iwant for all 10 messages
let (control_msgs, _) = count_control_msgs(receivers, |p, action| {
p == &peer
&& matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1)
@@ -4568,7 +4607,7 @@ fn test_ignore_too_many_messages_in_ihave() {
.max_ihave_length(10)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4576,19 +4615,19 @@ fn test_ignore_too_many_messages_in_ihave() {
.gs_config(config.clone())
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //peer has 20 messages
+ // peer has 30 messages
let mut seq = 0;
- let message_ids: Vec<_> = (0..20)
+ let message_ids: Vec<_> = (0..30)
.map(|_| random_message(&mut seq, &topics))
.map(|msg| gs.data_transform.inbound_transform(msg).unwrap())
.map(|msg| config.message_id(&msg))
.collect();
- //peer sends us three ihaves
+ // peer sends us three ihaves
gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]);
gs.handle_ihave(
&peer,
@@ -4601,7 +4640,7 @@ fn test_ignore_too_many_messages_in_ihave() {
let first_twelve: HashSet<_> = message_ids.iter().take(12).collect();
- //we send iwant only for the first 10 messages
+ // we send iwant only for the first 10 messages
let mut sum = 0;
let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc {
RpcOut::IWant(IWant { message_ids }) => {
@@ -4620,14 +4659,14 @@ fn test_ignore_too_many_messages_in_ihave() {
assert_eq!(sum, 10, "exactly the first ten ihaves should be processed");
- //after a heartbeat everything is forgotten
+ // after a heartbeat everything is forgotten
gs.heartbeat();
gs.handle_ihave(
&peer,
- vec![(topics[0].clone(), message_ids[10..20].to_vec())],
+ vec![(topics[0].clone(), message_ids[20..30].to_vec())],
);
- //we sent 10 iwant messages ids via a IWANT rpc.
+ // we sent 10 iwant messages ids via a IWANT rpc.
let mut sum = 0;
let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc {
RpcOut::IWant(IWant { message_ids }) => {
@@ -4649,7 +4688,7 @@ fn test_limit_number_of_message_ids_inside_ihave() {
.max_ihave_length(100)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, peers, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4657,24 +4696,24 @@ fn test_limit_number_of_message_ids_inside_ihave() {
.gs_config(config)
.create_network();
- //graft to all peers to really fill the mesh with all the peers
+ // graft to all peers to really fill the mesh with all the peers
for peer in peers {
gs.handle_graft(&peer, topics.clone());
}
- //add two other peers not in the mesh
+ // add two other peers not in the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //receive 200 messages from another peer
+ // receive 200 messages from another peer
let mut seq = 0;
for _ in 0..200 {
gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random());
}
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// both peers should have gotten 100 random ihave messages, to assert the randomness, we
@@ -4727,12 +4766,7 @@ fn test_limit_number_of_message_ids_inside_ihave() {
#[test]
fn test_iwant_penalties() {
- /*
- use tracing_subscriber::EnvFilter;
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
- */
+ libp2p_test_utils::with_default_env_filter();
let config = ConfigBuilder::default()
.iwant_followup_time(Duration::from_secs(4))
.build()
@@ -4862,7 +4896,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() {
.gs_config(config)
.create_network();
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let (p1, receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -4877,10 +4911,10 @@ fn test_publish_to_floodsub_peers_without_flood_publish() {
add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
receivers.insert(p2, receiver2);
- //p1 and p2 are not in the mesh
+ // p1 and p2 are not in the mesh
assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2));
- //publish a message
+ // publish a message
let publish_data = vec![0; 42];
gs.publish(Topic::new("test"), publish_data).unwrap();
@@ -4921,7 +4955,7 @@ fn test_do_not_use_floodsub_in_fanout() {
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let (p1, receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -4936,7 +4970,7 @@ fn test_do_not_use_floodsub_in_fanout() {
add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
receivers.insert(p2, receiver2);
- //publish a message
+ // publish a message
let publish_data = vec![0; 42];
gs.publish(Topic::new("test"), publish_data).unwrap();
@@ -4977,7 +5011,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() {
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5004,7 +5038,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
.to_subscribe(false)
.create_network();
- //add an old gossipsub peer
+ // add an old gossipsub peer
let (p1, _receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5014,14 +5048,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
Some(PeerKind::Gossipsub),
);
- //prune the peer
+ // prune the peer
gs.send_graft_prune(
HashMap::new(),
vec![(p1, topics.clone())].into_iter().collect(),
HashSet::new(),
);
- //check that prune does not contain px
+ // check that prune does not contain px
let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m {
RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(),
_ => false,
@@ -5031,14 +5065,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
#[test]
fn test_dont_send_floodsub_peers_in_px() {
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //add two floodsub peers
+ // add two floodsub peers
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5049,14 +5083,14 @@ fn test_dont_send_floodsub_peers_in_px() {
);
let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
- //prune only mesh node
+ // prune only mesh node
gs.send_graft_prune(
HashMap::new(),
vec![(peers[0], topics.clone())].into_iter().collect(),
HashSet::new(),
);
- //check that px in prune message is empty
+ // check that px in prune message is empty
let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m {
RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(),
_ => false,
@@ -5072,7 +5106,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() {
.to_subscribe(false)
.create_network();
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5139,7 +5173,7 @@ fn test_subscribe_to_invalid_topic() {
#[test]
fn test_subscribe_and_graft_with_negative_score() {
- //simulate a communication between two gossipsub instances
+ // simulate a communication between two gossipsub instances
let (mut gs1, _, _, topic_hashes) = inject_nodes1()
.topics(vec!["test".into()])
.scoring(Some((
@@ -5157,12 +5191,12 @@ fn test_subscribe_and_graft_with_negative_score() {
let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false);
let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false);
- //add penalty to peer p2
+ // add penalty to peer p2
gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2);
- //subscribe to topic in gs2
+ // subscribe to topic in gs2
gs2.subscribe(&topic).unwrap();
let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>,
@@ -5191,17 +5225,17 @@ fn test_subscribe_and_graft_with_negative_score() {
new_receivers
};
- //forward the subscribe message
+ // forward the subscribe message
let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers);
- //heartbeats on both
+ // heartbeats on both
gs1.heartbeat();
gs2.heartbeat();
- //forward messages again
+ // forward messages again
forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers);
- //nobody got penalized
+ // nobody got penalized
assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score);
}
@@ -5237,6 +5271,236 @@ fn test_graft_without_subscribe() {
let _ = gs.unsubscribe(&Topic::new(topic));
}
+/// Test that a node sends IDONTWANT messages to the mesh peers
+/// that run Gossipsub v1.2.
+#[test]
+fn sends_idontwant() {
+ let (mut gs, peers, receivers, topic_hashes) = inject_nodes1()
+ .peer_no(5)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_2)
+ .create_network();
+
+ let local_id = PeerId::random();
+
+ let message = RawMessage {
+ source: Some(peers[1]),
+ data: vec![12u8; 1024],
+ sequence_number: Some(0),
+ topic: topic_hashes[0].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ };
+ gs.handle_received_message(message.clone(), &local_id);
+ assert_eq!(
+ receivers
+ .into_iter()
+ .fold(0, |mut idontwants, (peer_id, c)| {
+ let non_priority = c.non_priority.get_ref();
+ while !non_priority.is_empty() {
+ if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() {
+ assert_ne!(peer_id, peers[1]);
+ idontwants += 1;
+ }
+ }
+ idontwants
+ }),
+ 3,
+ "IDONTWANT was not sent"
+ );
+}
+
+#[test]
+fn doesnt_sends_idontwant_for_lower_message_size() {
+ let (mut gs, peers, receivers, topic_hashes) = inject_nodes1()
+ .peer_no(5)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_2)
+ .create_network();
+
+ let local_id = PeerId::random();
+
+ let message = RawMessage {
+ source: Some(peers[1]),
+ data: vec![12],
+ sequence_number: Some(0),
+ topic: topic_hashes[0].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ };
+
+ gs.handle_received_message(message.clone(), &local_id);
+ assert_eq!(
+ receivers
+ .into_iter()
+ .fold(0, |mut idontwants, (peer_id, c)| {
+ let non_priority = c.non_priority.get_ref();
+ while !non_priority.is_empty() {
+ if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() {
+ assert_ne!(peer_id, peers[1]);
+ idontwants += 1;
+ }
+ }
+ idontwants
+ }),
+ 0,
+ "IDONTWANT was sent"
+ );
+}
+
+/// Test that a node doesn't send IDONTWANT messages to the mesh peers
+/// that don't run Gossipsub v1.2.
+#[test]
+fn doesnt_send_idontwant() {
+ let (mut gs, peers, receivers, topic_hashes) = inject_nodes1()
+ .peer_no(5)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_1)
+ .create_network();
+
+ let local_id = PeerId::random();
+
+ let message = RawMessage {
+ source: Some(peers[1]),
+ data: vec![12],
+ sequence_number: Some(0),
+ topic: topic_hashes[0].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ };
+ gs.handle_received_message(message.clone(), &local_id);
+ assert_eq!(
+ receivers
+ .into_iter()
+ .fold(0, |mut idontwants, (peer_id, c)| {
+ let non_priority = c.non_priority.get_ref();
+ while !non_priority.is_empty() {
+ if matches!(non_priority.try_recv(), Ok(RpcOut::IDontWant(_)) if peer_id != peers[1]) {
+ idontwants += 1;
+ }
+ }
+ idontwants
+ }),
+ 0,
+ "IDONTWANT were sent"
+ );
+}
+
+/// Test that a node doesn't forward a messages to the mesh peers
+/// that sent IDONTWANT.
+#[test]
+fn doesnt_forward_idontwant() {
+ let (mut gs, peers, receivers, topic_hashes) = inject_nodes1()
+ .peer_no(4)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_2)
+ .create_network();
+
+ let local_id = PeerId::random();
+
+ let raw_message = RawMessage {
+ source: Some(peers[1]),
+ data: vec![12],
+ sequence_number: Some(0),
+ topic: topic_hashes[0].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ };
+ let message = gs
+ .data_transform
+ .inbound_transform(raw_message.clone())
+ .unwrap();
+ let message_id = gs.config.message_id(&message);
+ let peer = gs.connected_peers.get_mut(&peers[2]).unwrap();
+ peer.dont_send.insert(message_id, Instant::now());
+
+ gs.handle_received_message(raw_message.clone(), &local_id);
+ assert_eq!(
+ receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| {
+ let non_priority = c.non_priority.get_ref();
+ while !non_priority.is_empty() {
+ if let Ok(RpcOut::Forward { .. }) = non_priority.try_recv() {
+ assert_ne!(peer_id, peers[2]);
+ fwds += 1;
+ }
+ }
+ fwds
+ }),
+ 2,
+ "IDONTWANT was not sent"
+ );
+}
+
+/// Test that a node parses an
+/// IDONTWANT message to the respective peer.
+#[test]
+fn parses_idontwant() {
+ let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1()
+ .peer_no(2)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_2)
+ .create_network();
+
+ let message_id = MessageId::new(&[0, 1, 2, 3]);
+ let rpc = Rpc {
+ messages: vec![],
+ subscriptions: vec![],
+ control_msgs: vec![ControlAction::IDontWant(IDontWant {
+ message_ids: vec![message_id.clone()],
+ })],
+ };
+ gs.on_connection_handler_event(
+ peers[1],
+ ConnectionId::new_unchecked(0),
+ HandlerEvent::Message {
+ rpc,
+ invalid_messages: vec![],
+ },
+ );
+ let peer = gs.connected_peers.get_mut(&peers[1]).unwrap();
+ assert!(peer.dont_send.get(&message_id).is_some());
+}
+
+/// Test that a node clears stale IDONTWANT messages.
+#[test]
+fn clear_stale_idontwant() {
+ let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1()
+ .peer_no(4)
+ .topics(vec![String::from("topic1")])
+ .to_subscribe(true)
+ .gs_config(Config::default())
+ .explicit(1)
+ .peer_kind(PeerKind::Gossipsubv1_2)
+ .create_network();
+
+ let peer = gs.connected_peers.get_mut(&peers[2]).unwrap();
+ peer.dont_send
+ .insert(MessageId::new(&[1, 2, 3, 4]), Instant::now());
+ std::thread::sleep(Duration::from_secs(3));
+ gs.heartbeat();
+ let peer = gs.connected_peers.get_mut(&peers[2]).unwrap();
+ assert!(peer.dont_send.is_empty());
+}
+
#[test]
fn test_all_queues_full() {
let gs_config = ConfigBuilder::default()
@@ -5260,6 +5524,7 @@ fn test_all_queues_full() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
@@ -5294,6 +5559,7 @@ fn test_slow_peer_returns_failed_publish() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
let peer_id = PeerId::random();
@@ -5305,6 +5571,7 @@ fn test_slow_peer_returns_failed_publish() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
@@ -5357,7 +5624,6 @@ fn test_slow_peer_returns_failed_ihave_handling() {
topics.insert(topic_hash.clone());
let slow_peer_id = PeerId::random();
- peers.push(slow_peer_id);
gs.connected_peers.insert(
slow_peer_id,
PeerConnections {
@@ -5365,6 +5631,7 @@ fn test_slow_peer_returns_failed_ihave_handling() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
peers.push(slow_peer_id);
@@ -5380,9 +5647,11 @@ fn test_slow_peer_returns_failed_ihave_handling() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
+ // First message.
let publish_data = vec![1; 59];
let transformed = gs
.data_transform
@@ -5402,6 +5671,22 @@ fn test_slow_peer_returns_failed_ihave_handling() {
&slow_peer_id,
vec![(topic_hash.clone(), vec![msg_id.clone()])],
);
+
+ // Second message.
+ let publish_data = vec![2; 59];
+ let transformed = gs
+ .data_transform
+ .outbound_transform(&topic_hash, publish_data.clone())
+ .unwrap();
+ let raw_message = gs
+ .build_raw_message(topic_hash.clone(), transformed)
+ .unwrap();
+ let msg_id = gs.config.message_id(&Message {
+ source: raw_message.source,
+ data: publish_data,
+ sequence_number: raw_message.sequence_number,
+ topic: raw_message.topic.clone(),
+ });
gs.handle_ihave(&slow_peer_id, vec![(topic_hash, vec![msg_id.clone()])]);
gs.heartbeat();
@@ -5458,6 +5743,7 @@ fn test_slow_peer_returns_failed_iwant_handling() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
peers.push(slow_peer_id);
@@ -5473,6 +5759,7 @@ fn test_slow_peer_returns_failed_iwant_handling() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
@@ -5548,6 +5835,7 @@ fn test_slow_peer_returns_failed_forward() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
peers.push(slow_peer_id);
@@ -5563,6 +5851,7 @@ fn test_slow_peer_returns_failed_forward() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
@@ -5643,6 +5932,7 @@ fn test_slow_peer_is_downscored_on_publish() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(2),
+ dont_send: LinkedHashMap::new(),
},
);
gs.peer_score.as_mut().unwrap().0.add_peer(slow_peer_id);
@@ -5655,6 +5945,7 @@ fn test_slow_peer_is_downscored_on_publish() {
connections: vec![ConnectionId::new_unchecked(0)],
topics: topics.clone(),
sender: Sender::new(gs.config.connection_handler_queue_len()),
+ dont_send: LinkedHashMap::new(),
},
);
diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs
index 6e7861bae10..3b0eeafcbb6 100644
--- a/protocols/gossipsub/src/config.rs
+++ b/protocols/gossipsub/src/config.rs
@@ -18,22 +18,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::borrow::Cow;
-use std::sync::Arc;
-use std::time::Duration;
-
-use crate::error::ConfigBuilderError;
-use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL};
-use crate::types::{Message, MessageId, PeerKind};
+use std::{borrow::Cow, sync::Arc, time::Duration};
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
+use crate::{
+ error::ConfigBuilderError,
+ protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL},
+ types::{Message, MessageId, PeerKind},
+};
+
/// The types of message validation that can be employed by gossipsub.
#[derive(Debug, Clone)]
pub enum ValidationMode {
- /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to
- /// be present as well as the sequence number. All messages must have valid signatures.
+ /// This is the default setting. This requires the message author to be a valid [`PeerId`] and
+ /// to be present as well as the sequence number. All messages must have valid signatures.
///
/// NOTE: This setting will reject messages from nodes using
/// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have
@@ -98,6 +98,8 @@ pub struct Config {
connection_handler_queue_len: usize,
connection_handler_publish_duration: Duration,
connection_handler_forward_duration: Duration,
+ idontwant_message_size_threshold: usize,
+ idontwant_on_publish: bool,
}
impl Config {
@@ -134,8 +136,8 @@ impl Config {
/// Affects how peers are selected when pruning a mesh due to over subscription.
///
- /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are
- /// chosen randomly (D_score in the spec, default is 4).
+ /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder
+ /// are chosen randomly (D_score in the spec, default is 4).
pub fn retain_scores(&self) -> usize {
self.retain_scores
}
@@ -371,6 +373,23 @@ impl Config {
pub fn forward_queue_duration(&self) -> Duration {
self.connection_handler_forward_duration
}
+
+ /// The message size threshold for which IDONTWANT messages are sent.
+ /// Sending IDONTWANT messages for small messages can have a negative effect to the overall
+ /// traffic and CPU load. This acts as a lower bound cutoff for the message size to which
+ /// IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2
+ /// (see )
+ /// default is 1kB
+ pub fn idontwant_message_size_threshold(&self) -> usize {
+ self.idontwant_message_size_threshold
+ }
+
+ /// Send IDONTWANT messages after publishing message on gossip. This is an optimisation
+ /// to avoid bandwidth consumption by downloading the published message over gossip.
+ /// By default it is false.
+ pub fn idontwant_on_publish(&self) -> bool {
+ self.idontwant_on_publish
+ }
}
impl Default for Config {
@@ -423,7 +442,9 @@ impl Default for ConfigBuilder {
}),
allow_self_origin: false,
do_px: false,
- prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented.
+ // NOTE: Increasing this currently has little effect until Signed
+ // records are implemented.
+ prune_peers: 0,
prune_backoff: Duration::from_secs(60),
unsubscribe_backoff: Duration::from_secs(10),
backoff_slack: 1,
@@ -441,6 +462,8 @@ impl Default for ConfigBuilder {
connection_handler_queue_len: 5000,
connection_handler_publish_duration: Duration::from_secs(5),
connection_handler_forward_duration: Duration::from_secs(1),
+ idontwant_message_size_threshold: 1000,
+ idontwant_on_publish: false,
},
invalid_protocol: false,
}
@@ -457,7 +480,8 @@ impl From for ConfigBuilder {
}
impl ConfigBuilder {
- /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`).
+ /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and
+ /// `/meshsub/1.0.0`).
pub fn protocol_id_prefix(
&mut self,
protocol_id_prefix: impl Into>,
@@ -547,8 +571,8 @@ impl ConfigBuilder {
/// Affects how peers are selected when pruning a mesh due to over subscription.
///
- /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are
- /// chosen randomly (D_score in the spec, default is 4).
+ /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the
+ /// remainder are chosen randomly (D_score in the spec, default is 4).
pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self {
self.config.retain_scores = retain_scores;
self
@@ -826,6 +850,25 @@ impl ConfigBuilder {
self
}
+ /// The message size threshold for which IDONTWANT messages are sent.
+ /// Sending IDONTWANT messages for small messages can have a negative effect to the overall
+ /// traffic and CPU load. This acts as a lower bound cutoff for the message size to which
+ /// IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2
+ /// (see )
+ /// default is 1kB
+ pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self {
+ self.config.idontwant_message_size_threshold = size;
+ self
+ }
+
+ /// Send IDONTWANT messages after publishing message on gossip. This is an optimisation
+ /// to avoid bandwidth consumption by downloading the published message over gossip.
+ /// By default it is false.
+ pub fn idontwant_on_publish(&mut self, idontwant_on_publish: bool) -> &mut Self {
+ self.config.idontwant_on_publish = idontwant_on_publish;
+ self
+ }
+
/// Constructs a [`Config`] from the given configuration and validates the settings.
pub fn build(&self) -> Result {
// check all constraints on config
@@ -896,18 +939,26 @@ impl std::fmt::Debug for Config {
"published_message_ids_cache_time",
&self.published_message_ids_cache_time,
);
+ let _ = builder.field(
+ "idontwant_message_size_threhold",
+ &self.idontwant_message_size_threshold,
+ );
+ let _ = builder.field("idontwant_on_publish", &self.idontwant_on_publish);
builder.finish()
}
}
#[cfg(test)]
mod test {
- use super::*;
- use crate::topic::IdentityHash;
- use crate::Topic;
+ use std::{
+ collections::hash_map::DefaultHasher,
+ hash::{Hash, Hasher},
+ };
+
use libp2p_core::UpgradeInfo;
- use std::collections::hash_map::DefaultHasher;
- use std::hash::{Hash, Hasher};
+
+ use super::*;
+ use crate::{topic::IdentityHash, Topic};
#[test]
fn create_config_with_message_id_as_plain_function() {
diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs
index 047d50f2338..eae4c51214e 100644
--- a/protocols/gossipsub/src/error.rs
+++ b/protocols/gossipsub/src/error.rs
@@ -36,8 +36,8 @@ pub enum PublishError {
MessageTooLarge,
/// The compression algorithm failed.
TransformFailed(std::io::Error),
- /// Messages could not be sent because the queues for all peers were full. The usize represents the
- /// number of peers that were attempted.
+ /// Messages could not be sent because the queues for all peers were full. The usize represents
+ /// the number of peers that were attempted.
AllQueuesFull(usize),
}
diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs
index 9a074fd61fc..24ac80d2755 100644
--- a/protocols/gossipsub/src/generated/gossipsub/pb.rs
+++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs
@@ -154,6 +154,7 @@ pub struct ControlMessage {
pub iwant: Vec<ControlIWant>,
pub graft: Vec<ControlGraft>,
pub prune: Vec<ControlPrune>,
+ pub idontwant: Vec<ControlIDontWant>,
}
impl<'a> MessageRead<'a> for ControlMessage {
@@ -165,6 +166,7 @@ impl<'a> MessageRead<'a> for ControlMessage {
Ok(18) => msg.iwant.push(r.read_message::<ControlIWant>(bytes)?),
Ok(26) => msg.graft.push(r.read_message::<ControlGraft>(bytes)?),
Ok(34) => msg.prune.push(r.read_message::<ControlPrune>(bytes)?),
+ Ok(42) => msg.idontwant.push(r.read_message::<ControlIDontWant>(bytes)?),
Ok(t) => { r.read_unknown(bytes, t)?; }
Err(e) => return Err(e),
}
@@ -180,6 +182,7 @@ impl MessageWrite for ControlMessage {
+ self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
+ self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
+ self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
+ + self.idontwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
}
fn write_message(&self, w: &mut Writer) -> Result<()> {
@@ -187,6 +190,7 @@ impl MessageWrite for ControlMessage {
for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; }
for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; }
for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; }
+ for s in &self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; }
Ok(())
}
}
@@ -331,6 +335,38 @@ impl MessageWrite for ControlPrune {
}
}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Debug, Default, PartialEq, Clone)]
+pub struct ControlIDontWant {
+ pub message_ids: Vec<Vec<u8>>,
+}
+
+impl<'a> MessageRead<'a> for ControlIDontWant {
+ fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
+ let mut msg = Self::default();
+ while !r.is_eof() {
+ match r.next_tag(bytes) {
+ Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()),
+ Ok(t) => { r.read_unknown(bytes, t)?; }
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(msg)
+ }
+}
+
+impl MessageWrite for ControlIDontWant {
+ fn get_size(&self) -> usize {
+ 0
+ + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
+ }
+
+ fn write_message(&self, w: &mut Writer) -> Result<()> {
+ for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
+ Ok(())
+ }
+}
+
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct PeerInfo {
diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto
index 2ce12f3f37f..fe4d3bc9366 100644
--- a/protocols/gossipsub/src/generated/rpc.proto
+++ b/protocols/gossipsub/src/generated/rpc.proto
@@ -19,8 +19,8 @@ message Message {
optional bytes data = 2;
optional bytes seqno = 3;
required string topic = 4;
- optional bytes signature = 5;
- optional bytes key = 6;
+ optional bytes signature = 5;
+ optional bytes key = 6;
}
message ControlMessage {
@@ -28,6 +28,7 @@ message ControlMessage {
repeated ControlIWant iwant = 2;
repeated ControlGraft graft = 3;
repeated ControlPrune prune = 4;
+ repeated ControlIDontWant idontwant = 5;
}
message ControlIHave {
@@ -49,6 +50,10 @@ message ControlPrune {
optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds)
}
+message ControlIDontWant {
+ repeated bytes message_ids = 1;
+}
+
message PeerInfo {
optional bytes peer_id = 1;
optional bytes signed_peer_record = 2;
diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs
index bdf58b74fc2..284ba7cab01 100644
--- a/protocols/gossipsub/src/gossip_promises.rs
+++ b/protocols/gossipsub/src/gossip_promises.rs
@@ -18,13 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::peer_score::RejectReason;
-use crate::MessageId;
-use crate::ValidationError;
-use libp2p_identity::PeerId;
use std::collections::HashMap;
+
+use libp2p_identity::PeerId;
use web_time::Instant;
+use crate::{peer_score::RejectReason, MessageId, ValidationError};
+
/// Tracks recently sent `IWANT` messages and checks if peers respond to them.
#[derive(Default)]
pub(crate) struct GossipPromises {
@@ -41,6 +41,14 @@ impl GossipPromises {
self.promises.contains_key(message)
}
+ /// Get the peers to which we sent an IWANT for the given message id.
+ pub(crate) fn peers_for_message(&self, message_id: &MessageId) -> Vec<PeerId> {
+ self.promises
+ .get(message_id)
+ .map(|peers| peers.keys().copied().collect())
+ .unwrap_or_default()
+ }
+
/// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting.
pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) {
for message_id in messages {
diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs
index 5f9669c02c2..f93e993a854 100644
--- a/protocols/gossipsub/src/handler.rs
+++ b/protocols/gossipsub/src/handler.rs
@@ -18,27 +18,31 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{GossipsubCodec, ProtocolConfig};
-use crate::rpc::Receiver;
-use crate::rpc_proto::proto;
-use crate::types::{PeerKind, RawMessage, Rpc, RpcOut};
-use crate::ValidationError;
-use asynchronous_codec::Framed;
-use futures::future::Either;
-use futures::prelude::*;
-use futures::StreamExt;
-use libp2p_core::upgrade::DeniedUpgrade;
-use libp2p_swarm::handler::{
- ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
- FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
-};
-use libp2p_swarm::Stream;
use std::{
pin::Pin,
task::{Context, Poll},
};
+
+use asynchronous_codec::Framed;
+use futures::{future::Either, prelude::*, StreamExt};
+use libp2p_core::upgrade::DeniedUpgrade;
+use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
+ FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
+ },
+ Stream,
+};
use web_time::Instant;
+use crate::{
+ protocol::{GossipsubCodec, ProtocolConfig},
+ rpc::Receiver,
+ rpc_proto::proto,
+ types::{PeerKind, RawMessage, Rpc, RpcOut},
+ ValidationError,
+};
+
/// The event emitted by the Handler. This informs the behaviour of various events created
/// by the handler.
#[derive(Debug)]
@@ -111,7 +115,6 @@ pub struct EnabledHandler {
peer_kind: Option<PeerKind>,
/// Keeps track on whether we have sent the peer kind to the behaviour.
- //
// NOTE: Use this flag rather than checking the substream count each poll.
peer_kind_sent: bool,
@@ -195,7 +198,6 @@ impl EnabledHandler {
&mut self,
FullyNegotiatedOutbound { protocol, .. }: FullyNegotiatedOutbound<
<Handler as ConnectionHandler>::OutboundProtocol,
- <Handler as ConnectionHandler>::OutboundOpenInfo,
>,
) {
let (substream, peer_kind) = protocol;
@@ -218,7 +220,7 @@ impl EnabledHandler {
) -> Poll<
ConnectionHandlerEvent<
<Handler as ConnectionHandler>::OutboundProtocol,
- <Handler as ConnectionHandler>::OutboundOpenInfo,
+ (),
<Handler as ConnectionHandler>::ToBehaviour,
>,
> {
@@ -226,7 +228,7 @@ impl EnabledHandler {
if let Some(peer_kind) = self.peer_kind.as_ref() {
self.peer_kind_sent = true;
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
- HandlerEvent::PeerKind(peer_kind.clone()),
+ HandlerEvent::PeerKind(*peer_kind),
));
}
}
@@ -424,7 +426,7 @@ impl ConnectionHandler for Handler {
type OutboundOpenInfo = ();
type OutboundProtocol = ProtocolConfig;
- fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
+ fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
match self {
Handler::Enabled(handler) => {
SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ())
@@ -459,9 +461,7 @@ impl ConnectionHandler for Handler {
fn poll(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<
- ConnectionHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour>,
- > {
+ ) -> Poll<ConnectionHandlerEvent<Self::OutboundProtocol, (), Self::ToBehaviour>>