diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 1a531e3646c..40131a47a17 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -9,7 +9,7 @@ body: - type: textarea attributes: label: Summary - description: Please provide a short summary of the bug, along with any information you feel relevant to replicate the bug. + description: Please provide a short summary of the bug, along with any information you feel is relevant to replicate the bug. validations: required: true - type: textarea @@ -34,7 +34,7 @@ body: - type: textarea attributes: label: Possible Solution - description: Suggest a fix/reason for the bug, or ideas how to implement the addition or change. + description: Suggest a fix/reason for the bug, or ideas on how to implement the addition or change. validations: required: false - type: textarea @@ -45,11 +45,11 @@ body: required: false - type: dropdown attributes: - label: Would you like to work on fixing this bug ? + label: Would you like to work on fixing this bug? description: Any contribution towards fixing the bug is greatly appreciated. We are more than happy to provide help on the process. options: - "Yes" - "No" - Maybe validations: - required: true \ No newline at end of file + required: true diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml index ed7aeb644b3..05330cf071c 100644 --- a/.github/ISSUE_TEMPLATE/enhancement.yml +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -21,11 +21,11 @@ body: required: true - type: dropdown attributes: - label: Are you planning to do it yourself in a pull request ? + label: Are you planning to do it yourself in a pull request? description: Any contribution is greatly appreciated. We are more than happy to provide help on the process. options: - "Yes" - "No" - Maybe validations: - required: true \ No newline at end of file + required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 6fa3e638be8..45e1da2cad0 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -32,11 +32,11 @@ body: required: false - type: dropdown attributes: - label: Are you planning to do it yourself in a pull request ? + label: Are you planning to do it yourself in a pull request? description: Any contribution is greatly appreciated. We are more than happy to provide help on the process. options: - "Yes" - "No" - Maybe validations: - required: true \ No newline at end of file + required: true diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 90e8b2cda53..24eb1b75b2c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -6,7 +6,7 @@ This section will appear as the commit message after merging. Please craft it accordingly. For a quick primer on good commit messages, check out this blog post: https://cbea.ms/git-commit/ -Please include any relevant issues in here, for example: +Please include any relevant issues here, for example: Related https://github.com/libp2p/rust-libp2p/issues/ABCD. Fixes https://github.com/libp2p/rust-libp2p/issues/XYZ. @@ -15,7 +15,7 @@ Fixes https://github.com/libp2p/rust-libp2p/issues/XYZ. 
## Notes & open questions ## Change checklist diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 7623b56f450..939df1c4b8e 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -22,7 +22,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: shared-key: stable-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aad5b39aec7..2519f7e45f8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,7 +40,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: shared-key: stable-cache save-if: false @@ -67,7 +67,7 @@ jobs: echo "CRATE_VERSION=$CRATE_VERSION" >> $GITHUB_ENV - - name: Enforce version in `workspace.dependencies` matches latest version + - name: Enforce version in `workspace.dependencies` matches the latest version if: env.CRATE != 'libp2p' run: | SPECIFIED_VERSION=$(tq "workspace.dependencies.$CRATE.version" --file ./Cargo.toml) @@ -77,7 +77,7 @@ jobs: test "$CRATE_VERSION" = "$SPECIFIED_VERSION" || test "=$CRATE_VERSION" = "$SPECIFIED_VERSION" - - name: Enforce version in CHANGELOG.md matches version in manifest + - name: Enforce version in CHANGELOG.md matches the version in manifest run: | MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") @@ -133,7 +133,7 @@ jobs: os: ubuntu-latest - target: "wasm32-unknown-emscripten" os: ubuntu-latest - - target: "wasm32-wasi" + - target: "wasm32-wasip1" os: ubuntu-latest - target: "x86_64-apple-darwin" os: macos-latest @@ -149,7 +149,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -174,7 +174,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -195,7 +195,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: key: ${{ matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -212,7 +212,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -225,7 +225,7 @@ jobs: fail-fast: false matrix: rust-version: [ - 1.80.0, # current stable + 1.83.0, # current stable beta, ] steps: @@ -238,7 +238,7 @@ jobs: - uses: 
r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -254,7 +254,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -273,7 +273,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: shared-key: stable-cache save-if: false @@ -317,7 +317,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt @@ -365,7 +365,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 - run: cargo install --version 0.10.0 pb-rs --locked @@ -391,7 +391,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + - uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 - run: cargo metadata --locked --format-version=1 > /dev/null cargo-deny: diff --git a/Cargo.lock b/Cargo.lock index d405464f58f..252429064af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.20.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aead" @@ -29,9 +29,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -54,9 +54,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -66,18 +66,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 
dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "anes" @@ -87,81 +87,76 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.7" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd2405b3ac1faab2990b74d728624cd9fd115651fcecc7c2d8daf01376275ba" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.7.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = 
"76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "asn1-rs" @@ -175,23 +170,23 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.63", + "thiserror 1.0.69", "time", ] [[package]] name = "asn1-rs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" dependencies = [ - "asn1-rs-derive 0.5.0", + "asn1-rs-derive 0.5.1", "asn1-rs-impl 0.2.0", "displaydoc", "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.63", + "thiserror 1.0.69", "time", ] @@ -209,13 +204,13 @@ dependencies = [ [[package]] name = "asn1-rs-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", "synstructure 0.13.1", ] @@ -238,7 +233,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -275,159 +270,157 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener-strategy 0.5.2", + "event-listener-strategy", "futures-core", "pin-project-lite", ] [[package]] name = "async-executor" -version = "1.5.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ - "async-lock 2.7.0", "async-task", "concurrent-queue", - "fastrand 1.9.0", - "futures-lite 1.13.0", + "fastrand", + "futures-lite", "slab", ] [[package]] name = "async-fs" -version = "1.6.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ - "async-lock 2.7.0", - "autocfg", + "async-lock", "blocking", - "futures-lite 1.13.0", + "futures-lite", ] [[package]] name = "async-global-executor" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 1.9.0", + "async-channel 2.3.1", "async-executor", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock", "blocking", - "futures-lite 1.13.0", + "futures-lite", "once_cell", ] [[package]] name = "async-io" -version = "1.13.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ - "async-lock 2.7.0", - "autocfg", + "async-lock", "cfg-if", "concurrent-queue", - "futures-lite 1.13.0", - "log", + "futures-io", + "futures-lite", "parking", - "polling 2.8.0", - "rustix 
0.37.25", + "polling", + "rustix", "slab", - "socket2 0.4.9", - "waker-fn", + "tracing", + "windows-sys 0.59.0", ] [[package]] -name = "async-io" -version = "2.3.3" +name = "async-lock" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "async-lock 3.1.0", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.0.1", - "parking", - "polling 3.3.0", - "rustix 0.38.31", - "slab", - "tracing", - "windows-sys 0.52.0", + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", ] [[package]] -name = "async-lock" -version = "2.7.0" +name = "async-net" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" dependencies = [ - "event-listener 2.5.3", + "async-io", + "blocking", + "futures-lite", ] [[package]] -name = "async-lock" -version = "3.1.0" +name = "async-process" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb2ab2aa8a746e221ab826c73f48bc6ba41be6763f0855cb249eb6d154cf1d7" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ - "event-listener 3.1.0", - "event-listener-strategy 0.3.0", - "pin-project-lite", + "async-channel 2.3.1", + "async-io", + "async-lock", + "async-signal", + "async-task", + "blocking", + "cfg-if", + "event-listener 5.3.1", + "futures-lite", + "rustix", + "tracing", ] [[package]] -name = "async-net" -version = "1.7.0" +name = "async-recursion" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ - "async-io 1.13.0", - "autocfg", - "blocking", - "futures-lite 1.13.0", + "proc-macro2", + "quote", + "syn 2.0.92", ] [[package]] -name = "async-process" -version = "1.7.0" +name = "async-signal" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io 1.13.0", - "async-lock 2.7.0", - "autocfg", - "blocking", + "async-io", + "async-lock", + "atomic-waker", "cfg-if", - "event-listener 2.5.3", - "futures-lite 1.13.0", - "rustix 0.37.25", - "signal-hook", - "windows-sys 0.48.0", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.59.0", ] [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", - "gloo-timers", + "futures-lite", + "gloo-timers 0.3.0", "kv-log-macro", "log", "memchr", @@ -440,9 +433,9 @@ dependencies 
= [ [[package]] name = "async-std-resolver" -version = "0.24.0" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0ed2b6671c13d2c28756c5a64e04759c1e0b5d3d7ac031f521c3561e21fbcb" +checksum = "f42964492d88a2a555cc65d8ab30e5e1178c1776f40b2717643c1aebb4297a1a" dependencies = [ "async-std", "async-trait", @@ -450,14 +443,14 @@ dependencies = [ "futures-util", "hickory-resolver", "pin-utils", - "socket2 0.5.7", + "socket2", ] [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -466,30 +459,30 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "async-task" -version = "4.4.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -507,9 +500,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "attohttpc" @@ -517,16 +510,16 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http 0.2.9", + "http 0.2.12", "log", "url", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonat-example" @@ -536,7 +529,6 @@ dependencies = [ "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] @@ -547,7 +539,6 @@ dependencies = [ "cfg-if", "clap", "libp2p", - "opentelemetry 0.21.0", "opentelemetry-jaeger", "opentelemetry_sdk 0.21.2", "rand 0.8.5", @@ -559,15 +550,15 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", 
"http-body", "http-body-util", "hyper", @@ -583,9 +574,9 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.0", + "sync_wrapper", "tokio", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -593,20 +584,20 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -614,17 +605,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -686,9 +677,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "blake2" @@ -728,17 +719,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 1.9.0", - "async-lock 2.7.0", + "async-channel 2.3.1", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite 1.13.0", - "log", + "futures-io", + "futures-lite", + "piper", ] [[package]] @@ -757,7 +746,7 @@ dependencies = [ "rust-embed", "tokio", "tokio-util", - "tower", + "tower 0.4.13", "tower-http", "tracing", "tracing-subscriber", @@ -778,9 +767,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +checksum = "786a307d683a5bf92e6fd5fd69a7eb613751668d1d8d67d802846dfe367c62c8" dependencies = [ "memchr", "serde", @@ -788,9 +777,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byteorder" @@ -800,9 +789,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" 
dependencies = [ "serde", ] @@ -824,20 +813,20 @@ dependencies = [ [[package]] name = "cbor4ii" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" +checksum = "472931dd4dfcc785075b09be910147f9c6258883fc4591d0dac6116392b2daa6" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.0.83" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "8d6dbb628b8f8555f86d0323c2eb39e3ec81901f4b83e091db8a6a76d316a333" dependencies = [ - "libc", + "shlex", ] [[package]] @@ -858,6 +847,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -886,19 +881,17 @@ dependencies = [ name = "chat-example" version = "0.1.0" dependencies = [ - "async-trait", "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -907,15 +900,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -934,9 +927,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.6" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -944,9 +937,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.6" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -956,33 +949,33 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.5" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "clap_lex" 
-version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" -version = "4.6.6" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "futures-core", @@ -1013,15 +1006,15 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1029,9 +1022,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core2" @@ -1044,27 +1037,27 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "criterion" @@ -1079,7 +1072,7 @@ dependencies = [ "criterion-plot", "futures", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -1101,47 +1094,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" 
+checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -1151,9 +1139,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1220,13 +1208,13 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -1237,9 +1225,9 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.13" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1247,9 +1235,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1263,7 +1251,6 @@ dependencies = [ "futures", "futures-timer", "libp2p", - "log", "tokio", "tracing", "tracing-subscriber", @@ -1271,9 +1258,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -1300,7 +1287,7 @@ version = "9.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs 0.6.2", "displaydoc", "nom", "num-bigint", @@ -1340,44 +1327,43 @@ dependencies = [ [[package]] name = "dirs" -version = "4.0.0" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "displaydoc" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "distributed-key-value-store-example" version = "0.1.0" dependencies = [ - "async-trait", "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] @@ -1389,9 +1375,9 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "ecdsa" -version = "0.16.8" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der", "digest 0.10.7", @@ -1403,9 +1389,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", "signature", @@ -1428,9 +1414,9 @@ dependencies = [ [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elliptic-curve" @@ -1455,23 +1441,33 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", +] + +[[package]] +name = "env_filter" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", ] [[package]] @@ -1486,15 +1482,15 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.2" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" dependencies = [ + "anstream", + "anstyle", + "env_filter", "humantime", - "is-terminal", "log", - "regex", - "termcolor", ] [[package]] @@ -1505,12 +1501,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1519,17 +1515,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - [[package]] name = "event-listener" version = "5.3.1" @@ -1543,19 +1528,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96b852f1345da36d551b9473fa1e2b1eb5c5195585c6c018118bc92a8d91160" -dependencies = [ - "event-listener 3.1.0", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ "event-listener 5.3.1", "pin-project-lite", @@ -1563,18 +1538,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" @@ -1588,9 +1554,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "file-sharing-example" @@ -1601,7 +1567,6 @@ dependencies = [ "libp2p", "serde", "tokio", - "tracing", "tracing-subscriber", ] @@ -1611,6 +1576,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1637,9 +1608,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1662,9 +1633,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1672,15 +1643,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1690,44 +1661,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "1.13.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand 1.9.0", + "fastrand", "futures-core", "futures-io", - "memchr", "parking", "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb" -dependencies = [ - "futures-core", - "pin-project-lite", ] [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -1737,21 +1696,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.11", + "rustls 0.23.20", "rustls-pki-types", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1759,15 +1718,15 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1830,9 +1789,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -1840,9 +1799,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -1852,15 +1811,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.11" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1391ab1f92ffcc08911957149833e682aa3fe252b9f45f966d2ef972274c97df" +checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" dependencies = [ "aho-corasick", "bstr", - "fnv", "log", - "regex", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -1875,6 +1834,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "group" version = "0.13.0" @@ -1888,17 +1859,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", - "http 1.1.0", - "indexmap 2.2.1", + "http 1.2.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -1907,9 +1878,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hashbrown" @@ -1919,19 +1894,32 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ "allocator-api2", + "equivalent", + "foldhash", ] [[package]] -name = "heck" -version = "0.4.1" +name = "hashlink" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "heck" @@ -1941,12 +1929,18 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] -name = "hex" +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" @@ -1965,10 +1959,11 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" dependencies = [ + "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -1976,12 +1971,12 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand 0.8.5", - "socket2 0.5.7", - "thiserror 1.0.63", + "socket2", + "thiserror 2.0.9", "tinyvec", "tokio", "tracing", @@ -1990,21 +1985,21 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot", "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror 1.0.63", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -2054,7 +2049,7 @@ version = "0.1.0" dependencies = [ "anyhow", "either", - "env_logger 0.10.2", + "env_logger 0.11.6", "futures", 
"libp2p", "redis", @@ -2077,9 +2072,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -2088,9 +2083,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -2099,44 +2094,44 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body", "pin-project-lite", ] [[package]] name = "http-range-header" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce4ef31cda248bbdb6e6820603b82dfcd9e833db65a43e997a0ccec777d11fe" +checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c" [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -2146,15 +2141,15 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", "futures-util", "h2", - "http 1.1.0", + "http 1.2.0", "http-body", "httparse", "httpdate", @@ -2167,26 +2162,27 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.26.0" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", - "http 1.1.0", + "http 1.2.0", "hyper", "hyper-util", - "rustls 0.22.4", + "rustls 0.23.20", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", + "webpki-roots 0.26.7", ] [[package]] name = 
"hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -2213,23 +2209,141 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = 
"icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", +] + [[package]] name = "identify-example" version = "0.1.0" @@ -2237,28 +2351,28 @@ dependencies = [ "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] [[package]] name = "idna" -version = "0.4.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", ] [[package]] -name = "idna" -version = "0.5.0" +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -2273,22 +2387,26 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ - "async-io 2.3.3", + "async-io", "core-foundation", "fnv", "futures", "if-addrs", "ipnet", "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", "rtnetlink", "smol", "system-configuration", "tokio", - "windows 0.51.1", + "windows 0.53.0", ] [[package]] @@ -2301,7 +2419,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.1.0", + "http 1.2.0", "http-body-util", "hyper", "hyper-util", @@ -2324,12 +2442,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.15.2", ] [[package]] @@ -2342,15 +2460,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - [[package]] name = "integer-encoding" version = "3.0.4" @@ -2368,8 +2477,8 @@ dependencies = [ "log", "rand 0.8.5", "rtcp", - "rtp", - "thiserror 1.0.63", + "rtp 0.9.0", + "thiserror 1.0.69", "tokio", "waitgroup", "webrtc-srtp", @@ -2383,7 +2492,6 @@ dependencies = [ "anyhow", "axum", "console_error_panic_hook", - "either", "futures", "futures-timer", "libp2p", @@ -2410,27 +2518,16 @@ dependencies = [ "web-time 1.1.0", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipconfig" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -2438,13 +2535,10 @@ name = "ipfs-kad-example" version = "0.1.0" dependencies = [ "anyhow", - "async-trait", "clap", - "env_logger 0.10.2", "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] @@ -2452,32 +2546,36 @@ dependencies = [ name = "ipfs-private-example" version = "0.1.0" dependencies = [ - "async-trait", "either", "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] [[package]] name = "ipnet" -version = "2.8.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi", - "rustix 0.38.31", - "windows-sys 0.48.0", + "hermit-abi 0.4.0", + "libc", + "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.10.5" @@ -2487,26 +2585,36 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -2517,7 +2625,6 @@ version = "0.1.0" dependencies = [ "base64 0.22.1", "clap", - "libp2p-core", "libp2p-identity", "serde", "serde_json", @@ -2535,24 +2642,22 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = 
"b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libp2p" -version = "0.54.2" +version = "0.55.0" dependencies = [ "async-std", - "async-trait", "bytes", - "clap", "either", "futures", "futures-timer", @@ -2592,14 +2697,14 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "tracing-subscriber", ] [[package]] name = "libp2p-allow-block-list" -version = "0.4.2" +version = "0.4.1" dependencies = [ "async-std", "libp2p-core", @@ -2615,7 +2720,6 @@ version = "0.13.1" dependencies = [ "async-trait", "asynchronous-codec", - "bytes", "either", "futures", "futures-bounded", @@ -2626,14 +2730,14 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", + "libp2p-test-utils", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "rand_core 0.6.4", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "tracing", - "tracing-subscriber", "web-time 1.1.0", ] @@ -2672,14 +2776,11 @@ dependencies = [ "parking_lot", "pin-project", "quick-protobuf", - "quickcheck-ext", "rand 0.8.5", "rw-stream-sink", - "serde", - "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", - "unsigned-varint 0.8.0", + "unsigned-varint", "web-time 1.1.0", ] @@ -2688,37 +2789,32 @@ name = "libp2p-dcutr" version = "0.12.1" dependencies = [ "asynchronous-codec", - "clap", "either", "futures", "futures-bounded", "futures-timer", "libp2p-core", - "libp2p-dns", "libp2p-identify", "libp2p-identity", - "libp2p-noise", - "libp2p-ping", "libp2p-plaintext", "libp2p-relay", "libp2p-swarm", "libp2p-swarm-test", "libp2p-tcp", + "libp2p-test-utils", "libp2p-yamux", "lru", "quick-protobuf", "quick-protobuf-codec", - "rand 0.8.5", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "tracing", - "tracing-subscriber", "web-time 1.1.0", ] [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.42.1" dependencies = [ "async-std", "async-std-resolver", @@ -2727,11 +2823,11 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", + "libp2p-test-utils", "parking_lot", "smallvec", "tokio", "tracing", - "tracing-subscriber", ] [[package]] @@ -2750,7 +2846,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", ] @@ -2768,14 +2864,13 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.15", - "hex", + "hashlink", "hex_fmt", "libp2p-core", "libp2p-identity", - "libp2p-noise", "libp2p-swarm", "libp2p-swarm-test", - "libp2p-yamux", + "libp2p-test-utils", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -2784,10 +2879,8 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "smallvec", "tokio", "tracing", - "tracing-subscriber", "web-time 1.1.0", ] @@ -2805,13 +2898,12 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "lru", + "libp2p-test-utils", "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", - "tracing-subscriber", ] [[package]] @@ -2819,7 +2911,6 @@ name = "libp2p-identity" version = "0.2.10" dependencies = [ "asn1_der", - "base64 0.22.1", "bs58", "criterion", "ed25519-dalek", @@ -2837,7 +2928,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "zeroize", ] @@ -2846,7 +2937,6 @@ dependencies = [ name = "libp2p-kad" version = "0.47.0" dependencies = [ - "arrayvec", "async-std", "asynchronous-codec", "bytes", @@ -2861,6 +2951,7 @@ dependencies = [ 
"libp2p-noise", "libp2p-swarm", "libp2p-swarm-test", + "libp2p-test-utils", "libp2p-yamux", "quick-protobuf", "quick-protobuf-codec", @@ -2869,43 +2960,37 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", - "tracing-subscriber", "uint", "web-time 1.1.0", ] [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.46.1" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", - "data-encoding", "futures", "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", - "libp2p-noise", "libp2p-swarm", "libp2p-swarm-test", - "libp2p-tcp", - "libp2p-yamux", + "libp2p-test-utils", "rand 0.8.5", "smallvec", - "socket2 0.5.7", + "socket2", "tokio", "tracing", - "tracing-subscriber", ] [[package]] name = "libp2p-memory-connection-limits" version = "0.3.1" dependencies = [ - "async-std", "libp2p-core", "libp2p-identify", "libp2p-identity", @@ -2913,7 +2998,6 @@ dependencies = [ "libp2p-swarm-derive", "libp2p-swarm-test", "memory-stats", - "rand 0.8.5", "sysinfo", "tracing", ] @@ -2951,14 +3035,14 @@ dependencies = [ "libp2p-muxer-test-harness", "libp2p-plaintext", "libp2p-tcp", + "libp2p-test-utils", "nohash-hasher", "parking_lot", "quickcheck-ext", "rand 0.8.5", "smallvec", "tracing", - "tracing-subscriber", - "unsigned-varint 0.8.0", + "unsigned-varint", ] [[package]] @@ -2974,27 +3058,25 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.45.1" dependencies = [ "asynchronous-codec", "bytes", - "curve25519-dalek", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", + "libp2p-test-utils", "multiaddr", "multihash", "once_cell", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", - "sha2 0.10.8", "snow", "static_assertions", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", - "tracing-subscriber", "x25519-dalek", "zeroize", ] @@ -3010,18 +3092,16 @@ dependencies = [ "futures-timer", "libp2p", "libp2p-core", - "libp2p-dns", "libp2p-identity", - "libp2p-quic", "libp2p-swarm", "libp2p-swarm-test", "libp2p-tcp", + "libp2p-test-utils", "libp2p-tls", "libp2p-yamux", - "rand 0.8.5", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "tracing", "tracing-subscriber", @@ -3032,7 +3112,6 @@ dependencies = [ name = "libp2p-ping" version = "0.45.1" dependencies = [ - "either", "futures", "futures-timer", "libp2p-core", @@ -3043,7 +3122,6 @@ dependencies = [ "rand 0.8.5", "tokio", "tracing", - "tracing-subscriber", "web-time 1.1.0", ] @@ -3057,12 +3135,11 @@ dependencies = [ "futures_ringbuf", "libp2p-core", "libp2p-identity", + "libp2p-test-utils", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", - "rand 0.8.5", "tracing", - "tracing-subscriber", ] [[package]] @@ -3091,7 +3168,6 @@ name = "libp2p-quic" version = "0.11.2" dependencies = [ "async-std", - "bytes", "futures", "futures-timer", "if-watch", @@ -3100,19 +3176,18 @@ dependencies = [ "libp2p-muxer-test-harness", "libp2p-noise", "libp2p-tcp", + "libp2p-test-utils", "libp2p-tls", "libp2p-yamux", - "parking_lot", "quickcheck", "quinn", "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.11", - "socket2 0.5.7", - "thiserror 2.0.3", + "rustls 0.23.20", + "socket2", + "thiserror 2.0.9", "tokio", "tracing", - "tracing-subscriber", ] [[package]] @@ -3131,21 +3206,21 @@ dependencies = [ "libp2p-plaintext", "libp2p-swarm", "libp2p-swarm-test", + "libp2p-test-utils", "libp2p-yamux", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", - "thiserror 2.0.3", + "thiserror 2.0.9", 
"tracing", - "tracing-subscriber", "web-time 1.1.0", ] [[package]] name = "libp2p-rendezvous" -version = "0.15.0" +version = "0.15.1" dependencies = [ "async-trait", "asynchronous-codec", @@ -3153,28 +3228,23 @@ dependencies = [ "futures", "futures-timer", "libp2p-core", - "libp2p-identify", "libp2p-identity", - "libp2p-noise", - "libp2p-ping", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", - "libp2p-tcp", - "libp2p-yamux", + "libp2p-test-utils", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "tracing", - "tracing-subscriber", "web-time 1.1.0", ] [[package]] name = "libp2p-request-response" -version = "0.27.1" +version = "0.28.0" dependencies = [ "anyhow", "async-std", @@ -3182,37 +3252,30 @@ dependencies = [ "cbor4ii", "futures", "futures-bounded", - "futures-timer", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "libp2p-noise", "libp2p-swarm", "libp2p-swarm-test", - "libp2p-tcp", - "libp2p-yamux", + "libp2p-test-utils", "rand 0.8.5", "serde", "serde_json", "smallvec", "tracing", - "tracing-subscriber", - "web-time 1.1.0", ] [[package]] name = "libp2p-server" -version = "0.12.8" +version = "0.12.6" dependencies = [ "axum", "base64 0.22.1", "clap", "futures", - "futures-timer", "libp2p", "prometheus-client", "serde", - "serde_derive", "serde_json", "tokio", "tracing", @@ -3229,10 +3292,10 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", + "libp2p-test-utils", "rand 0.8.5", "tokio", "tracing", - "tracing-subscriber", ] [[package]] @@ -3254,6 +3317,7 @@ dependencies = [ "libp2p-plaintext", "libp2p-swarm-derive", "libp2p-swarm-test", + "libp2p-test-utils", "libp2p-yamux", "lru", "multistream-select", @@ -3263,7 +3327,6 @@ dependencies = [ "smallvec", "tokio", "tracing", - "tracing-subscriber", "trybuild", "wasm-bindgen-futures", "web-time 1.1.0", @@ -3273,10 +3336,9 @@ dependencies = [ name = "libp2p-swarm-derive" version = "0.35.0" dependencies = [ - "heck 0.5.0", - "proc-macro2", + "heck", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -3292,7 +3354,6 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "rand 0.8.5", "tracing", ] @@ -3300,17 +3361,23 @@ dependencies = [ name = "libp2p-tcp" version = "0.42.0" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "libp2p-identity", - "socket2 0.5.7", + "libp2p-test-utils", + "socket2", "tokio", "tracing", +] + +[[package]] +name = "libp2p-test-utils" +version = "0.1.0" +dependencies = [ "tracing-subscriber", ] @@ -3320,7 +3387,6 @@ version = "0.5.0" dependencies = [ "futures", "futures-rustls", - "hex", "hex-literal", "libp2p-core", "libp2p-identity", @@ -3328,9 +3394,9 @@ dependencies = [ "libp2p-yamux", "rcgen", "ring 0.17.8", - "rustls 0.23.11", + "rustls 0.23.20", "rustls-webpki 0.101.7", - "thiserror 2.0.3", + "thiserror 2.0.9", "tokio", "x509-parser 0.16.0", "yasna", @@ -3366,7 +3432,6 @@ name = "libp2p-webrtc" version = "0.8.0-alpha" dependencies = [ "async-trait", - "bytes", "futures", "futures-timer", "hex", @@ -3374,20 +3439,19 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", + "libp2p-test-utils", "libp2p-webrtc-utils", "multihash", "quickcheck", "rand 0.8.5", "rcgen", - "serde", - "stun 0.6.0", - "thiserror 2.0.3", - "tinytemplate", + "stun 0.7.0", + "thiserror 2.0.9", "tokio", "tokio-util", "tracing", - "tracing-subscriber", "webrtc", + "webrtc-ice", ] [[package]] @@ -3407,7 +3471,6 @@ dependencies = 
[ "rand 0.8.5", "serde", "sha2 0.10.8", - "thiserror 2.0.3", "tinytemplate", "tracing", ] @@ -3425,7 +3488,7 @@ dependencies = [ "libp2p-identity", "libp2p-webrtc-utils", "send_wrapper 0.6.0", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "wasm-bindgen", "wasm-bindgen-futures", @@ -3449,10 +3512,10 @@ dependencies = [ "rcgen", "rw-stream-sink", "soketto", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "url", - "webpki-roots 0.25.2", + "webpki-roots 0.25.4", ] [[package]] @@ -3466,9 +3529,8 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-yamux", - "parking_lot", "send_wrapper 0.6.0", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "wasm-bindgen", "web-sys", @@ -3476,7 +3538,7 @@ dependencies = [ [[package]] name = "libp2p-webtransport-websys" -version = "0.4.0" +version = "0.4.1" dependencies = [ "futures", "js-sys", @@ -3488,7 +3550,7 @@ dependencies = [ "multihash", "once_cell", "send_wrapper 0.6.0", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "wasm-bindgen", "wasm-bindgen-futures", @@ -3504,10 +3566,20 @@ dependencies = [ "futures", "libp2p-core", "libp2p-muxer-test-harness", - "thiserror 2.0.3", + "thiserror 2.0.9", "tracing", "yamux 0.12.1", - "yamux 0.13.3", + "yamux 0.13.4", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", ] [[package]] @@ -3558,29 +3630,23 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] -name = "linux-raw-sys" -version = "0.4.12" +name = "litemap" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -3588,29 +3654,20 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "value-bag", ] [[package]] name = "lru" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" -dependencies = [ - "hashbrown 0.14.3", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = 
"234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "linked-hash-map", + "hashbrown 0.15.2", ] [[package]] @@ -3630,24 +3687,25 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest 0.10.7", ] [[package]] name = "memchr" -version = "2.6.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" @@ -3658,23 +3716,14 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "memory-stats" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f79cf9964c5c9545493acda1263f1912f8d2c56c8a2ffee2606cb960acaacc" +checksum = "c73f5c649995a115e1a0220b35e4df0a1294500477f97a91d0660fb5abeb574a" dependencies = [ "libc", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -3684,13 +3733,13 @@ dependencies = [ "axum", "futures", "libp2p", - "opentelemetry 0.25.0", + "opentelemetry 0.27.1", "opentelemetry-otlp", - "opentelemetry_sdk 0.25.0", + "opentelemetry_sdk 0.27.1", "prometheus-client", "tokio", "tracing", - "tracing-opentelemetry 0.26.0", + "tracing-opentelemetry 0.28.0", "tracing-subscriber", ] @@ -3702,9 +3751,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -3712,9 +3761,9 @@ dependencies = [ [[package]] name = "minicov" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" dependencies = [ "cc", "walkdir", @@ -3728,29 +3777,49 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "moka" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "triomphe", + "uuid", ] [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -3761,7 +3830,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint", "url", ] @@ -3778,16 +3847,16 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "arbitrary", "core2", "quickcheck", "rand 0.8.5", "serde", - "unsigned-varint 0.7.2", + "unsigned-varint", ] [[package]] @@ -3798,23 +3867,21 @@ dependencies = [ "bytes", "futures", "futures_ringbuf", + "libp2p-test-utils", "pin-project", "quickcheck-ext", - "rand 0.8.5", "rw-stream-sink", "smallvec", "tracing", - "tracing-subscriber", - "unsigned-varint 0.8.0", + "unsigned-varint", ] [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -3828,21 +3895,20 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.4.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" dependencies = [ "anyhow", "byteorder", - "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -3861,31 +3927,31 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror 1.0.63", + "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror 1.0.63", + "thiserror 
1.0.69", "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ - "async-io 1.13.0", + "async-io", "bytes", "futures", "libc", @@ -3893,17 +3959,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", -] - [[package]] name = "nix" version = "0.26.4" @@ -3913,7 +3968,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.7.1", + "memoffset", "pin-utils", ] @@ -3954,11 +4009,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -3971,11 +4025,10 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] @@ -3994,15 +4047,15 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] [[package]] name = "object" -version = "0.31.1" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -4018,38 +4071,38 @@ dependencies = [ [[package]] name = "oid-registry" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs 0.6.2", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = 
"c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -4066,7 +4119,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -4077,9 +4130,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -4095,26 +4148,26 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.2.1", + "indexmap 2.7.0", "js-sys", "once_cell", "pin-project-lite", - "thiserror 1.0.63", + "thiserror 1.0.69", "urlencoding", ] [[package]] name = "opentelemetry" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803801d3d3b71cd026851a53f974ea03df3d179cb758b260136a6c9e22e196af" +checksum = "ab70038c28ed37b97d8ed414b6429d343a8bbf44c9f79ec854f3a643029ba6d7" dependencies = [ "futures-core", "futures-sink", "js-sys", - "once_cell", "pin-project-lite", - "thiserror 1.0.63", + "thiserror 1.0.69", + "tracing", ] [[package]] @@ -4135,30 +4188,31 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.25.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "596b1719b3cab83addb20bcbffdf21575279d9436d9ccccfe651a3bf0ab5ab06" +checksum = "91cf61a1868dacc576bf2b2a1c3e9ab150af7272909e80085c3173384fe11f76" dependencies = [ "async-trait", "futures-core", - "http 1.1.0", - "opentelemetry 0.25.0", + "http 1.2.0", + "opentelemetry 0.27.1", "opentelemetry-proto", - "opentelemetry_sdk 0.25.0", + "opentelemetry_sdk 0.27.1", "prost", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "tonic", + "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.25.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c43620e8f93359eb7e627a3b16ee92d8585774986f24f2ab010817426c5ce61" +checksum = "a6e05acbfada5ec79023c85368af14abd0b307c015e9064d249b2a950ef459a6" dependencies = [ - "opentelemetry 0.25.0", - "opentelemetry_sdk 0.25.0", + "opentelemetry 0.27.1", + "opentelemetry_sdk 0.27.1", "prost", "tonic", ] @@ -4186,35 +4240,41 @@ dependencies = [ "glob", "once_cell", "opentelemetry 0.21.0", - "ordered-float 4.2.0", + "ordered-float 4.6.0", "percent-encoding", "rand 0.8.5", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "tokio-stream", ] [[package]] name = "opentelemetry_sdk" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0da0d6b47a3dbc6e9c9e36a0520e25cf943e046843818faaa3f87365a548c82" +checksum = "231e9d6ceef9b0b2546ddf52335785ce41252bc7474ee8ba05bfad277be13ab8" dependencies = [ "async-trait", "futures-channel", "futures-executor", "futures-util", 
"glob", - "once_cell", - "opentelemetry 0.25.0", + "opentelemetry 0.27.1", "percent-encoding", "rand 0.8.5", "serde_json", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "tokio-stream", + "tracing", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "2.10.1" @@ -4226,9 +4286,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.2.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" dependencies = [ "num-traits", ] @@ -4265,9 +4325,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -4281,30 +4341,30 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "serde", ] @@ -4325,29 +4385,29 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -4362,10 +4422,20 @@ dependencies = [ "futures", "libp2p", "tokio", - 
"tracing", "tracing-subscriber", ] +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -4378,15 +4448,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -4397,47 +4467,32 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.3.0" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53b6af1f60f36f8c2ac2aad5459d75a5a9b4be1e8cdd40264f315d78193e531" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", + "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.31", + "rustix", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -4453,9 +4508,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -4465,9 +4520,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -4477,15 +4532,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "primeorder" -version = "0.13.2" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ "elliptic-curve", ] @@ -4501,9 +4559,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -4519,14 +4577,14 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "prost" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" dependencies = [ "bytes", "prost-derive", @@ -4534,15 +4592,30 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", - "itertools", + "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", +] + +[[package]] +name = "quanta" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773ce68d0bb9bc7ef20be3536ffe94e223e1f365bd374108b2659fac0c65cfe6" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi", ] [[package]] @@ -4570,8 +4643,8 @@ dependencies = [ "futures", "quick-protobuf", "quickcheck-ext", - "thiserror 2.0.3", - "unsigned-varint 0.8.0", + "thiserror 2.0.9", + "unsigned-varint", ] [[package]] @@ -4595,59 +4668,64 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.2" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", "bytes", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 1.1.0", - "rustls 0.23.11", - "thiserror 1.0.63", + "rustc-hash", + "rustls 0.23.20", + "socket2", + "thiserror 2.0.9", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom 0.2.15", "rand 0.8.5", "ring 0.17.8", - "rustc-hash 2.0.0", - "rustls 0.23.11", + "rustc-hash", + "rustls 0.23.20", + "rustls-pki-types", "slab", - "thiserror 1.0.63", + "thiserror 2.0.9", 
"tinyvec", "tracing", + "web-time 1.1.0", ] [[package]] name = "quinn-udp" -version = "0.5.0" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7ad7bc932e4968523fa7d9c320ee135ff779de720e9350fee8728838551764" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ + "cfg_aliases", "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -4723,6 +4801,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "raw-cpuid" +version = "11.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "rayon" version = "1.10.0" @@ -4777,43 +4864,34 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", - "redox_syscall 0.2.16", - "thiserror 1.0.63", + "libredox", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.10.5" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.4", - "regex-syntax 0.8.2", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -4827,13 +4905,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] @@ -4844,9 +4922,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "relay-server-example" @@ -4856,7 
+4934,6 @@ dependencies = [ "futures", "libp2p", "tokio", - "tracing", "tracing-subscriber", ] @@ -4873,9 +4950,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.4" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" +checksum = "7fe060fe50f524be480214aba758c71f99f90ee8c83c5a36b5e9e1d568eb4eb3" dependencies = [ "base64 0.22.1", "bytes", @@ -4883,7 +4960,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 1.1.0", + "http 1.2.0", "http-body", "http-body-util", "hyper", @@ -4898,24 +4975,26 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.22.4", + "quinn", + "rustls 0.23.20", "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.1", - "winreg 0.52.0", + "webpki-roots 0.26.7", + "windows-registry", ] [[package]] @@ -5001,28 +5080,31 @@ dependencies = [ [[package]] name = "rtcp" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3677908cadfbecb4cc1da9a56a32524fae4ebdfa7c2ea93886e1b1e846488cb9" +checksum = "33648a781874466a62d89e265fee9f17e32bc7d05a256e6cca41bf97eadcd8aa" dependencies = [ "bytes", - "thiserror 1.0.63", + "thiserror 1.0.69", "webrtc-util 0.8.1", ] [[package]] name = "rtnetlink" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ "async-global-executor", "futures", "log", + "netlink-packet-core", "netlink-packet-route", + "netlink-packet-utils", "netlink-proto", - "nix 0.24.3", - "thiserror 1.0.63", + "netlink-sys", + "nix", + "thiserror 1.0.69", "tokio", ] @@ -5035,15 +5117,28 @@ dependencies = [ "bytes", "rand 0.8.5", "serde", - "thiserror 1.0.63", + "thiserror 1.0.69", + "webrtc-util 0.8.1", +] + +[[package]] +name = "rtp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47fca9bd66ae0b1f3f649b8f5003d6176433d7293b78b0fce7e1031816bdd99d" +dependencies = [ + "bytes", + "rand 0.8.5", + "serde", + "thiserror 1.0.69", "webrtc-util 0.8.1", ] [[package]] name = "rust-embed" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19549741604902eb99a7ed0ee177a0663ee1eda51a29f71401f166e47e77806a" +checksum = "fa66af4a4fdd5e7ebc276f115e895611a34739a9c1c01028383d612d550953c0" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -5052,23 +5147,23 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9f96e283ec64401f30d3df8ee2aaeb2561f34c824381efa24a35f79bf40ee4" +checksum = "6125dbc8867951125eec87294137f4e9c2c96566e61bf72c45095a7c77761478" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.89", + "syn 2.0.92", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"38c74a686185620830701348de757fd36bef4aa9680fd23c49fc539ddcc1af32" +checksum = "2e5347777e9aacb56039b0e1f28785929a8a3b709e87482e7442c72e7c12529d" dependencies = [ "globset", "sha2 0.10.8", @@ -5077,27 +5172,21 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc-hash" -version = "1.1.0" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] @@ -5113,36 +5202,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.31" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.12", - "windows-sys 0.52.0", + "linux-raw-sys", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -5152,47 +5227,35 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring 0.17.8", - "rustls-pki-types", - "rustls-webpki 0.102.5", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls" -version = "0.23.11" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.5", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = 
"dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +dependencies = [ + "web-time 1.1.0", +] [[package]] name = "rustls-webpki" @@ -5206,9 +5269,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.5" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a6fccd794a42c2c105b513a2f62bc3fd8f3ba57a4593677ceb0bd035164d78" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -5217,9 +5280,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rw-stream-sink" @@ -5233,9 +5296,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salsa20" @@ -5257,11 +5320,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -5278,23 +5341,23 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] name = "sdp" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4653054c30ebce63658762eb0d64e27673868a95564474811ae6c220cf767640" +checksum = "13254db766b17451aced321e7397ebf0a446ef0c8d2942b6e67a95815421093f" dependencies = [ "rand 0.8.5", "substring", - "thiserror 1.0.63", + "thiserror 1.0.69", "url", ] @@ -5314,11 +5377,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -5327,9 +5390,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -5337,9 +5400,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "send_wrapper" @@ -5358,41 +5421,42 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.210" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ - "indexmap 2.2.1", + "indexmap 2.7.0", "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -5406,14 +5470,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -5432,9 +5496,9 @@ dependencies = [ [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -5477,9 +5541,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -5494,29 +5558,25 @@ dependencies = [ ] [[package]] -name = "signal-hook" -version = "0.3.17" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" 
-dependencies = [ - "libc", - "signal-hook-registry", -] +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", "rand_core 0.6.4", @@ -5524,9 +5584,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -5539,26 +5599,26 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" -version = "1.3.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ - "async-channel 1.9.0", + "async-channel 2.3.1", "async-executor", "async-fs", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock", "async-net", "async-process", "blocking", - "futures-lite 1.13.0", + "futures-lite", ] [[package]] name = "smol_str" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74212e6bbe9a4352329b2f68ba3130c15a3f26fe88ff22dbdc6cdd58fa85e99c" +checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" dependencies = [ "serde", ] @@ -5582,19 +5642,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5602,9 +5652,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -5629,14 +5679,20 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", ] 
+[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -5668,30 +5724,30 @@ dependencies = [ [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -5707,7 +5763,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "subtle", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "url", "webrtc-util 0.8.1", @@ -5715,21 +5771,21 @@ dependencies = [ [[package]] name = "stun" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28fad383a1cc63ae141e84e48eaef44a1063e9d9e55bcb8f51a99b886486e01b" +checksum = "ea256fb46a13f9204e9dee9982997b2c3097db175a9fddaa8350310d03c4d5a3" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "crc", "lazy_static", "md-5", "rand 0.8.5", "ring 0.17.8", "subtle", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "url", - "webrtc-util 0.9.0", + "webrtc-util 0.10.0", ] [[package]] @@ -5743,9 +5799,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -5760,9 +5816,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "70ae51629bf965c5c098cc9e87908a3df5301051a9e087d6f9bef5c9771ed126" dependencies = [ "proc-macro2", "quote", @@ -5771,15 +5827,12 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "sync_wrapper" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384595c11a4e2969895cad5a8c4029115f5ab956a9e5ef4de79d11a426e5f20c" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -5801,62 +5854,74 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "sysinfo" -version = "0.30.12" +version = "0.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" +checksum = "4fc858248ea01b66f19d8e8a6d55f41deaf91e9d495246fd01368d99935c6c01" dependencies = [ - "cfg-if", "core-foundation-sys", "libc", + "memchr", "ntapi", - "once_cell", "rayon", - "windows 0.52.0", + "windows 0.57.0", ] [[package]] name = "system-configuration" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "target-triple" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" + [[package]] name = "tempfile" -version = "3.10.1" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.0.1", - "rustix 0.38.31", - "windows-sys 0.52.0", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -5870,8 +5935,8 @@ dependencies = [ "async-trait", "base64 0.22.1", "futures", - "http 1.1.0", - "indexmap 2.2.1", + "http 1.2.0", + "indexmap 2.7.0", "parking_lot", "paste", "reqwest", @@ -5881,7 +5946,7 @@ dependencies = [ "stringmatch", "strum", "thirtyfour-macros", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "tracing", "url", @@ -5895,54 +5960,54 @@ checksum = "b72d056365e368fc57a56d0cec9e41b02fb4a3474a61c8735262b1cfebe67425" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.63", + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = 
"f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.9", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -5972,9 +6037,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -5993,14 +6058,24 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -6013,9 +6088,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -6028,32 +6103,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -6068,20 +6142,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", + "rustls 0.23.20", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -6090,9 +6163,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -6104,9 +6177,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.11" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", @@ -6116,20 +6189,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.7" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.1", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -6148,7 +6221,7 @@ dependencies = [ "base64 0.22.1", "bytes", "h2", - "http 1.1.0", + "http 1.2.0", "http-body", "http-body-util", "hyper", @@ -6157,10 +6230,10 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "socket2 0.5.7", + "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -6186,16 +6259,32 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body", "http-body-util", 
"http-range-header", @@ -6213,21 +6302,21 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -6237,20 +6326,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -6287,14 +6376,14 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.26.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eabc56d23707ad55ba2a0750fc24767125d5a0f51993ba41ad2c441cc7b8dea" +checksum = "97a971f6058498b5c0f1affa23e7ea202057a7301dbff68e968b2d578bcbd053" dependencies = [ "js-sys", "once_cell", - "opentelemetry 0.25.0", - "opentelemetry_sdk 0.25.0", + "opentelemetry 0.27.1", + "opentelemetry_sdk 0.27.1", "smallvec", "tracing", "tracing-core", @@ -6305,9 +6394,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -6332,31 +6421,38 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "triomphe" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" + [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.96" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a5f13f11071020bb12de7a16b925d2d58636175c20c11dc5f96cb64bb6c9b3" +checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4" dependencies = [ "glob", "serde", 
"serde_derive", "serde_json", + "target-triple", "termcolor", "toml", ] [[package]] name = "turn" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f4fcb97da0426e8146fe0e9b78cc13120161087256198701d12d9df77f7701" +checksum = "ffb2ac4f331064513ad510b7a36edc0df555bd61672986607f7c9ff46f98f415" dependencies = [ "async-trait", "base64 0.21.7", @@ -6364,24 +6460,25 @@ dependencies = [ "log", "md-5", "rand 0.8.5", - "ring 0.16.20", + "ring 0.17.8", "stun 0.5.1", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", + "tokio-util", "webrtc-util 0.8.1", ] [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "uint" -version = "0.9.5" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" dependencies = [ "byteorder", "crunchy", @@ -6391,39 +6488,21 @@ dependencies = [ [[package]] name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.13" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -6435,12 +6514,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" - [[package]] name = "unsigned-varint" version = "0.8.0" @@ -6475,12 +6548,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna", "percent-encoding", ] @@ -6490,17 +6563,29 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.4.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", ] @@ -6513,9 +6598,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -6525,9 +6610,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "waitgroup" @@ -6538,17 +6623,11 @@ dependencies = [ "atomic-waker", ] -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "walkdir" -version = "2.3.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -6577,9 +6656,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -6588,36 +6667,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.49" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6625,30 +6704,29 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-bindgen-test" -version = "0.3.43" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +checksum = "c61d44563646eb934577f2772656c7ad5e9c90fac78aa8013d776fcdaf24625d" dependencies = [ - "console_error_panic_hook", "js-sys", "minicov", "scoped-tls", @@ -6659,13 +6737,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.43" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +checksum = "54171416ce73aa0b9c377b51cc3cb542becee1cd678204812e8392e5b0e4a031" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", ] [[package]] @@ -6681,9 +6759,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -6711,15 +6789,15 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.1" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -6744,15 +6822,15 @@ dependencies = [ "regex", "ring 0.16.20", "rtcp", - "rtp", - "rustls 0.21.11", + "rtp 0.9.0", + "rustls 0.21.12", "sdp", "serde", "serde_json", "sha2 0.10.8", "smol_str", "stun 0.5.1", - "thiserror 1.0.63", + "thiserror 1.0.69", 
"time", "tokio", "turn", @@ -6770,13 +6848,13 @@ dependencies = [ [[package]] name = "webrtc-data" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a45d2461d0e0bf93f181e30eb0b40df32b8bf3efb89c53cebb1990e603e2067d" +checksum = "e8c08e648e10572b9edbe741074e0f4d3cb221aa7cdf9a814ee71606de312f33" dependencies = [ "bytes", "log", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "webrtc-sctp", "webrtc-util 0.8.1", @@ -6806,13 +6884,13 @@ dependencies = [ "rand_core 0.6.4", "rcgen", "ring 0.16.20", - "rustls 0.21.11", + "rustls 0.21.12", "sec1", "serde", "sha1", "sha2 0.10.8", "subtle", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "webrtc-util 0.8.1", "x25519-dalek", @@ -6833,7 +6911,7 @@ dependencies = [ "serde", "serde_json", "stun 0.5.1", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "turn", "url", @@ -6845,35 +6923,35 @@ dependencies = [ [[package]] name = "webrtc-mdns" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" +checksum = "ce981f93104a8debb3563bb0cedfe4aa2f351fdf6b53f346ab50009424125c08" dependencies = [ "log", - "socket2 0.5.7", - "thiserror 1.0.63", + "socket2", + "thiserror 1.0.69", "tokio", "webrtc-util 0.8.1", ] [[package]] name = "webrtc-media" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfde3c7b9450b67d466bb2f02c6d9ff9514d33535eb9994942afd1f828839d1" +checksum = "280017b6b9625ef7329146332518b339c3cceff231cc6f6a9e0e6acab25ca4af" dependencies = [ "byteorder", "bytes", "rand 0.8.5", - "rtp", - "thiserror 1.0.63", + "rtp 0.10.0", + "thiserror 1.0.69", ] [[package]] name = "webrtc-sctp" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1af6116b7f9703560c3ad0b32f67220b171bb1b59633b03563db8404d0e482ea" +checksum = "df75ec042002fe995194712cbeb2029107a60a7eab646f1b789eb1be94d0e367" dependencies = [ "arc-swap", "async-trait", @@ -6881,7 +6959,7 @@ dependencies = [ "crc", "log", "rand 0.8.5", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "webrtc-util 0.8.1", ] @@ -6901,10 +6979,10 @@ dependencies = [ "hmac 0.12.1", "log", "rtcp", - "rtp", + "rtp 0.9.0", "sha1", "subtle", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "webrtc-util 0.8.1", ] @@ -6922,18 +7000,18 @@ dependencies = [ "lazy_static", "libc", "log", - "nix 0.26.4", + "nix", "rand 0.8.5", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "winapi", ] [[package]] name = "webrtc-util" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc8d9bc631768958ed97b8d68b5d301e63054ae90b09083d43e2fefb939fd77e" +checksum = "1438a8fd0d69c5775afb4a71470af92242dbd04059c61895163aa3c1ef933375" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -6942,10 +7020,10 @@ dependencies = [ "lazy_static", "libc", "log", - "nix 0.26.4", + "nix", "portable-atomic", "rand 0.8.5", - "thiserror 1.0.63", + "thiserror 1.0.69", "tokio", "winapi", ] @@ -6970,9 +7048,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -6992,11 +7070,11 @@ checksum 
= "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -7007,40 +7085,105 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.51.1" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", + "windows-core 0.53.0", + "windows-targets 0.52.6", ] [[package]] name = "windows" -version = "0.52.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.0", + "windows-core 0.57.0", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.51.1" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" dependencies = [ - "windows-targets 0.48.5", + "windows-result 0.1.2", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.52.0" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ - "windows-targets 0.52.0", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 
0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", ] [[package]] @@ -7058,7 +7201,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -7078,17 +7230,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -7099,9 +7252,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -7111,9 +7264,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -7123,9 +7276,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -7135,9 +7294,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum 
= "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -7147,9 +7306,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -7159,9 +7318,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -7171,15 +7330,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.5" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -7195,14 +7354,16 @@ dependencies = [ ] [[package]] -name = "winreg" -version = "0.52.0" +name = "write16" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "x25519-dalek" @@ -7230,7 +7391,7 @@ dependencies = [ "oid-registry 0.6.1", "ring 0.16.20", "rusticata-macros", - "thiserror 1.0.63", + "thiserror 1.0.69", "time", ] @@ -7240,22 +7401,22 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs 0.6.2", "data-encoding", "der-parser 9.0.0", "lazy_static", "nom", - "oid-registry 0.7.0", + "oid-registry 0.7.1", "rusticata-macros", - "thiserror 1.0.63", + "thiserror 1.0.69", "time", ] [[package]] name = "xml-rs" -version = "0.8.17" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eee6bf5926be7cf998d7381a9a23d833fd493f6a8034658a9505a4dc4b20444" +checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432" [[package]] name = "xmltree" @@ -7283,9 +7444,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31b5e376a8b012bee9c423acdbb835fc34d45001cfa3106236a624e4b738028" +checksum = 
"17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" dependencies = [ "futures", "log", @@ -7306,24 +7467,70 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", + "synstructure 0.13.1", +] + [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", + "synstructure 0.13.1", ] [[package]] @@ -7343,5 +7550,27 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.92", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", ] diff --git a/Cargo.toml b/Cargo.toml index b631b587dee..bd793d4f2c1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "misc/quickcheck-ext", "misc/rw-stream-sink", "misc/server", + "misc/test-utils", "misc/webrtc-utils", "muxers/mplex", "muxers/test-harness", @@ -69,35 +70,35 @@ members = [ resolver = "2" [workspace.package] -rust-version = "1.75.0" +rust-version = "1.83.0" [workspace.dependencies] -libp2p = { version = "0.54.2", path = "libp2p" } -libp2p-allow-block-list = { version = "0.4.2", path = "misc/allow-block-list" } +libp2p = { version = "0.55.0", path = "libp2p" } +libp2p-allow-block-list = { version = "0.4.1", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.13.1", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.4.1", path = "misc/connection-limits" } libp2p-core = { 
version = "0.42.1", path = "core" } libp2p-dcutr = { version = "0.12.1", path = "protocols/dcutr" } -libp2p-dns = { version = "0.42.0", path = "transports/dns" } +libp2p-dns = { version = "0.42.1", path = "transports/dns" } libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.48.0", path = "protocols/gossipsub" } libp2p-identify = { version = "0.46.0", path = "protocols/identify" } libp2p-identity = { version = "0.2.10" } libp2p-kad = { version = "0.47.0", path = "protocols/kad" } -libp2p-mdns = { version = "0.46.0", path = "protocols/mdns" } +libp2p-mdns = { version = "0.46.1", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.3.1", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.15.0", path = "misc/metrics" } libp2p-mplex = { version = "0.42.0", path = "muxers/mplex" } -libp2p-noise = { version = "0.45.0", path = "transports/noise" } +libp2p-noise = { version = "0.45.1", path = "transports/noise" } libp2p-perf = { version = "0.4.0", path = "protocols/perf" } libp2p-ping = { version = "0.45.1", path = "protocols/ping" } libp2p-plaintext = { version = "0.42.0", path = "transports/plaintext" } libp2p-pnet = { version = "0.25.0", path = "transports/pnet" } libp2p-quic = { version = "0.11.2", path = "transports/quic" } libp2p-relay = { version = "0.18.1", path = "protocols/relay" } -libp2p-rendezvous = { version = "0.15.0", path = "protocols/rendezvous" } -libp2p-request-response = { version = "0.27.1", path = "protocols/request-response" } -libp2p-server = { version = "0.12.8", path = "misc/server" } +libp2p-rendezvous = { version = "0.15.1", path = "protocols/rendezvous" } +libp2p-request-response = { version = "0.28.0", path = "protocols/request-response" } +libp2p-server = { version = "0.12.6", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha.1", path = "protocols/stream" } libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. 
@@ -111,14 +112,20 @@ libp2p-webrtc-utils = { version = "0.3.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.4.0-alpha.2", path = "transports/webrtc-websys" } libp2p-websocket = { version = "0.44.1", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.4.1", path = "transports/websocket-websys" } -libp2p-webtransport-websys = { version = "0.4.0", path = "transports/webtransport-websys" } +libp2p-webtransport-websys = { version = "0.4.1", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.46.0", path = "muxers/yamux" } +libp2p-test-utils = { version = "0.1.0", path = "misc/test-utils" } # External dependencies +async-std-resolver = { version = "0.25.0-alpha.4", default-features = false } asynchronous-codec = { version = "0.7.0" } +env_logger = "0.11" futures = "0.3.30" futures-bounded = { version = "0.2.4" } futures-rustls = { version = "0.26.0", default-features = false } +getrandom = "0.2" +hickory-proto = { version = "0.25.0-alpha.4", default-features = false } +hickory-resolver = { version = "0.25.0-alpha.4", default-features = false } multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } @@ -130,10 +137,11 @@ ring = "0.17.8" rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } thiserror = "2" tokio = { version = "1.38", default-features = false } -tracing = "0.1.37" -tracing-subscriber = "0.3" +tracing = "0.1.41" +tracing-subscriber = "0.3.19" unsigned-varint = { version = "0.8.0" } web-time = "1.1.0" +hashlink = "0.9.0" [patch.crates-io] diff --git a/README.md b/README.md index d818c6ba7b4..24e88f62751 100644 --- a/README.md +++ b/README.md @@ -97,6 +97,7 @@ Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - [OpenMina](https://github.com/openmina/openmina) - In-browser Mina Rust implementation. - [rust-ipfs](https://github.com/rs-ipfs/rust-ipfs) - IPFS implementation in Rust. - [Safe Network](https://github.com/maidsafe/safe_network) - Safe Network implementation in Rust. +- [SQD Network](https://github.com/subsquid/sqd-network) - A decentralized storage for Web3 data. - [Starcoin](https://github.com/starcoinorg/starcoin) - A smart contract blockchain network that scales by layering. - [Subspace](https://github.com/subspace/subspace) - Subspace Network reference implementation - [Substrate](https://github.com/paritytech/substrate) - Framework for blockchain innovation, diff --git a/ROADMAP.md b/ROADMAP.md index 0d422a6d385..a8df8242730 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -61,7 +61,7 @@ The project supports Wasm already today, though the developer experience is cumb Properly supporting Wasm opens rust-libp2p to a whole new set of use-cases. I would love for this to happen earlier. Though (a) I think we should prioritize improving existing functionality over new functionality and (b) we don't have high demand for this feature from the community. -(One could argue that that demand follows this roadmap item and not the other way round.) +(One could argue that the demand follows this roadmap item and not the other way round.) 
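The `tracing = "0.1.41"` and `tracing-subscriber = "0.3.19"` bumps above back the logging setup the examples later in this diff rely on. A minimal sketch of that initialization, assuming both crates as dependencies and the `env-filter` feature enabled:

```rust
use tracing_subscriber::EnvFilter;

fn main() {
    // Respects RUST_LOG, e.g. `RUST_LOG=libp2p_swarm=debug cargo run`.
    let _ = tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .try_init();

    tracing::info!("logging initialized");
}
```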
### WebRTC in the browser via WASM diff --git a/core/Cargo.toml b/core/Cargo.toml index 8ec0b0fc197..162800b96c2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -26,8 +26,6 @@ pin-project = "1.1.5" quick-protobuf = "0.8" rand = "0.8" rw-stream-sink = { workspace = true } -serde = { version = "1", optional = true, features = ["derive"] } -smallvec = "1.13.2" thiserror = { workspace = true } tracing = { workspace = true } unsigned-varint = { workspace = true } @@ -37,11 +35,10 @@ async-std = { version = "1.6.2", features = ["attributes"] } libp2p-mplex = { path = "../muxers/mplex" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. libp2p-noise = { path = "../transports/noise" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. multihash = { workspace = true, features = ["arb"] } -quickcheck = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } [features] -serde = ["multihash/serde-codec", "dep:serde", "libp2p-identity/serde"] +serde = ["multihash/serde-codec", "libp2p-identity/serde"] # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/core/src/connection.rs b/core/src/connection.rs index bb6639842c9..8779f76c03c 100644 --- a/core/src/connection.rs +++ b/core/src/connection.rs @@ -70,18 +70,16 @@ pub enum ConnectedPoint { /// /// - [`Endpoint::Dialer`] represents the default non-overriding option. /// - /// - [`Endpoint::Listener`] represents the overriding option. - /// Realization depends on the transport protocol. E.g. in the case of - /// TCP, both endpoints dial each other, resulting in a _simultaneous - /// open_ TCP connection. On this new connection both endpoints assume - /// to be the dialer of the connection. This is problematic during the - /// connection upgrade process where an upgrade assumes one side to be - /// the listener. With the help of this option, both peers can - /// negotiate the roles (dialer and listener) for the new connection - /// ahead of time, through some external channel, e.g. the DCUtR - /// protocol, and thus have one peer dial the other and upgrade the - /// connection as a dialer and one peer dial the other and upgrade the - /// connection _as a listener_ overriding its role. + /// - [`Endpoint::Listener`] represents the overriding option. Realization depends on the + /// transport protocol. E.g. in the case of TCP, both endpoints dial each other, + /// resulting in a _simultaneous open_ TCP connection. On this new connection both + /// endpoints assume to be the dialer of the connection. This is problematic during the + /// connection upgrade process where an upgrade assumes one side to be the listener. With + /// the help of this option, both peers can negotiate the roles (dialer and listener) for + /// the new connection ahead of time, through some external channel, e.g. the DCUtR + /// protocol, and thus have one peer dial the other and upgrade the connection as a + /// dialer and one peer dial the other and upgrade the connection _as a listener_ + /// overriding its role. role_override: Endpoint, /// Whether the port for the outgoing connection was reused from a listener /// or a new port was allocated. This is useful for address translation. @@ -123,28 +121,18 @@ impl ConnectedPoint { /// Returns true if we are `Dialer`. pub fn is_dialer(&self) -> bool { - match self { - ConnectedPoint::Dialer { .. 
} => true, - ConnectedPoint::Listener { .. } => false, - } + matches!(self, ConnectedPoint::Dialer { .. }) } /// Returns true if we are `Listener`. pub fn is_listener(&self) -> bool { - match self { - ConnectedPoint::Dialer { .. } => false, - ConnectedPoint::Listener { .. } => true, - } + matches!(self, ConnectedPoint::Listener { .. }) } /// Returns true if the connection is relayed. pub fn is_relayed(&self) -> bool { match self { - ConnectedPoint::Dialer { - address, - role_override: _, - port_use: _, - } => address, + ConnectedPoint::Dialer { address, .. } => address, ConnectedPoint::Listener { local_addr, .. } => local_addr, } .iter() diff --git a/core/src/either.rs b/core/src/either.rs index 2593174290c..aa0340a46bf 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -18,17 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::muxing::StreamMuxerEvent; -use crate::transport::DialOpts; -use crate::{ - muxing::StreamMuxer, - transport::{ListenerId, Transport, TransportError, TransportEvent}, - Multiaddr, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use pin_project::pin_project; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; impl StreamMuxer for future::Either where diff --git a/core/src/lib.rs b/core/src/lib.rs index ab5afbedae4..bbe42adc26a 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,14 +22,12 @@ //! //! The main concepts of libp2p-core are: //! -//! - The [`Transport`] trait defines how to reach a remote node or listen for -//! incoming remote connections. See the [`transport`] module. -//! - The [`StreamMuxer`] trait is implemented on structs that hold a connection -//! to a remote and can subdivide this connection into multiple substreams. -//! See the [`muxing`] module. -//! - The [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] traits -//! define how to upgrade each individual substream to use a protocol. -//! See the `upgrade` module. +//! - The [`Transport`] trait defines how to reach a remote node or listen for incoming remote +//! connections. See the [`transport`] module. +//! - The [`StreamMuxer`] trait is implemented on structs that hold a connection to a remote and can +//! subdivide this connection into multiple substreams. See the [`muxing`] module. +//! - The [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] traits define how to upgrade +//! each individual substream to use a protocol. See the `upgrade` module. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -37,7 +35,8 @@ mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); pub use self::{ - envelope_proto::*, peer_record_proto::mod_PeerRecord::*, peer_record_proto::PeerRecord, + envelope_proto::*, + peer_record_proto::{mod_PeerRecord::*, PeerRecord}, }; } diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 477e1608073..60062f899f9 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -24,7 +24,7 @@ //! has ownership of a connection, lets you open and close substreams. //! //! > **Note**: You normally don't need to use the methods of the `StreamMuxer` directly, as this -//! > is managed by the library's internals. +//! > is managed by the library's internals. //! //! Each substream of a connection is an isolated stream of data. All the substreams are muxed //! 
together so that the data read from or written to each substream doesn't influence the other @@ -36,9 +36,9 @@ //! require maintaining long-lived channels of communication. //! //! > **Example**: The Kademlia protocol opens a new substream for each request it wants to -//! > perform. Multiple requests can be performed simultaneously by opening multiple -//! > substreams, without having to worry about associating responses with the -//! > right request. +//! > perform. Multiple requests can be performed simultaneously by opening multiple +//! > substreams, without having to worry about associating responses with the +//! > right request. //! //! # Implementing a muxing protocol //! @@ -50,21 +50,23 @@ //! The upgrade process will take ownership of the connection, which makes it possible for the //! implementation of `StreamMuxer` to control everything that happens on the wire. -use futures::{task::Context, task::Poll, AsyncRead, AsyncWrite}; +use std::{future::Future, pin::Pin}; + +use futures::{ + task::{Context, Poll}, + AsyncRead, AsyncWrite, +}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -pub use self::boxed::StreamMuxerBox; -pub use self::boxed::SubstreamBox; +pub use self::boxed::{StreamMuxerBox, SubstreamBox}; mod boxed; /// Provides multiplexing for a connection by allowing users to open substreams. /// -/// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and [`AsyncWrite`]. -/// The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features `poll`-style -/// functions that allow the implementation to make progress on various tasks. +/// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and +/// [`AsyncWrite`]. The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features +/// `poll`-style functions that allow the implementation to make progress on various tasks. pub trait StreamMuxer { /// Type of the object that represents the raw substream where data can be read and written. type Substream: AsyncRead + AsyncWrite; @@ -90,13 +92,13 @@ pub trait StreamMuxer { /// Poll to close this [`StreamMuxer`]. /// - /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be safely - /// dropped. + /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be + /// safely dropped. /// /// > **Note**: You are encouraged to call this method and wait for it to return `Ready`, so - /// > that the remote is properly informed of the shutdown. However, apart from - /// > properly informing the remote, there is no difference between this and - /// > immediately dropping the muxer. + /// > that the remote is properly informed of the shutdown. However, apart from + /// > properly informing the remote, there is no difference between this and + /// > immediately dropping the muxer. fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; /// Poll to allow the underlying connection to make progress. @@ -120,7 +122,8 @@ pub enum StreamMuxerEvent { /// Extension trait for [`StreamMuxer`]. pub trait StreamMuxerExt: StreamMuxer + Sized { - /// Convenience function for calling [`StreamMuxer::poll_inbound`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_inbound`] + /// for [`StreamMuxer`]s that are `Unpin`. 
fn poll_inbound_unpin( &mut self, cx: &mut Context<'_>, @@ -131,7 +134,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_inbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll_outbound`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_outbound`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_outbound_unpin( &mut self, cx: &mut Context<'_>, @@ -142,7 +146,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_outbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, @@ -150,7 +155,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll(cx) } - /// Convenience function for calling [`StreamMuxer::poll_close`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_close`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, diff --git a/core/src/muxing/boxed.rs b/core/src/muxing/boxed.rs index e909fb9fbf1..8e76c32b73e 100644 --- a/core/src/muxing/boxed.rs +++ b/core/src/muxing/boxed.rs @@ -1,12 +1,15 @@ -use crate::muxing::{StreamMuxer, StreamMuxerEvent}; +use std::{ + error::Error, + fmt, io, + io::{IoSlice, IoSliceMut}, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use pin_project::pin_project; -use std::error::Error; -use std::fmt; -use std::io; -use std::io::{IoSlice, IoSliceMut}; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use crate::muxing::{StreamMuxer, StreamMuxerEvent}; /// Abstract `StreamMuxer`. pub struct StreamMuxerBox { @@ -139,7 +142,8 @@ impl StreamMuxer for StreamMuxerBox { } impl SubstreamBox { - /// Construct a new [`SubstreamBox`] from something that implements [`AsyncRead`] and [`AsyncWrite`]. + /// Construct a new [`SubstreamBox`] from something + /// that implements [`AsyncRead`] and [`AsyncWrite`]. pub fn new(stream: S) -> Self { Self(Box::pin(stream)) } diff --git a/core/src/peer_record.rs b/core/src/peer_record.rs index ac488338cc6..9c6b7f73f05 100644 --- a/core/src/peer_record.rs +++ b/core/src/peer_record.rs @@ -1,18 +1,16 @@ -use crate::signed_envelope::SignedEnvelope; -use crate::{proto, signed_envelope, DecodeError, Multiaddr}; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; -use libp2p_identity::SigningError; +use libp2p_identity::{Keypair, PeerId, SigningError}; use quick_protobuf::{BytesReader, Writer}; use web_time::SystemTime; +use crate::{proto, signed_envelope, signed_envelope::SignedEnvelope, DecodeError, Multiaddr}; + const PAYLOAD_TYPE: &str = "/libp2p/routing-state-record"; const DOMAIN_SEP: &str = "libp2p-routing-state"; /// Represents a peer routing record. /// -/// Peer records are designed to be distributable and carry a signature by being wrapped in a signed envelope. -/// For more information see RFC0003 of the libp2p specifications: +/// Peer records are designed to be distributable and carry a signature by being wrapped in a signed +/// envelope. For more information see RFC0003 of the libp2p specifications: #[derive(Debug, PartialEq, Eq, Clone)] pub struct PeerRecord { peer_id: PeerId, @@ -21,14 +19,16 @@ pub struct PeerRecord { /// A signed envelope representing this [`PeerRecord`]. 
/// - /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this is the original instance. + /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this is the original + /// instance. envelope: SignedEnvelope, } impl PeerRecord { /// Attempt to re-construct a [`PeerRecord`] from a [`SignedEnvelope`]. /// - /// If this function succeeds, the [`SignedEnvelope`] contained a peer record with a valid signature and can hence be considered authenticated. + /// If this function succeeds, the [`SignedEnvelope`] contained a peer record with a valid + /// signature and can hence be considered authenticated. pub fn from_signed_envelope(envelope: SignedEnvelope) -> Result { use quick_protobuf::MessageRead; @@ -60,7 +60,8 @@ impl PeerRecord { /// Construct a new [`PeerRecord`] by authenticating the provided addresses with the given key. /// - /// This is the same key that is used for authenticating every libp2p connection of your application, i.e. what you use when setting up your [`crate::transport::Transport`]. + /// This is the same key that is used for authenticating every libp2p connection of your + /// application, i.e. what you use when setting up your [`crate::transport::Transport`]. pub fn new(key: &Keypair, addresses: Vec) -> Result { use quick_protobuf::MessageWrite; diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 19a0cac4f82..754d6ec204d 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -1,11 +1,13 @@ -use crate::{proto, DecodeError}; -use libp2p_identity::SigningError; -use libp2p_identity::{Keypair, PublicKey}; -use quick_protobuf::{BytesReader, Writer}; use std::fmt; + +use libp2p_identity::{Keypair, PublicKey, SigningError}; +use quick_protobuf::{BytesReader, Writer}; use unsigned_varint::encode::usize_buffer; -/// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the public key that can be used to verify the signature. +use crate::{proto, DecodeError}; + +/// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the +/// public key that can be used to verify the signature. /// /// For more details see libp2p RFC0002: #[derive(Debug, Clone, PartialEq, Eq)] @@ -46,8 +48,9 @@ impl SignedEnvelope { /// Extract the payload and signing key of this [`SignedEnvelope`]. /// - /// You must provide the correct domain-separation string and expected payload type in order to get the payload. - /// This guards against accidental mis-use of the payload where the signature was created for a different purpose or payload type. + /// You must provide the correct domain-separation string and expected payload type in order to + /// get the payload. This guards against accidental mis-use of the payload where the + /// signature was created for a different purpose or payload type. /// /// It is the caller's responsibility to check that the signing key is what /// is expected. For example, checking that the signing key is from a @@ -156,7 +159,8 @@ pub enum DecodingError { /// Errors that occur whilst extracting the payload of a [`SignedEnvelope`]. #[derive(Debug)] pub enum ReadPayloadError { - /// The signature on the signed envelope does not verify with the provided domain separation string. + /// The signature on the signed envelope does not verify + /// with the provided domain separation string. InvalidSignature, /// The payload contained in the envelope is not of the expected type. 
UnexpectedPayloadType { expected: Vec, got: Vec }, diff --git a/core/src/transport.rs b/core/src/transport.rs index 28ce2dbf650..ecd332f28cc 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -25,8 +25,6 @@ //! any desired protocols. The rest of the module defines combinators for //! modifying a transport through composition with other transports or protocol upgrades. -use futures::prelude::*; -use multiaddr::Multiaddr; use std::{ error::Error, fmt, @@ -35,6 +33,9 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use multiaddr::Multiaddr; + pub mod and_then; pub mod choice; pub mod dummy; @@ -48,14 +49,12 @@ pub mod upgrade; mod boxed; mod optional; +pub use self::{ + boxed::Boxed, choice::OrTransport, memory::MemoryTransport, optional::OptionalTransport, + upgrade::Upgrade, +}; use crate::{ConnectedPoint, Endpoint}; -pub use self::boxed::Boxed; -pub use self::choice::OrTransport; -pub use self::memory::MemoryTransport; -pub use self::optional::OptionalTransport; -pub use self::upgrade::Upgrade; - static NEXT_LISTENER_ID: AtomicUsize = AtomicUsize::new(1); /// The port use policy for a new connection. @@ -75,8 +74,9 @@ pub enum PortUse { pub struct DialOpts { /// The endpoint establishing a new connection. /// - /// When attempting a hole-punch, both parties simultaneously "dial" each other but one party has to be the "listener" on the final connection. - /// This option specifies the role of this node in the final connection. + /// When attempting a hole-punch, both parties simultaneously "dial" each other but one party + /// has to be the "listener" on the final connection. This option specifies the role of + /// this node in the final connection. pub role: Endpoint, /// The port use policy for a new connection. pub port_use: PortUse, @@ -161,10 +161,10 @@ pub trait Transport { /// Poll for [`TransportEvent`]s. /// - /// A [`TransportEvent::Incoming`] should be produced whenever a connection is received at the lowest - /// level of the transport stack. The item must be a [`ListenerUpgrade`](Transport::ListenerUpgrade) - /// future that resolves to an [`Output`](Transport::Output) value once all protocol upgrades have - /// been applied. + /// A [`TransportEvent::Incoming`] should be produced whenever a connection is received at the + /// lowest level of the transport stack. The item must be a + /// [`ListenerUpgrade`](Transport::ListenerUpgrade) future that resolves to an + /// [`Output`](Transport::Output) value once all protocol upgrades have been applied. /// /// Transports are expected to produce [`TransportEvent::Incoming`] events only for /// listen addresses which have previously been announced via diff --git a/core/src/transport/and_then.rs b/core/src/transport/and_then.rs index e85703f77fb..5d2b7d91553 100644 --- a/core/src/transport/and_then.rs +++ b/core/src/transport/and_then.rs @@ -18,14 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - connection::ConnectedPoint, - transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +use std::{ + error, + marker::PhantomPinned, + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, marker::PhantomPinned, pin::Pin, task::Context, task::Poll}; + +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// See the [`Transport::and_then`] method. 
#[pin_project::pin_project] diff --git a/core/src/transport/boxed.rs b/core/src/transport/boxed.rs index 596ab262221..6894d9876aa 100644 --- a/core/src/transport/boxed.rs +++ b/core/src/transport/boxed.rs @@ -18,9 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use futures::{prelude::*, stream::FusedStream}; -use multiaddr::Multiaddr; use std::{ error::Error, fmt, io, @@ -28,6 +25,11 @@ use std::{ task::{Context, Poll}, }; +use futures::{prelude::*, stream::FusedStream}; +use multiaddr::Multiaddr; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + /// Creates a new [`Boxed`] transport from the given transport. pub(crate) fn boxed(transport: T) -> Boxed where diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 4339f6bba71..251091f2008 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -18,12 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::either::EitherFuture; -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use either::Either; use futures::future; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + either::EitherFuture, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// Struct returned by `or_transport()`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/dummy.rs b/core/src/transport/dummy.rs index 72558d34a79..85c5815fd37 100644 --- a/core/src/transport/dummy.rs +++ b/core/src/transport/dummy.rs @@ -18,11 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use crate::Multiaddr; -use futures::{prelude::*, task::Context, task::Poll}; use std::{fmt, io, marker::PhantomData, pin::Pin}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; + +use crate::{ + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; + /// Implementation of `Transport` that doesn't support any multiaddr. /// /// Useful for testing purposes, or as a fallback implementation when no protocol is available. diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 83774f37004..00df6457412 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -18,15 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; use std::{ pin::Pin, task::{Context, Poll}, }; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; + /// Dropping all dial requests to non-global IP addresses. #[derive(Debug, Clone, Default)] pub struct Transport { @@ -104,7 +105,8 @@ mod ipv4_global { /// Returns [`true`] if the address appears to be globally reachable /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. 
+ /// Whether or not an address is practically reachable will depend on your network + /// configuration. /// /// Most IPv4 addresses are globally reachable; /// unless they are specifically defined as *not* globally reachable. @@ -121,7 +123,8 @@ mod ipv4_global { /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see the table at the + /// [IANA IPv4 Special-Purpose Address Registry]. /// /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml /// [unspecified address]: Ipv4Addr::UNSPECIFIED @@ -154,9 +157,10 @@ mod ipv6_global { /// Returns `true` if the address is a unicast address with link-local scope, /// as defined in [RFC 4291]. /// - /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4]. - /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6], - /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format: + /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 + /// section 2.4]. Note that this encompasses more addresses than those defined in [RFC 4291 + /// section 2.5.6], which describes "Link-Local IPv6 Unicast Addresses" as having the + /// following stricter format: /// /// ```text /// | 10 bits | 54 bits | 64 bits | @@ -164,12 +168,14 @@ mod ipv6_global { /// |1111111010| 0 | interface ID | /// +----------+-------------------------+----------------------------+ /// ``` - /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`, - /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated, - /// and those addresses will have link-local scope. + /// So while currently the only addresses with link-local scope an application will encounter + /// are all in `fe80::/64`, this might change in the future with the publication of new + /// standards. More addresses in `fe80::/10` could be allocated, and those addresses will + /// have link-local scope. /// - /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) that "it is treated as having Link-Local scope", - /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it. + /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) + /// that "it is treated as having Link-Local scope", this does not mean that the loopback + /// address actually has link-local scope and this method will return `false` on it. /// /// [RFC 4291]: https://tools.ietf.org/html/rfc4291 /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4 @@ -207,7 +213,8 @@ mod ipv6_global { /// Returns [`true`] if the address appears to be globally reachable /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. + /// Whether or not an address is practically reachable will depend on your network + /// configuration. 
/// /// Most IPv6 addresses are globally reachable; /// unless they are specifically defined as *not* globally reachable. @@ -219,13 +226,15 @@ mod ipv6_global { /// - Addresses reserved for benchmarking /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) - /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// - Unicast addresses with link-local scope + /// ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see the table at the + /// [IANA IPv6 Special-Purpose Address Registry]. /// /// Note that an address having global scope is not the same as being globally reachable, - /// and there is no direct relation between the two concepts: There exist addresses with global scope - /// that are not globally reachable (for example unique local addresses), + /// and there is no direct relation between the two concepts: There exist addresses with global + /// scope that are not globally reachable (for example unique local addresses), /// and addresses that are globally reachable without having global scope /// (multicast addresses with non-global scope). /// diff --git a/core/src/transport/map.rs b/core/src/transport/map.rs index 9aab84ba8b1..4f6910b141f 100644 --- a/core/src/transport/map.rs +++ b/core/src/transport/map.rs @@ -18,16 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::DialOpts; -use crate::{ - connection::ConnectedPoint, - transport::{Transport, TransportError, TransportEvent}, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; use super::ListenerId; +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, Transport, TransportError, TransportEvent}, +}; /// See `Transport::map`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/map_err.rs b/core/src/transport/map_err.rs index 5d44af9af2e..f47f5713225 100644 --- a/core/src/transport/map_err.rs +++ b/core/src/transport/map_err.rs @@ -18,10 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + error, + pin::Pin, + task::{Context, Poll}, +}; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// See `Transport::map_err`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 85680265e8b..19197ddf714 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -18,13 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
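The reflowed `global_only` doc comments above spell out which addresses count as globally reachable. As a rough orientation, here is a std-only sketch of the IPv4 side of that check; the real module mirrors the full IANA IPv4 Special-Purpose Address Registry and covers further ranges (shared address space, benchmarking, reserved) that this simplification omits.

```rust
use std::net::Ipv4Addr;

// Std-only approximation of the "globally reachable" test described above.
fn is_probably_global(addr: Ipv4Addr) -> bool {
    !(addr.is_unspecified()
        || addr.is_loopback()
        || addr.is_private()
        || addr.is_link_local()
        || addr.is_broadcast()
        || addr.is_documentation())
}

fn main() {
    assert!(!is_probably_global(Ipv4Addr::new(192, 168, 1, 10))); // private range
    assert!(!is_probably_global(Ipv4Addr::new(127, 0, 0, 1))); // loopback
    assert!(is_probably_global(Ipv4Addr::new(1, 1, 1, 1))); // publicly routable
}
```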
-use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use fnv::FnvHashMap; -use futures::{channel::mpsc, future::Ready, prelude::*, task::Context, task::Poll}; -use multiaddr::{Multiaddr, Protocol}; -use once_cell::sync::Lazy; -use parking_lot::Mutex; -use rw_stream_sink::RwStreamSink; use std::{ collections::{hash_map::Entry, VecDeque}, error, fmt, io, @@ -32,6 +25,20 @@ use std::{ pin::Pin, }; +use fnv::FnvHashMap; +use futures::{ + channel::mpsc, + future::Ready, + prelude::*, + task::{Context, Poll}, +}; +use multiaddr::{Multiaddr, Protocol}; +use once_cell::sync::Lazy; +use parking_lot::Mutex; +use rw_stream_sink::RwStreamSink; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + static HUB: Lazy = Lazy::new(|| Hub(Mutex::new(FnvHashMap::default()))); struct Hub(Mutex>); @@ -398,9 +405,8 @@ impl Drop for Chan { #[cfg(test)] mod tests { - use crate::{transport::PortUse, Endpoint}; - use super::*; + use crate::{transport::PortUse, Endpoint}; #[test] fn parse_memory_addr_works() { diff --git a/core/src/transport/optional.rs b/core/src/transport/optional.rs index f18bfa441b0..262f84f3095 100644 --- a/core/src/transport/optional.rs +++ b/core/src/transport/optional.rs @@ -18,9 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// Transport that is possibly disabled. /// diff --git a/core/src/transport/timeout.rs b/core/src/transport/timeout.rs index 830ed099629..ce494216279 100644 --- a/core/src/transport/timeout.rs +++ b/core/src/transport/timeout.rs @@ -24,14 +24,20 @@ //! underlying `Transport`. // TODO: add example -use crate::transport::DialOpts; -use crate::{ - transport::{ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, +use std::{ + error, fmt, io, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; + use futures::prelude::*; use futures_timer::Delay; -use std::{error, fmt, io, pin::Pin, task::Context, task::Poll, time::Duration}; + +use crate::{ + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, + Multiaddr, Transport, +}; /// A `TransportTimeout` is a `Transport` that wraps another `Transport` and adds /// timeouts to all inbound and outbound connection attempts. diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 66b9e7509af..480c2710020 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -20,15 +20,25 @@ //! Configuration of transport protocol upgrades. 
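The in-memory transport whose imports are regrouped above is mainly used in tests. A small sketch of listening and dialing over it with the explicit `DialOpts` shown elsewhere in this diff; `PortUse::New` and `ListenerId::next()` follow the current `libp2p-core` API and should be treated as assumptions if your release differs.

```rust
use libp2p_core::{
    transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport},
    Endpoint, Multiaddr,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut transport = MemoryTransport::default();

    // In-memory addresses use the `/memory/<port>` protocol.
    let addr: Multiaddr = "/memory/7777".parse()?;
    transport.listen_on(ListenerId::next(), addr.clone())?;

    // Dialing takes explicit options; the returned future resolves once the
    // listener side accepts the channel (not awaited in this sketch).
    let _pending_dial = transport.dial(
        addr,
        DialOpts {
            role: Endpoint::Dialer,
            port_use: PortUse::New,
        },
    )?;

    Ok(())
}
```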
-pub use crate::upgrade::Version; +use std::{ + error::Error, + fmt, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{prelude::*, ready}; +use libp2p_identity::PeerId; +use multiaddr::Multiaddr; -use crate::transport::DialOpts; +pub use crate::upgrade::Version; use crate::{ connection::ConnectedPoint, muxing::{StreamMuxer, StreamMuxerBox}, transport::{ - and_then::AndThen, boxed::boxed, timeout::TransportTimeout, ListenerId, Transport, - TransportError, TransportEvent, + and_then::AndThen, boxed::boxed, timeout::TransportTimeout, DialOpts, ListenerId, + Transport, TransportError, TransportEvent, }, upgrade::{ self, apply_inbound, apply_outbound, InboundConnectionUpgrade, InboundUpgradeApply, @@ -36,16 +46,6 @@ use crate::{ }, Negotiated, }; -use futures::{prelude::*, ready}; -use libp2p_identity::PeerId; -use multiaddr::Multiaddr; -use std::{ - error::Error, - fmt, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; /// A `Builder` facilitates upgrading of a [`Transport`] for use with /// a `Swarm`. @@ -59,13 +59,13 @@ use std::{ /// It thus enforces the following invariants on every transport /// obtained from [`multiplex`](Authenticated::multiplex): /// -/// 1. The transport must be [authenticated](Builder::authenticate) -/// and [multiplexed](Authenticated::multiplex). +/// 1. The transport must be [authenticated](Builder::authenticate) and +/// [multiplexed](Authenticated::multiplex). /// 2. Authentication must precede the negotiation of a multiplexer. /// 3. Applying a multiplexer is the last step in the upgrade process. -/// 4. The [`Transport::Output`] conforms to the requirements of a `Swarm`, -/// namely a tuple of a [`PeerId`] (from the authentication upgrade) and a -/// [`StreamMuxer`] (from the multiplexing upgrade). +/// 4. The [`Transport::Output`] conforms to the requirements of a `Swarm`, namely a tuple of a +/// [`PeerId`] (from the authentication upgrade) and a [`StreamMuxer`] (from the multiplexing +/// upgrade). #[derive(Clone)] pub struct Builder { inner: T, diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 7a1fd3724d0..93039705938 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -29,8 +29,8 @@ //! connection or substream. //! //! > **Note**: Multiple versions of the same protocol are treated as different protocols. -//! > For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as -//! > upgrading is concerned. +//! > For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as +//! > upgrading is concerned. //! //! # Upgrade process //! @@ -55,7 +55,6 @@ //! > connection or substream. However if you use the recommended `Swarm` or //! > `ConnectionHandler` APIs, the upgrade is automatically handled for you and you don't //! > need to use these methods. -//! mod apply; mod denied; @@ -70,12 +69,12 @@ pub(crate) use apply::{ }; pub(crate) use error::UpgradeError; use futures::future::Future; +pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; pub use self::{ denied::DeniedUpgrade, pending::PendingUpgrade, ready::ReadyUpgrade, select::SelectUpgrade, }; pub use crate::Negotiated; -pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; /// Common trait for upgrades that can be applied on inbound substreams, outbound substreams, /// or both. 
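The `Builder` invariants listed above (authenticate first, multiplex last, output of `(PeerId, StreamMuxer)`) correspond to the usual upgrade chain. A sketch under the assumption that the workspace's `libp2p-tcp` (with the `tokio` feature), `libp2p-noise` and `libp2p-yamux` crates are available with their current constructors:

```rust
use libp2p_core::{transport::Transport, upgrade::Version};
use libp2p_identity as identity;

fn build() -> Result<(), Box<dyn std::error::Error>> {
    let keypair = identity::Keypair::generate_ed25519();

    let _transport = libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default())
        // 1. Upgrade the raw TCP connection,
        .upgrade(Version::V1Lazy)
        // 2. authenticate it first (yields the remote's PeerId),
        .authenticate(libp2p_noise::Config::new(&keypair)?)
        // 3. then negotiate a multiplexer as the final step.
        .multiplex(libp2p_yamux::Config::default())
        // The boxed transport outputs `(PeerId, StreamMuxerBox)`, matching invariant 4.
        .boxed();

    Ok(())
}

fn main() {
    build().unwrap();
}
```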
diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index f84aaaac9fa..9e090267b0c 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -18,13 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; -use crate::{connection::ConnectedPoint, Negotiated}; +use std::{ + mem, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{future::Either, prelude::*}; +pub(crate) use multistream_select::Version; use multistream_select::{DialerSelectFuture, ListenerSelectFuture}; -use std::{mem, pin::Pin, task::Context, task::Poll}; -pub(crate) use multistream_select::Version; +use crate::{ + connection::ConnectedPoint, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}, + Negotiated, +}; // TODO: Still needed? /// Applies an upgrade to the inbound and outbound direction of a connection or substream. diff --git a/core/src/upgrade/denied.rs b/core/src/upgrade/denied.rs index 568bbfb056d..9bea6fb023b 100644 --- a/core/src/upgrade/denied.rs +++ b/core/src/upgrade/denied.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; + +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; /// Dummy implementation of `UpgradeInfo`/`InboundUpgrade`/`OutboundUpgrade` that doesn't support /// any protocol. diff --git a/core/src/upgrade/either.rs b/core/src/upgrade/either.rs index db62f8d6558..9970dcb0b1d 100644 --- a/core/src/upgrade/either.rs +++ b/core/src/upgrade/either.rs @@ -18,13 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::iter::Map; + +use either::Either; +use futures::future; + use crate::{ either::EitherFuture, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, }; -use either::Either; -use futures::future; -use std::iter::Map; impl UpgradeInfo for Either where diff --git a/core/src/upgrade/error.rs b/core/src/upgrade/error.rs index 3d349587c2c..c81ed7cf75b 100644 --- a/core/src/upgrade/error.rs +++ b/core/src/upgrade/error.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use multistream_select::NegotiationError; use std::fmt; +use multistream_select::NegotiationError; + /// Error that can happen when upgrading a connection or substream to use a protocol. #[derive(Debug)] pub enum UpgradeError { diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs index 5e3c65422f1..60a9fb9aba1 100644 --- a/core/src/upgrade/pending.rs +++ b/core/src/upgrade/pending.rs @@ -19,10 +19,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; + +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; /// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always /// returns a pending upgrade. 
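`DeniedUpgrade`, `PendingUpgrade` and `ReadyUpgrade` (next hunk) are the trivial implementations of the upgrade traits being reorganized here. For orientation, a pass-through upgrade in the same shape, modeled loosely on the `HelloUpgrade` test further down in this diff; the exact trait bounds are assumptions where releases differ.

```rust
use futures::{future, AsyncRead, AsyncWrite};
use libp2p_core::upgrade::{InboundConnectionUpgrade, UpgradeInfo};

#[derive(Clone)]
struct PassThrough;

impl UpgradeInfo for PassThrough {
    type Info = &'static str;
    type InfoIter = std::iter::Once<Self::Info>;

    fn protocol_info(&self) -> Self::InfoIter {
        // Different versions of the same protocol are distinct protocol names.
        std::iter::once("/pass-through/1.0.0")
    }
}

impl<C> InboundConnectionUpgrade<C> for PassThrough
where
    C: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
    type Output = C;
    type Error = std::convert::Infallible;
    type Future = future::Ready<Result<Self::Output, Self::Error>>;

    fn upgrade_inbound(self, socket: C, _info: Self::Info) -> Self::Future {
        // Hand the negotiated stream straight back, much like `ReadyUpgrade`.
        future::ready(Ok(socket))
    }
}

fn main() {}
```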
diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs index 13270aa8b6d..22708d726e7 100644 --- a/core/src/upgrade/ready.rs +++ b/core/src/upgrade/ready.rs @@ -19,12 +19,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; -/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream. +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; + +/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] +/// that directly yields the substream. #[derive(Debug, Copy, Clone)] pub struct ReadyUpgrade
<P>
{ protocol_name: P, diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 037045a2f29..b7fe4a53a7f 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::either::EitherFuture; -use crate::upgrade::{ - InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, - UpgradeInfo, -}; +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use std::iter::{Chain, Map}; + +use crate::{ + either::EitherFuture, + upgrade::{ + InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, + UpgradeInfo, + }, +}; /// Upgrade that combines two upgrades into one. Supports all the protocols supported by either /// sub-upgrade. diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index d8bec6f2b59..b9733e38322 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -18,18 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{io, pin::Pin}; + use futures::prelude::*; -use libp2p_core::transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}; -use libp2p_core::upgrade::{ - self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo, +use libp2p_core::{ + transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}, + upgrade::{self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, + Endpoint, }; -use libp2p_core::Endpoint; use libp2p_identity as identity; use libp2p_mplex::MplexConfig; use libp2p_noise as noise; use multiaddr::{Multiaddr, Protocol}; use rand::random; -use std::{io, pin::Pin}; #[derive(Clone)] struct HelloUpgrade {} diff --git a/deny.toml b/deny.toml index 5be86107edf..47487553028 100644 --- a/deny.toml +++ b/deny.toml @@ -43,6 +43,8 @@ allow = [ "MIT", "MPL-2.0", "Unlicense", + "Unicode-3.0", + "Zlib", ] # The confidence threshold for detecting a license from license text. # The higher the value, the more closely the license text must be to the diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md index bacbfe9509e..473d7020fcf 100644 --- a/docs/coding-guidelines.md +++ b/docs/coding-guidelines.md @@ -236,7 +236,7 @@ Concurrency adds complexity. Concurrency adds overhead due to synchronization. Thus unless proven to be a bottleneck, don't make things concurrent. As an example the hierarchical `NetworkBehaviour` state machine runs sequentially. It is easy to debug as it runs sequentially. Thus far there has been no proof that -shows a speed up when running it concurrently. +shows a speed-up when running it concurrently. 
## Use `async/await` for sequential execution only diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 010b76623e0..7c06b48a105 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -13,7 +13,6 @@ tokio = { workspace = true, features = ["full"] } clap = { version = "4.5.6", features = ["derive"] } futures = { workspace = true } libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index def66c4823b..768a2052c80 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -20,15 +20,17 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr, time::Duration}; + use clap::Parser; use futures::StreamExt; -use libp2p::core::multiaddr::Protocol; -use libp2p::core::Multiaddr; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, identity, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, PeerId, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -60,7 +62,6 @@ async fn main() -> Result<(), Box> { yamux::Config::default, )? .with_behaviour(|key| Behaviour::new(key.public()))? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.listen_on( diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 389cc0fa26f..f3bb6b6a439 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -20,14 +20,17 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr}; + use clap::Parser; use futures::StreamExt; -use libp2p::core::{multiaddr::Protocol, Multiaddr}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, identity, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -53,7 +56,6 @@ async fn main() -> Result<(), Box> { yamux::Config::default, )? .with_behaviour(|key| Behaviour::new(key.public()))? 
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.listen_on( diff --git a/examples/autonatv2/Cargo.toml b/examples/autonatv2/Cargo.toml index 6c862ee22e4..d400c53e7fd 100644 --- a/examples/autonatv2/Cargo.toml +++ b/examples/autonatv2/Cargo.toml @@ -19,17 +19,15 @@ libp2p = { workspace = true, features = ["macros", "tokio", "tcp", "noise", "yam clap = { version = "4.4.18", features = ["derive"] } tokio = { version = "1.35.1", features = ["macros", "rt-multi-thread"] } tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } rand = "0.8.5" -opentelemetry = { version = "0.21.0", optional = true } opentelemetry_sdk = { version = "0.21.1", optional = true, features = ["rt-tokio"] } tracing-opentelemetry = { version = "0.22.0", optional = true } opentelemetry-jaeger = { version = "0.20.0", optional = true, features = ["rt-tokio"] } cfg-if = "1.0.0" [features] -jaeger = ["opentelemetry", "opentelemetry_sdk", "tracing-opentelemetry", "opentelemetry-jaeger"] -opentelemetry = ["dep:opentelemetry"] +jaeger = ["opentelemetry_sdk", "tracing-opentelemetry", "opentelemetry-jaeger"] opentelemetry_sdk = ["dep:opentelemetry_sdk"] tracing-opentelemetry = ["dep:tracing-opentelemetry"] opentelemetry-jaeger = ["dep:opentelemetry-jaeger"] diff --git a/examples/autonatv2/Dockerfile b/examples/autonatv2/Dockerfile index 6bc92e4d11b..083f9f5c113 100644 --- a/examples/autonatv2/Dockerfile +++ b/examples/autonatv2/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.81-alpine as builder +FROM rust:1.83-alpine as builder RUN apk add musl-dev diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 9499ccbd158..e2d884cb445 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -1,13 +1,11 @@ #![cfg(target_arch = "wasm32")] +use std::{io, time::Duration}; + use futures::StreamExt; use js_sys::Date; -use libp2p::core::Multiaddr; -use libp2p::ping; -use libp2p::swarm::SwarmEvent; +use libp2p::{core::Multiaddr, ping, swarm::SwarmEvent}; use libp2p_webrtc_websys as webrtc_websys; -use std::io; -use std::time::Duration; use wasm_bindgen::prelude::*; use web_sys::{Document, HtmlElement}; diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 7f06b0d0d99..52222dc882b 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -1,23 +1,24 @@ #![allow(non_upper_case_globals)] +use std::net::{Ipv4Addr, SocketAddr}; + use anyhow::Result; -use axum::extract::{Path, State}; -use axum::http::header::CONTENT_TYPE; -use axum::http::StatusCode; -use axum::response::{Html, IntoResponse}; -use axum::{http::Method, routing::get, Router}; +use axum::{ + extract::{Path, State}, + http::{header::CONTENT_TYPE, Method, StatusCode}, + response::{Html, IntoResponse}, + routing::get, + Router, +}; use futures::StreamExt; use libp2p::{ - core::muxing::StreamMuxerBox, - core::Transport, + core::{muxing::StreamMuxerBox, Transport}, multiaddr::{Multiaddr, Protocol}, ping, swarm::SwarmEvent, }; use libp2p_webrtc as webrtc; use rand::thread_rng; -use std::net::{Ipv4Addr, SocketAddr}; -use std::time::Duration; use tokio::net::TcpListener; use tower_http::cors::{Any, CorsLayer}; @@ -37,11 +38,6 @@ async fn main() -> anyhow::Result<()> { .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn)))) })? .with_behaviour(|_| ping::Behaviour::default())? 
- .with_swarm_config(|cfg| { - cfg.with_idle_connection_timeout( - Duration::from_secs(u64::MAX), // Allows us to observe the pings. - ) - }) .build(); let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED) @@ -127,7 +123,8 @@ struct Libp2pEndpoint(Multiaddr); /// Serves the index.html file for our client. /// /// Our server listens on a random UDP port for the WebRTC transport. -/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address. +/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` +/// placeholder with the actual address. async fn get_index( State(Libp2pEndpoint(libp2p_endpoint)): State, ) -> Result, StatusCode> { diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml index a1d32956825..031f84b6f95 100644 --- a/examples/chat/Cargo.toml +++ b/examples/chat/Cargo.toml @@ -10,10 +10,8 @@ release = false [dependencies] tokio = { workspace = true, features = ["full"] } -async-trait = "0.1" futures = { workspace = true } libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index c785d301c2f..b0dcc767b6f 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -20,12 +20,19 @@ #![doc = include_str!("../README.md")] +use std::{ + collections::hash_map::DefaultHasher, + error::Error, + hash::{Hash, Hasher}, + time::Duration, +}; + use futures::stream::StreamExt; -use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux}; -use std::collections::hash_map::DefaultHasher; -use std::error::Error; -use std::hash::{Hash, Hasher}; -use std::time::Duration; +use libp2p::{ + gossipsub, mdns, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; @@ -61,7 +68,8 @@ async fn main() -> Result<(), Box> { // Set a custom gossipsub configuration let gossipsub_config = gossipsub::ConfigBuilder::default() .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message + // signing) .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. .build() .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. @@ -76,7 +84,6 @@ async fn main() -> Result<(), Box> { mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id())?; Ok(MyBehaviour { gossipsub, mdns }) })? 
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); // Create a Gossipsub topic diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index c1b4bbc6e7e..67edf04e2b0 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -13,7 +13,6 @@ clap = { version = "4.5.6", features = ["derive"] } futures = { workspace = true } futures-timer = "3.0" libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } -log = "0.4" tokio = { workspace = true, features = ["macros", "net", "rt", "signal"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 630d4b2b1f3..3f403d534e7 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -20,6 +20,8 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, str::FromStr}; + use clap::Parser; use futures::{executor::block_on, future::FutureExt, stream::StreamExt}; use libp2p::{ @@ -28,8 +30,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use std::str::FromStr; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -105,7 +105,6 @@ async fn main() -> Result<(), Box> { )), dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()), })? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index 3846e54c8d3..8e30dd2c75d 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -10,10 +10,8 @@ release = false [dependencies] tokio = { workspace = true, features = ["full"] } -async-trait = "0.1" futures = { workspace = true } libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 6b7947b7eb3..3522c84c720 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -20,17 +20,16 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::stream::StreamExt; -use libp2p::kad; -use libp2p::kad::store::MemoryStore; -use libp2p::kad::Mode; use libp2p::{ + kad, + kad::{store::MemoryStore, Mode}, mdns, noise, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::time::Duration; use tokio::{ io::{self, AsyncBufReadExt}, select, @@ -69,7 +68,6 @@ async fn main() -> Result<(), Box> { )?, }) })? 
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index d098ce44317..021215c003b 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -14,7 +14,6 @@ tokio = { workspace = true, features = ["full"] } clap = { version = "4.5.6", features = ["derive"] } futures = { workspace = true } libp2p = { path = "../../libp2p", features = [ "tokio", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 5f6be83dc11..1e3b80a330c 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -22,15 +22,12 @@ mod network; -use clap::Parser; -use tokio::task::spawn; +use std::{error::Error, io::Write, path::PathBuf}; -use futures::prelude::*; -use futures::StreamExt; +use clap::Parser; +use futures::{prelude::*, StreamExt}; use libp2p::{core::Multiaddr, multiaddr::Protocol}; -use std::error::Error; -use std::io::Write; -use std::path::PathBuf; +use tokio::task::spawn; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index a74afd1c0da..409255ee9ec 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,7 +1,14 @@ -use futures::channel::{mpsc, oneshot}; -use futures::prelude::*; -use futures::StreamExt; +use std::{ + collections::{hash_map, HashMap, HashSet}, + error::Error, + time::Duration, +}; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, + StreamExt, +}; use libp2p::{ core::Multiaddr, identity, kad, @@ -9,19 +16,13 @@ use libp2p::{ noise, request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel}, swarm::{NetworkBehaviour, Swarm, SwarmEvent}, - tcp, yamux, PeerId, + tcp, yamux, PeerId, StreamProtocol, }; - -use libp2p::StreamProtocol; use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, HashMap, HashSet}; -use std::error::Error; -use std::time::Duration; /// Creates the network components, namely: /// -/// - The network client to interact with the network layer from anywhere -/// within your application. +/// - The network client to interact with the network layer from anywhere within your application. /// /// - The network event stream, e.g. for incoming requests. 
/// diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index 8d12699afa7..c18f71a0386 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -12,7 +12,6 @@ release = false tokio = { version = "1.37.0", features = ["full"] } futures = { workspace = true } libp2p = { path = "../../libp2p", features = ["identify", "noise", "tcp", "tokio", "yamux"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index 22474061da6..3f08ac01e23 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -44,7 +45,6 @@ async fn main() -> Result<(), Box> { key.public(), )) })? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index 115c604269f..fa04da4edcf 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -10,13 +10,10 @@ release = false [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -async-trait = "0.1" clap = { version = "4.5.6", features = ["derive"] } -env_logger = "0.10" futures = { workspace = true } anyhow = "1.0.86" libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index 95921d6fa35..8d9a289bdd1 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -20,15 +20,21 @@ #![doc = include_str!("../README.md")] -use std::num::NonZeroUsize; -use std::ops::Add; -use std::time::{Duration, Instant}; +use std::{ + num::NonZeroUsize, + ops::Add, + time::{Duration, Instant}, +}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::swarm::{StreamProtocol, SwarmEvent}; -use libp2p::{bytes::BufMut, identity, kad, noise, tcp, yamux, PeerId}; +use libp2p::{ + bytes::BufMut, + identity, kad, noise, + swarm::{StreamProtocol, SwarmEvent}, + tcp, yamux, PeerId, +}; use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ @@ -64,7 +70,6 @@ async fn main() -> Result<()> { let store = kad::store::MemoryStore::new(key.public().to_peer_id()); kad::Behaviour::with_config(key.public().to_peer_id(), store, cfg) })? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(); // Add the bootnodes to the local routing table. 
`libp2p-dns` built diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 0813dba56e0..4dfe596d609 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -10,11 +10,9 @@ release = false [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros", "io-std"] } -async-trait = "0.1" either = "1.12" futures = { workspace = true } libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index a57bfd465e0..6d8f9beb75d 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -20,6 +20,8 @@ #![doc = include_str!("../README.md")] +use std::{env, error::Error, fs, path::Path, str::FromStr}; + use either::Either; use futures::prelude::*; use libp2p::{ @@ -31,7 +33,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, Transport, }; -use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; @@ -151,7 +152,6 @@ async fn main() -> Result<(), Box> { ping: ping::Behaviour::new(ping::Config::new()), }) })? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); println!("Subscribing to {gossipsub_topic:?}"); diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 129b1abb1f3..ad2941e3761 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -12,13 +12,13 @@ release = false futures = { workspace = true } axum = "0.7" libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -opentelemetry = { version = "0.25.0", features = ["metrics"] } -opentelemetry-otlp = { version = "0.25.0", features = ["metrics"] } -opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio", "metrics"] } +opentelemetry = { version = "0.27.0", features = ["metrics"] } +opentelemetry-otlp = { version = "0.27.0", features = ["metrics"] } +opentelemetry_sdk = { version = "0.27.0", features = ["rt-tokio", "metrics"] } prometheus-client = { workspace = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } -tracing-opentelemetry = "0.26.0" +tracing-opentelemetry = "0.28.0" tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 4a9c9785bb3..f1485832d86 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 1755c769053..6f6e9d08e31 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,18 +20,22 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::StreamExt; -use libp2p::core::Multiaddr; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use opentelemetry::{trace::TracerProvider, KeyValue}; +use libp2p::{ + core::Multiaddr, + identify, identity, + metrics::{Metrics, Recorder}, + noise, ping, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; +use opentelemetry::{trace::TracerProvider as _, KeyValue}; +use opentelemetry_otlp::SpanExporter; +use opentelemetry_sdk::{runtime, trace::TracerProvider}; use prometheus_client::registry::Registry; -use std::error::Error; -use std::time::Duration; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::{EnvFilter, Layer}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; mod http_service; @@ -50,7 +54,6 @@ async fn main() -> Result<(), Box> { )? .with_bandwidth_metrics(&mut metric_registry) .with_behaviour(|key| Behaviour::new(key.public()))? 
- .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) .build(); swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; @@ -90,14 +93,16 @@ async fn main() -> Result<(), Box> { } fn setup_tracing() -> Result<(), Box> { - let provider = opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter(opentelemetry_otlp::new_exporter().tonic()) - .with_trace_config(opentelemetry_sdk::trace::Config::default().with_resource( - opentelemetry_sdk::Resource::new(vec![KeyValue::new("service.name", "libp2p")]), - )) - .install_batch(opentelemetry_sdk::runtime::Tokio)?; - + let provider = TracerProvider::builder() + .with_batch_exporter( + SpanExporter::builder().with_tonic().build()?, + runtime::Tokio, + ) + .with_resource(opentelemetry_sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])) + .build(); tracing_subscriber::registry() .with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) .with( diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml index 633f043de56..acc3b2affed 100644 --- a/examples/ping/Cargo.toml +++ b/examples/ping/Cargo.toml @@ -12,7 +12,6 @@ release = false futures = { workspace = true } libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] } tokio = { workspace = true, features = ["full"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs index 911b0384f89..565ef057c0d 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::prelude::*; use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 7385cf6c033..3bdaf89b04f 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -13,7 +13,6 @@ clap = { version = "4.5.6", features = ["derive"] } tokio = { version = "1.37.0", features = ["full"] } futures = { workspace = true } libp2p = { path = "../../libp2p", features = ["tokio", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } -tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 46a122d0717..b7868418fb0 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -21,17 +21,19 @@ #![doc = include_str!("../README.md")] +use std::{ + error::Error, + net::{Ipv4Addr, Ipv6Addr}, +}; + use clap::Parser; use futures::StreamExt; use libp2p::{ - core::multiaddr::Protocol, - core::Multiaddr, + core::{multiaddr::Protocol, Multiaddr}, identify, identity, noise, ping, relay, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::net::{Ipv4Addr, Ipv6Addr}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index edd3d10a0ce..bdf9aeafdab 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ multiaddr::Protocol, @@ -25,8 +27,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; @@ -53,7 +53,6 @@ async fn main() -> Result<(), Box> { rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), })? - .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); swarm.dial(rendezvous_point_address.clone()).unwrap(); diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index ff637aa6f49..00e94627292 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use futures::StreamExt; use libp2p::{ identify, noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -55,7 +56,6 @@ async fn main() { ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), }) .unwrap() - .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()); diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index bd848238d4a..f70eda5d55e 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use futures::StreamExt; use libp2p::{ noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -51,11 +52,10 @@ async fn main() { ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), }) .unwrap() - .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); - // In production the external address should be the publicly facing IP address of the rendezvous point. - // This address is recorded in the registration entry by the rendezvous point. + // In production the external address should be the publicly facing IP address of the rendezvous + // point. This address is recorded in the registration entry by the rendezvous point. 
let external_address = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); swarm.add_external_address(external_address); diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a15bc1ca2d3..a345d0faed9 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -20,14 +20,14 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ identify, noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -55,7 +55,6 @@ async fn main() -> Result<(), Box> { rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), })? - .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap()); diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs index 872ab8c3b98..71d2d2fcc76 100644 --- a/examples/stream/src/main.rs +++ b/examples/stream/src/main.rs @@ -44,12 +44,14 @@ async fn main() -> Result<()> { // Deal with incoming streams. // Spawning a dedicated task is just one way of doing this. // libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow. - // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them. + // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application + // cannot keep up processing them. tokio::spawn(async move { // This loop handles incoming streams _sequentially_ but that doesn't have to be the case. // You can also spawn a dedicated task per stream if you want to. - // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer. - // Each task needs memory meaning an aggressive remote peer may force you OOM this way. + // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an + // unbounded buffer. Each task needs memory meaning an aggressive remote peer may + // force you OOM this way. while let Some((peer, stream)) = incoming_streams.next().await { match echo(stream).await { @@ -102,7 +104,8 @@ async fn connection_handler(peer: PeerId, mut control: stream::Control) { } Err(error) => { // Other errors may be temporary. - // In production, something like an exponential backoff / circuit-breaker may be more appropriate. + // In production, something like an exponential backoff / circuit-breaker may be + // more appropriate. 
tracing::debug!(%peer, %error); continue; } diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index fd0764990d1..19de8d773ae 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::prelude::*; use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; -use std::error::Error; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index 79728f9535c..c4f36d2a990 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" [dependencies] anyhow = "1" -env_logger = "0.10.2" +env_logger = { workspace = true } futures = { workspace = true } libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] } tracing = { workspace = true } diff --git a/hole-punching-tests/Dockerfile b/hole-punching-tests/Dockerfile index 403cc301fc6..30c8e0a6414 100644 --- a/hole-punching-tests/Dockerfile +++ b/hole-punching-tests/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.81.0 as builder +FROM rust:1.83.0 as builder # Run with access to the target cache to speed up builds WORKDIR /workspace diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index 02229e16262..bc5a1bae4f5 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -18,24 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::HashMap, + fmt, io, + net::{IpAddr, Ipv4Addr}, + str::FromStr, + time::Duration, +}; + use anyhow::{Context, Result}; use either::Either; use futures::stream::StreamExt; -use libp2p::core::transport::ListenerId; -use libp2p::swarm::dial_opts::DialOpts; -use libp2p::swarm::ConnectionId; use libp2p::{ - core::multiaddr::{Multiaddr, Protocol}, + core::{ + multiaddr::{Multiaddr, Protocol}, + transport::ListenerId, + }, dcutr, identify, noise, ping, relay, - swarm::{NetworkBehaviour, SwarmEvent}, + swarm::{dial_opts::DialOpts, ConnectionId, NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm, }; use redis::AsyncCommands; -use std::collections::HashMap; -use std::net::{IpAddr, Ipv4Addr}; -use std::str::FromStr; -use std::time::Duration; -use std::{fmt, io}; /// The redis key we push the relay's TCP listen address to. const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; diff --git a/identity/Cargo.toml b/identity/Cargo.toml index cc41abb3e24..b13229c5826 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -41,7 +41,6 @@ rand = ["dep:rand", "ed25519-dalek?/rand_core"] [dev-dependencies] quickcheck = { workspace = true } -base64 = "0.22.1" serde_json = "1.0" rmp-serde = "1.3" criterion = "0.5" diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 922675097df..11cdaced795 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -20,10 +20,9 @@ //! ECDSA keys with secp256r1 curve support. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; +use std::convert::Infallible; + use p256::{ ecdsa::{ signature::{Signer, Verifier}, @@ -32,9 +31,10 @@ use p256::{ EncodedPoint, }; use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey}; -use std::convert::Infallible; use zeroize::Zeroize; +use super::error::DecodingError; + /// An ECDSA keypair generated using `secp256r1` curve. 
#[derive(Clone)] pub struct Keypair { @@ -158,7 +158,8 @@ impl PublicKey { self.0.verify(msg, &sig).is_ok() } - /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression. + /// Try to parse a public key from a byte buffer containing raw + /// components of a key with or without compression. pub fn try_from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; @@ -168,7 +169,8 @@ impl PublicKey { .map(PublicKey) } - /// Convert a public key into a byte buffer containing raw components of the key without compression. + /// Convert a public key into a byte buffer containing + /// raw components of the key without compression. pub fn to_bytes(&self) -> Vec { self.0.to_encoded_point(false).as_bytes().to_owned() } diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index d77c44547d6..5a1a53dd4af 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -20,13 +20,13 @@ //! Ed25519 keys. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; + use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; use zeroize::Zeroize; +use super::error::DecodingError; + /// An Ed25519 keypair. #[derive(Clone)] pub struct Keypair(ed25519::SigningKey); @@ -152,7 +152,8 @@ impl PublicKey { self.0.to_bytes() } - /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`. + /// Try to parse a public key from a byte array containing + /// the actual key as produced by `to_bytes`. pub fn try_from_bytes(k: &[u8]) -> Result { let k = <[u8; 32]>::try_from(k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?; @@ -206,9 +207,10 @@ impl SecretKey { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() } diff --git a/identity/src/error.rs b/identity/src/error.rs index 71cd78fe1ea..6e8c4d02caa 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -20,8 +20,7 @@ //! Errors during identity key operations. -use std::error::Error; -use std::fmt; +use std::{error::Error, fmt}; use crate::KeyType; diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index f1e8a7c2142..a1bbba00fa9 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -24,40 +24,40 @@ feature = "ed25519", feature = "rsa" ))] -#[cfg(feature = "ed25519")] -use crate::ed25519; +use quick_protobuf::{BytesReader, Writer}; + +#[cfg(feature = "ecdsa")] +use crate::ecdsa; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::error::OtherVariantError; -use crate::error::{DecodingError, SigningError}; +#[cfg(feature = "ed25519")] +use crate::ed25519; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::proto; +use crate::error::OtherVariantError; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use quick_protobuf::{BytesReader, Writer}; - +use crate::proto; #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] use crate::rsa; - #[cfg(feature = "secp256k1")] use crate::secp256k1; - -#[cfg(feature = "ecdsa")] -use crate::ecdsa; -use crate::KeyType; +use crate::{ + error::{DecodingError, SigningError}, + KeyType, +}; /// Identity keypair of a node. 
/// @@ -75,7 +75,6 @@ use crate::KeyType; /// let mut bytes = std::fs::read("private.pk8").unwrap(); /// let keypair = Keypair::rsa_from_pkcs8(&mut bytes); /// ``` -/// #[derive(Debug, Clone)] pub struct Keypair { keypair: KeyPairInner, @@ -341,7 +340,8 @@ impl Keypair { } } - /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. + /// Deterministically derive a new secret from this [`Keypair`], + /// taking into account the provided domain. /// /// This works for all key types except RSA where it returns `None`. /// @@ -352,10 +352,11 @@ impl Keypair { /// # use libp2p_identity as identity; /// let key = identity::Keypair::generate_ed25519(); /// - /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); + /// let new_key = key + /// .derive_secret(b"my encryption key") + /// .expect("can derive secret for ed25519"); /// # } /// ``` - /// #[cfg(any( feature = "ecdsa", feature = "secp256k1", @@ -904,9 +905,10 @@ mod tests { #[test] fn public_key_implements_hash() { - use crate::PublicKey; use std::hash::Hash; + use crate::PublicKey; + fn assert_implements_hash() {} assert_implements_hash::(); @@ -914,9 +916,10 @@ mod tests { #[test] fn public_key_implements_ord() { - use crate::PublicKey; use std::cmp::Ord; + use crate::PublicKey; + fn assert_implements_ord() {} assert_implements_ord::(); diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 8ae6d99ae32..7f6d1f44eab 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -18,17 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, str::FromStr}; + #[cfg(feature = "rand")] use rand::Rng; use sha2::Digest as _; -use std::{fmt, str::FromStr}; use thiserror::Error; /// Local type-alias for multihash. /// /// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`. /// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses. -/// Given that this appears in our type-signature, using a "common" number here makes us more compatible. +/// Given that this appears in our type-signature, +/// using a "common" number here makes us more compatible. type Multihash = multihash::Multihash<64>; #[cfg(feature = "serde")] diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 5eb78a4af75..b14d8c66d86 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -20,15 +20,20 @@ //! RSA keys. -use super::error::*; -use asn1_der::typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}; -use asn1_der::{Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking}; -use ring::rand::SystemRandom; -use ring::signature::KeyPair; -use ring::signature::{self, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}; use std::{fmt, sync::Arc}; + +use asn1_der::{ + typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}, + Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking, +}; +use ring::{ + rand::SystemRandom, + signature::{self, KeyPair, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}, +}; use zeroize::Zeroize; +use super::error::*; + /// An RSA keypair. 
#[derive(Clone)] pub struct Keypair(Arc); @@ -315,9 +320,10 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index a6e9e923268..e884cf1385d 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -20,15 +20,15 @@ //! Secp256k1 keys. -use super::error::DecodingError; +use core::{cmp, fmt, hash}; + use asn1_der::typed::{DerDecodable, Sequence}; -use core::cmp; -use core::fmt; -use core::hash; use libsecp256k1::{Message, Signature}; use sha2::{Digest as ShaDigestTrait, Sha256}; use zeroize::Zeroize; +use super::error::DecodingError; + /// A Secp256k1 keypair. #[derive(Clone)] pub struct Keypair { diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index 0eb32bb4975..8f12275668d 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -13,7 +13,6 @@ crate-type = ["cdylib", "rlib"] [dependencies] anyhow = "1" -either = "1.11.0" futures = { workspace = true } rand = "0.8.5" serde = { version = "1", features = ["derive"] } diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium index 86edbc5b9d2..4ccb142b4a3 100644 --- a/interop-tests/Dockerfile.chromium +++ b/interop-tests/Dockerfile.chromium @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.81 as chef +FROM rust:1.83 as chef RUN rustup target add wasm32-unknown-unknown RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack" RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt" diff --git a/interop-tests/Dockerfile.native b/interop-tests/Dockerfile.native index 499c73437fc..f0b078d9492 100644 --- a/interop-tests/Dockerfile.native +++ b/interop-tests/Dockerfile.native @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM lukemathwalker/cargo-chef:0.1.67-rust-bullseye as chef +FROM lukemathwalker/cargo-chef:0.1.68-rust-bullseye as chef WORKDIR /app FROM chef AS planner @@ -15,7 +15,7 @@ COPY . . 
RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping RUN cp /app/target/$(rustc -vV | grep host | awk '{print $2}')/release/native_ping /usr/local/bin/testplan -FROM scratch +FROM debian:bullseye COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan ENV RUST_BACKTRACE=1 ENTRYPOINT ["testplan"] diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index df36f8e5baf..91fc69dc215 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -1,7 +1,6 @@ // Native re-exports #[cfg(not(target_arch = "wasm32"))] pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient}; - // Wasm re-exports #[cfg(target_arch = "wasm32")] pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient}; @@ -11,11 +10,13 @@ pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; - use futures::future::BoxFuture; - use futures::FutureExt; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, tcp, tls, yamux}; + use futures::{future::BoxFuture, FutureExt}; + use libp2p::{ + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + tcp, tls, yamux, + }; use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; @@ -48,7 +49,6 @@ pub(crate) mod native { .with_tokio() .with_quic() .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/udp/0/quic-v1"), ), @@ -61,7 +61,6 @@ pub(crate) mod native { mplex::MplexConfig::default, )? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0"), ), @@ -74,7 +73,6 @@ pub(crate) mod native { yamux::Config::default, )? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0"), ), @@ -87,7 +85,6 @@ pub(crate) mod native { mplex::MplexConfig::default, )? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0"), ), @@ -100,7 +97,6 @@ pub(crate) mod native { yamux::Config::default, )? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0"), ), @@ -110,7 +106,6 @@ pub(crate) mod native { .with_websocket(tls::Config::new, mplex::MplexConfig::default) .await? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0/ws"), ), @@ -120,7 +115,6 @@ pub(crate) mod native { .with_websocket(tls::Config::new, yamux::Config::default) .await? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0/ws"), ), @@ -130,7 +124,6 @@ pub(crate) mod native { .with_websocket(noise::Config::new, mplex::MplexConfig::default) .await? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0/ws"), ), @@ -140,7 +133,6 @@ pub(crate) mod native { .with_websocket(noise::Config::new, yamux::Config::default) .await? 
.with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/tcp/0/ws"), ), @@ -154,7 +146,6 @@ pub(crate) mod native { )) })? .with_behaviour(behaviour_constructor)? - .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(), format!("/ip4/{ip}/udp/0/webrtc-direct"), ), @@ -186,15 +177,19 @@ pub(crate) mod native { #[cfg(target_arch = "wasm32")] pub(crate) mod wasm { + use std::time::Duration; + use anyhow::{bail, Context, Result}; use futures::future::{BoxFuture, FutureExt}; - use libp2p::core::upgrade::Version; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _}; + use libp2p::{ + core::upgrade::Version, + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + websocket_websys, webtransport_websys, yamux, Transport as _, + }; use libp2p_mplex as mplex; use libp2p_webrtc_websys as webrtc_websys; - use std::time::Duration; use crate::{BlpopRequest, Muxer, SecProtocol, Transport}; diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index 0d697a0e2a3..7730b869456 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -1,26 +1,27 @@ #![allow(non_upper_case_globals)] -use std::future::IntoFuture; -use std::process::Stdio; -use std::time::Duration; +use std::{future::IntoFuture, process::Stdio, time::Duration}; use anyhow::{bail, Context, Result}; -use axum::http::{header, Uri}; -use axum::response::{Html, IntoResponse, Response}; -use axum::routing::get; -use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; +use axum::{ + extract::State, + http::{header, StatusCode, Uri}, + response::{Html, IntoResponse, Response}, + routing::{get, post}, + Json, Router, +}; +use interop_tests::{BlpopRequest, Report}; use redis::{AsyncCommands, Client}; use thirtyfour::prelude::*; -use tokio::io::{AsyncBufReadExt, BufReader}; -use tokio::net::TcpListener; -use tokio::process::Child; -use tokio::sync::mpsc; -use tower_http::cors::CorsLayer; -use tower_http::trace::TraceLayer; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + net::TcpListener, + process::Child, + sync::mpsc, +}; +use tower_http::{cors::CorsLayer, trace::TraceLayer}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; -use interop_tests::{BlpopRequest, Report}; - mod config; const BIND_ADDR: &str = "127.0.0.1:8080"; diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index 0154bec51a4..a16dc4b8228 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -1,11 +1,14 @@ -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; use anyhow::{bail, Context, Result}; use futures::{FutureExt, StreamExt}; -use libp2p::identity::Keypair; -use libp2p::swarm::SwarmEvent; -use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr}; +use libp2p::{ + identify, + identity::Keypair, + ping, + swarm::{NetworkBehaviour, SwarmEvent}, + Multiaddr, +}; #[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index e383cfd0cdc..59bf2e81383 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,8 +1,27 @@ -## 0.54.2 +## 0.55.0 + +- Raise MSRV to 1.83.0. + See [PR 5650](https://github.com/libp2p/rust-libp2p/pull/5650). 
+ +- Add `with_connection_timeout` on `SwarmBuilder` to allow configuration of the connection_timeout parameter. + See [PR 5575](https://github.com/libp2p/rust-libp2p/pull/5575). - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). +- Update default for idle-connection-timeout to 10s. + See [PR 4967](https://github.com/libp2p/rust-libp2p/pull/4967). + +- Expose swarm builder phase errors. + See [PR 5726](https://github.com/libp2p/rust-libp2p/pull/5726). + +- Deprecate `ConnectionHandler::{InboundOpenInfo, OutboundOpenInfo}` associated type. + Previously, users could tag pending sub streams with custom data and retrieve the data + after the substream has been negotiated. + But substreams themselves are completely interchangeable, users should instead track + additional data inside `ConnectionHandler` after negotiation. + See [PR 5242](https://github.com/libp2p/rust-libp2p/pull/5242). + ## 0.54.1 - Update individual crates. diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 79f4b8fbb9a..39d01a5c5c7 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition = "2021" rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.54.2" +version = "0.55.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -94,7 +94,7 @@ bytes = "1" either = "1.9.0" futures = { workspace = true } futures-timer = "3.0.2" # Explicit dependency to be used in `wasm-bindgen` feature -getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature +getrandom = { workspace = true } # Explicit dependency to be used in `wasm-bindgen` feature # TODO feature flag? rw-stream-sink = { workspace = true } @@ -137,12 +137,9 @@ libp2p-websocket = { workspace = true, optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -async-trait = "0.1" -clap = { version = "4.1.6", features = ["derive"] } tokio = { workspace = true, features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] } libp2p-mplex = { workspace = true } -libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs index 8931c5c4166..ac668e26b3f 100644 --- a/libp2p/src/bandwidth.rs +++ b/libp2p/src/bandwidth.rs @@ -20,13 +20,6 @@ #![allow(deprecated)] -use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; - -use futures::{ - io::{IoSlice, IoSliceMut}, - prelude::*, - ready, -}; use std::{ convert::TryFrom as _, io, @@ -38,6 +31,14 @@ use std::{ task::{Context, Poll}, }; +use futures::{ + io::{IoSlice, IoSliceMut}, + prelude::*, + ready, +}; + +use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; + /// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened /// streams. #[derive(Clone)] @@ -123,7 +124,7 @@ impl BandwidthSinks { /// Returns the total number of bytes that have been downloaded on all the streams. /// /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > only ever be used for statistics purposes. pub fn total_inbound(&self) -> u64 { self.inbound.load(Ordering::Relaxed) } @@ -131,7 +132,7 @@ impl BandwidthSinks { /// Returns the total number of bytes that have been uploaded on all the streams. 
/// /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > only ever be used for statistics purposes. pub fn total_outbound(&self) -> u64 { self.outbound.load(Ordering::Relaxed) } diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs index de003314cca..ae4d0b0d4e4 100644 --- a/libp2p/src/builder.rs +++ b/libp2p/src/builder.rs @@ -4,6 +4,10 @@ mod phase; mod select_muxer; mod select_security; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use phase::WebsocketError; +pub use phase::{BehaviourError, TransportError}; + /// Build a [`Swarm`](libp2p_swarm::Swarm) by combining an identity, a set of /// [`Transport`](libp2p_core::Transport)s and a /// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour). @@ -33,31 +37,31 @@ mod select_security; /// # relay: libp2p_relay::client::Behaviour, /// # } /// -/// let swarm = SwarmBuilder::with_new_identity() -/// .with_tokio() -/// .with_tcp( -/// Default::default(), -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_quic() -/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? -/// .with_dns()? -/// .with_websocket( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// ) -/// .await? -/// .with_relay_client( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_behaviour(|_key, relay| MyBehaviour { relay })? -/// .with_swarm_config(|cfg| { -/// // Edit cfg here. -/// cfg -/// }) -/// .build(); +/// let swarm = SwarmBuilder::with_new_identity() +/// .with_tokio() +/// .with_tcp( +/// Default::default(), +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_quic() +/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? +/// .with_dns()? +/// .with_websocket( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// ) +/// .await? +/// .with_relay_client( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_behaviour(|_key, relay| MyBehaviour { relay })? +/// .with_swarm_config(|cfg| { +/// // Edit cfg here. 
+/// cfg +/// }) +/// .build(); /// # /// # Ok(()) /// # } @@ -70,11 +74,12 @@ pub struct SwarmBuilder { #[cfg(test)] mod tests { - use crate::SwarmBuilder; use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport}; use libp2p_identity::PeerId; use libp2p_swarm::NetworkBehaviour; + use crate::SwarmBuilder; + #[test] #[cfg(all( feature = "tokio", diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs index c9679a46767..f8f1672f952 100644 --- a/libp2p/src/builder/phase.rs +++ b/libp2p/src/builder/phase.rs @@ -16,23 +16,26 @@ mod websocket; use bandwidth_logging::*; use bandwidth_metrics::*; +pub use behaviour::BehaviourError; use behaviour::*; use build::*; use dns::*; +use libp2p_core::{muxing::StreamMuxerBox, Transport}; +use libp2p_identity::Keypair; +pub use other_transport::TransportError; use other_transport::*; use provider::*; use quic::*; use relay::*; use swarm::*; use tcp::*; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use websocket::WebsocketError; use websocket::*; -use super::select_muxer::SelectMuxerUpgrade; -use super::select_security::SelectSecurityUpgrade; -use super::SwarmBuilder; - -use libp2p_core::{muxing::StreamMuxerBox, Transport}; -use libp2p_identity::Keypair; +use super::{ + select_muxer::SelectMuxerUpgrade, select_security::SelectSecurityUpgrade, SwarmBuilder, +}; #[allow(unreachable_pub)] pub trait IntoSecurityUpgrade { diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs index cee9498fcaa..f24df5f3df5 100644 --- a/libp2p/src/builder/phase/bandwidth_logging.rs +++ b/libp2p/src/builder/phase/bandwidth_logging.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthLoggingPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs index 52daa731ddd..ddd292c140e 100644 --- a/libp2p/src/builder/phase/bandwidth_metrics.rs +++ b/libp2p/src/builder/phase/bandwidth_metrics.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthMetricsPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs index 939db935c80..22f8c617051 100644 --- a/libp2p/src/builder/phase/behaviour.rs +++ b/libp2p/src/builder/phase/behaviour.rs @@ -1,8 +1,9 @@ +use std::{convert::Infallible, marker::PhantomData}; + +use libp2p_swarm::NetworkBehaviour; + use super::*; use crate::SwarmBuilder; -use libp2p_swarm::NetworkBehaviour; -use std::convert::Infallible; -use std::marker::PhantomData; pub struct BehaviourPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs index 80a83994eeb..d3138cb8b8d 100644 --- a/libp2p/src/builder/phase/build.rs +++ b/libp2p/src/builder/phase/build.rs @@ -1,28 +1,31 @@ +use std::time::Duration; + +use libp2p_core::{transport::timeout::TransportTimeout, Transport}; +use libp2p_swarm::Swarm; + 
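The `builder/phase/build.rs` hunk at this point replaces the hard-coded `CONNECTION_TIMEOUT` constant with a `connection_timeout` field and a public `with_connection_timeout` setter on the final build phase, matching the PR 5575 entry in the CHANGELOG above. A minimal usage sketch, assuming the `tokio`, `tcp`, `noise`, `yamux` and `ping` features and an illustrative 30-second timeout; the transport and behaviour choices here are examples only and are not part of this diff:

```rust
use std::{error::Error, time::Duration};

use libp2p::{noise, ping, tcp, yamux, SwarmBuilder};

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let _swarm = SwarmBuilder::with_new_identity()
        .with_tokio()
        .with_tcp(
            tcp::Config::default(),
            noise::Config::new,
            yamux::Config::default,
        )?
        .with_behaviour(|_key| ping::Behaviour::default())?
        // `with_swarm_config` moves the builder into the build phase,
        // which is where the new setter lives.
        .with_swarm_config(|cfg| cfg)
        // Overrides the transport timeout; 30s is an arbitrary example value.
        .with_connection_timeout(Duration::from_secs(30))
        .build();

    Ok(())
}
```

Without the explicit call, the transport is still wrapped in a `TransportTimeout`, now using the 10-second `DEFAULT_CONNECTION_TIMEOUT` defined in `phase/swarm.rs` further down in this diff.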
#[allow(unused_imports)] use super::*; - use crate::SwarmBuilder; -use libp2p_core::Transport; -use libp2p_swarm::Swarm; pub struct BuildPhase { pub(crate) behaviour: B, pub(crate) transport: T, pub(crate) swarm_config: libp2p_swarm::Config, + pub(crate) connection_timeout: Duration, } -const CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); - impl SwarmBuilder> { + /// Timeout of the [`TransportTimeout`] wrapping the transport. + pub fn with_connection_timeout(mut self, connection_timeout: Duration) -> Self { + self.phase.connection_timeout = connection_timeout; + self + } + pub fn build(self) -> Swarm { Swarm::new( - libp2p_core::transport::timeout::TransportTimeout::new( - self.phase.transport, - CONNECTION_TIMEOUT, - ) - .boxed(), + TransportTimeout::new(self.phase.transport, self.phase.connection_timeout).boxed(), self.phase.behaviour, self.keypair.public().to_peer_id(), self.phase.swarm_config, diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs index 638064d58bb..83653836a34 100644 --- a/libp2p/src/builder/phase/dns.rs +++ b/libp2p/src/builder/phase/dns.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct DnsPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs index ceb86819dc7..e2511267cd3 100644 --- a/libp2p/src/builder/phase/identity.rs +++ b/libp2p/src/builder/phase/identity.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct IdentityPhase {} diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs index e04621b2e3f..c3b951c8c75 100644 --- a/libp2p/src/builder/phase/other_transport.rs +++ b/libp2p/src/builder/phase/other_transport.rs @@ -1,20 +1,19 @@ -use std::convert::Infallible; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{convert::Infallible, marker::PhantomData, sync::Arc}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::Transport; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + Transport, +}; #[cfg(feature = "relay")] use libp2p_core::{Negotiated, UpgradeInfo}; #[cfg(feature = "relay")] use libp2p_identity::PeerId; +use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; use crate::SwarmBuilder; -use super::*; - pub struct OtherTransportPhase { pub(crate) transport: T, } diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs index 2a9154cda74..00a79e14a30 100644 --- a/libp2p/src/builder/phase/provider.rs +++ b/libp2p/src/builder/phase/provider.rs @@ -1,13 +1,15 @@ +use std::marker::PhantomData; + #[allow(unused_imports)] use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; /// Represents the phase where a provider is not yet specified. -/// This is a marker type used in the type-state pattern to ensure compile-time checks of the builder's state. +/// This is a marker type used in the type-state pattern to ensure compile-time checks of the +/// builder's state. pub enum NoProviderSpecified {} -// Define enums for each of the possible runtime environments. These are used as markers in the type-state pattern, -// allowing compile-time checks for the appropriate environment configuration. +// Define enums for each of the possible runtime environments. 
These are used as markers in the +// type-state pattern, allowing compile-time checks for the appropriate environment configuration. #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] /// Represents the AsyncStd runtime environment. @@ -26,7 +28,8 @@ pub struct ProviderPhase {} impl SwarmBuilder { /// Configures the SwarmBuilder to use the AsyncStd runtime. - /// This method is only available when compiling for non-Wasm targets with the `async-std` feature enabled. + /// This method is only available when compiling for non-Wasm + /// targets with the `async-std` feature enabled. #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] pub fn with_async_std(self) -> SwarmBuilder { SwarmBuilder { @@ -37,7 +40,8 @@ impl SwarmBuilder { } /// Configures the SwarmBuilder to use the Tokio runtime. - /// This method is only available when compiling for non-Wasm targets with the `tokio` feature enabled + /// This method is only available when compiling for non-Wasm + /// targets with the `tokio` feature enabled #[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))] pub fn with_tokio(self) -> SwarmBuilder { SwarmBuilder { diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs index e030e9493bb..1b6329c1095 100644 --- a/libp2p/src/builder/phase/quic.rs +++ b/libp2p/src/builder/phase/quic.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::{marker::PhantomData, sync::Arc}; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::StreamMuxer; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -8,7 +8,9 @@ use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; all(not(target_arch = "wasm32"), feature = "websocket") ))] use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; -use std::{marker::PhantomData, sync::Arc}; + +use super::*; +use crate::SwarmBuilder; pub struct QuicPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs index f8305f9d246..33dbf1fb54c 100644 --- a/libp2p/src/builder/phase/relay.rs +++ b/libp2p/src/builder/phase/relay.rs @@ -10,9 +10,8 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, Upgr #[cfg(feature = "relay")] use libp2p_identity::PeerId; -use crate::SwarmBuilder; - use super::*; +use crate::SwarmBuilder; pub struct RelayPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/swarm.rs b/libp2p/src/builder/phase/swarm.rs index ee456ced927..e751ad672e4 100644 --- a/libp2p/src/builder/phase/swarm.rs +++ b/libp2p/src/builder/phase/swarm.rs @@ -1,6 +1,9 @@ #[allow(unused_imports)] use super::*; +#[allow(unused)] // used below but due to feature flag combinations, clippy gives an unnecessary warning. +const DEFAULT_CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + #[allow(dead_code)] pub struct SwarmPhase { pub(crate) behaviour: B, @@ -20,6 +23,7 @@ macro_rules! 
impl_with_swarm_config { behaviour: self.phase.behaviour, transport: self.phase.transport, swarm_config: constructor($config), + connection_timeout: DEFAULT_CONNECTION_TIMEOUT, }, keypair: self.keypair, phantom: std::marker::PhantomData, diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs index 4b7cf29b3d2..f38f52441e5 100644 --- a/libp2p/src/builder/phase/tcp.rs +++ b/libp2p/src/builder/phase/tcp.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all( not(target_arch = "wasm32"), any(feature = "tcp", feature = "websocket") @@ -14,7 +14,9 @@ use libp2p_core::Transport; use libp2p_core::{ upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo, }; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct TcpPhase {} diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs index 68a85bb77b7..a23c6eca854 100644 --- a/libp2p/src/builder/phase/websocket.rs +++ b/libp2p/src/builder/phase/websocket.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -15,7 +15,9 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; feature = "relay" ))] use libp2p_identity::PeerId; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct WebsocketPhase { pub(crate) transport: T, @@ -126,8 +128,8 @@ impl_websocket_builder!( impl_websocket_builder!( "tokio", super::provider::Tokio, - // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent - // with above AsyncStd construction. + // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be + // consistent with above AsyncStd construction. 
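The hunks above make the transport's connection timeout configurable: `BuildPhase` gains a `connection_timeout` field, `with_swarm_config` seeds it with the new 10-second `DEFAULT_CONNECTION_TIMEOUT`, and `with_connection_timeout` lets callers override it before `build()`. A minimal sketch of how a caller might use the new setter, assuming the `tokio`, `tcp`, `noise`, `yamux` and `ping` features of `libp2p` are enabled:

```rust
use std::{error::Error, time::Duration};

use libp2p::{noise, ping, tcp, yamux};

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let swarm = libp2p::SwarmBuilder::with_new_identity()
        .with_tokio()
        .with_tcp(
            tcp::Config::default(),
            noise::Config::new,
            yamux::Config::default,
        )?
        .with_behaviour(|_| ping::Behaviour::default())?
        // `with_swarm_config` moves the builder into the build phase shown above...
        .with_swarm_config(|cfg| cfg)
        // ...where the new setter replaces the 10-second default before the swarm is built.
        .with_connection_timeout(Duration::from_secs(30))
        .build();

    drop(swarm);
    Ok(())
}
```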
futures::future::ready(libp2p_dns::tokio::Transport::system( libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default()) )), diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs index c93ba9d9991..93ae0547269 100644 --- a/libp2p/src/builder/select_muxer.rs +++ b/libp2p/src/builder/select_muxer.rs @@ -20,12 +20,15 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use std::iter::{Chain, Map}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; #[derive(Debug, Clone)] pub struct SelectMuxerUpgrade(A, B); diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs index d6c7f8c172f..1ed760feb1b 100644 --- a/libp2p/src/builder/select_security.rs +++ b/libp2p/src/builder/select_security.rs @@ -21,13 +21,15 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; -use futures::future::MapOk; -use futures::{future, TryFutureExt}; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use futures::{future, future::MapOk, TryFutureExt}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use libp2p_identity::PeerId; -use std::iter::{Chain, Map}; /// Upgrade that combines two upgrades into one. Supports all the protocols supported by either /// sub-upgrade. diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 58f911e9445..47e1142d0e9 100644 --- a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -34,11 +34,6 @@ pub use bytes; pub use futures; -#[doc(inline)] -pub use libp2p_core::multihash; -#[doc(inline)] -pub use multiaddr; - #[doc(inline)] pub use libp2p_allow_block_list as allow_block_list; #[cfg(feature = "autonat")] @@ -48,6 +43,8 @@ pub use libp2p_autonat as autonat; pub use libp2p_connection_limits as connection_limits; #[doc(inline)] pub use libp2p_core as core; +#[doc(inline)] +pub use libp2p_core::multihash; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; @@ -140,6 +137,8 @@ pub use libp2p_webtransport_websys as webtransport_websys; #[cfg(feature = "yamux")] #[doc(inline)] pub use libp2p_yamux as yamux; +#[doc(inline)] +pub use multiaddr; mod builder; mod transport_ext; @@ -149,15 +148,23 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; -pub use self::builder::SwarmBuilder; -pub use self::core::{ - transport::TransportError, - upgrade::{InboundUpgrade, OutboundUpgrade}, - Transport, -}; -pub use self::multiaddr::{multiaddr as build_multiaddr, Multiaddr}; -pub use self::swarm::Swarm; -pub use self::transport_ext::TransportExt; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use builder::WebsocketError as WebsocketBuilderError; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; pub use libp2p_swarm::{Stream, StreamProtocol}; + +pub use self::{ + builder::{ + BehaviourError as BehaviourBuilderError, SwarmBuilder, + TransportError as TransportBuilderError, + }, + core::{ + transport::TransportError, + upgrade::{InboundUpgrade, OutboundUpgrade}, + Transport, + }, + multiaddr::{multiaddr as build_multiaddr, Multiaddr}, + swarm::Swarm, + transport_ext::TransportExt, +}; diff --git 
a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs index 4f07484fc1f..0ed5b816903 100644 --- a/libp2p/src/transport_ext.rs +++ b/libp2p/src/transport_ext.rs @@ -20,15 +20,19 @@ //! Provides the `TransportExt` trait. +use std::sync::Arc; + +use libp2p_identity::PeerId; + #[allow(deprecated)] use crate::bandwidth::{BandwidthLogging, BandwidthSinks}; -use crate::core::{ - muxing::{StreamMuxer, StreamMuxerBox}, - transport::Boxed, +use crate::{ + core::{ + muxing::{StreamMuxer, StreamMuxerBox}, + transport::Boxed, + }, + Transport, }; -use crate::Transport; -use libp2p_identity::PeerId; -use std::sync::Arc; /// Trait automatically implemented on all objects that implement `Transport`. Provides some /// additional utilities. @@ -42,23 +46,17 @@ pub trait TransportExt: Transport { /// # Example /// /// ``` - /// use libp2p_yamux as yamux; + /// use libp2p::{core::upgrade, identity, Transport, TransportExt}; /// use libp2p_noise as noise; /// use libp2p_tcp as tcp; - /// use libp2p::{ - /// core::upgrade, - /// identity, - /// TransportExt, - /// Transport, - /// }; + /// use libp2p_yamux as yamux; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// /// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) /// .upgrade(upgrade::Version::V1) /// .authenticate( - /// noise::Config::new(&id_keys) - /// .expect("Signing libp2p-noise static DH keypair failed."), + /// noise::Config::new(&id_keys).expect("Signing libp2p-noise static DH keypair failed."), /// ) /// .multiplex(yamux::Config::default()) /// .boxed(); diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 0963c0ca59e..06a4dad4037 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -57,8 +57,8 @@ //! cargo build --bin relay-server-example //! ``` //! -//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy -//! it to your server. +//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, +//! copy it to your server. //! //! On your server, start the relay server binary: //! @@ -98,7 +98,8 @@ //! //! ``` bash //! $ libp2p-lookup direct --address /ip4/111.11.111.111/tcp/4001 -//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded. +//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") +//! succeeded. //! //! Protocol version: "/TODO/0.0.1" //! Agent version: "rust-libp2p/0.36.0" @@ -163,12 +164,18 @@ //! [`Multiaddr`](crate::Multiaddr). //! //! ``` ignore -//! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } +//! [2022-01-30T12:54:10Z INFO client] Established connection to +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: +//! "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/ +//! p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", +//! role_override: Dialer } //! ``` //! -//! 2. The direct connection upgrade, also known as hole punch, succeeding. -//! 
Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. +//! 2. The direct connection upgrade, also known as hole punch, succeeding. Reported by +//! [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with +//! the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } //! ``` diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index 31bf5ba3a14..ebaea29f33a 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -72,6 +72,7 @@ //! //! ```rust //! use std::error::Error; +//! //! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] @@ -98,8 +99,9 @@ //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -139,12 +141,14 @@ //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly //! separate _how_ to send bytes from _what_ bytes and to _whom_ to send. //! -//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: +//! With the above in mind, let's extend our example, creating a +//! [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -174,8 +178,9 @@ //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -209,8 +214,9 @@ //! //! ```rust //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -226,7 +232,6 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) //! .build(); //! //! Ok(()) @@ -261,8 +266,9 @@ //! //! ```rust //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -278,7 +284,6 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -305,9 +310,10 @@ //! //! ```no_run //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; -//! use libp2p::{noise, ping, tcp, yamux, Multiaddr, swarm::SwarmEvent}; +//! //! 
use futures::prelude::*; +//! use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -323,7 +329,6 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md index b5ffd7f0495..e7f68f6f8fe 100644 --- a/misc/allow-block-list/CHANGELOG.md +++ b/misc/allow-block-list/CHANGELOG.md @@ -1,13 +1,10 @@ -## 0.4.2 - -- Deprecate `void` crate. - See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). - ## 0.4.1 - Add getters & setters for the allowed/blocked peers. Return a `bool` for every "insert/remove" function, informing if a change was performed. See [PR 5572](https://github.com/libp2p/rust-libp2p/pull/5572). +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). ## 0.4.0 diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml index 66ee3ef9124..c169be87056 100644 --- a/misc/allow-block-list/Cargo.toml +++ b/misc/allow-block-list/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-allow-block-list" edition = "2021" rust-version = { workspace = true } description = "Allow/block list connection management for libp2p." -version = "0.4.2" +version = "0.4.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index f93cf4ffefa..ea0d56b5a67 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -31,12 +31,12 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour, +//! allowed_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour::default() +//! allowed_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` @@ -51,27 +51,29 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour, +//! blocked_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour::default() +//! blocked_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + collections::{HashSet, VecDeque}, + convert::Infallible, + fmt, + task::{Context, Poll, Waker}, +}; + +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{HashSet, VecDeque}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll, Waker}; /// A [`NetworkBehaviour`] that can act as an allow or block list. #[derive(Default, Debug)] @@ -101,7 +103,8 @@ impl Behaviour { /// Allow connections to the given peer. /// - /// Returns whether the peer was newly inserted. 
Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer + /// was already present in the set. pub fn allow_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -116,7 +119,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. + /// Returns whether the peer was present in the set. Does nothing if the peer + /// was not present in the set. pub fn disallow_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -139,7 +143,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in + /// the set. pub fn block_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -153,7 +158,8 @@ impl Behaviour { /// Unblock connections to a given peer. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. + /// Returns whether the peer was present in the set. Does nothing if the peer + /// was not present in the set. pub fn unblock_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -294,10 +300,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; + use super::*; + #[async_std::test] async fn cannot_dial_blocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index 016a7f2cfd4..c8df5be5653 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -18,6 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + convert::Infallible, + fmt, + task::{Context, Poll}, +}; + use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ @@ -25,22 +32,22 @@ use libp2p_swarm::{ dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{HashMap, HashSet}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll}; /// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`]. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// For these limits to take effect, this needs to be composed +/// into the behaviour tree of your application. /// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) -/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. 
-/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this** -/// behaviour denied the connection. +/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) +/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively +/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant +/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) +/// **this** behaviour denied the connection. /// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, +/// it may also be a different error. /// /// # Example /// @@ -53,9 +60,9 @@ use std::task::{Context, Poll}; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, -/// limits: connection_limits::Behaviour +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, +/// limits: connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -367,14 +374,16 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{ - behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError, - ListenError, Swarm, SwarmEvent, + behaviour::toggle::Toggle, + dial_opts::{DialOpts, PeerCondition}, + DialError, ListenError, Swarm, SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use quickcheck::*; + use super::*; + #[test] fn max_outgoing() { use rand::Rng; diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 003993a512c..c5e96553a5c 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -17,7 +17,6 @@ clap = { version = "4.5.6", features = ["derive"] } zeroize = "1" serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" -libp2p-core = { workspace = true } base64 = "0.22.1" libp2p-identity = { workspace = true } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index e6c563b3c32..7d46b1849bd 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -1,10 +1,8 @@ +use std::{error::Error, path::Path}; + use base64::prelude::*; +use libp2p_identity::{Keypair, PeerId}; use serde::{Deserialize, Serialize}; -use std::error::Error; -use std::path::Path; - -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index 64d98005369..4c4d3bfbf66 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -1,9 +1,12 @@ +use std::{ + error::Error, + path::PathBuf, + str::{self, FromStr}, + sync::mpsc, + thread, +}; + use base64::prelude::*; -use std::error::Error; -use std::path::PathBuf; -use std::str::{self, FromStr}; -use std::sync::mpsc; -use std::thread; mod config; diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index f18cb09d193..2d04b6cf2ac 100644 --- a/misc/memory-connection-limits/Cargo.toml 
+++ b/misc/memory-connection-limits/Cargo.toml @@ -14,15 +14,13 @@ memory-stats = { version = "1", features = ["always_use_statm"] } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -sysinfo = "0.30" +sysinfo = "0.33" tracing = { workspace = true } [dev-dependencies] -async-std = { version = "1.12.0", features = ["attributes"] } libp2p-identify = { workspace = true } libp2p-swarm-derive = { path = "../../swarm-derive" } libp2p-swarm-test = { path = "../../swarm-test" } -rand = "0.8.5" [lints] workspace = true diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index e2a89977991..28fa5598481 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -18,35 +18,40 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + convert::Infallible, + fmt, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::convert::Infallible; - -use std::{ - fmt, - task::{Context, Poll}, - time::{Duration, Instant}, -}; use sysinfo::MemoryRefreshKind; /// A [`NetworkBehaviour`] that enforces a set of memory usage based limits. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// For these limits to take effect, this needs to be composed +/// into the behaviour tree of your application. /// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) -/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. -/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this** -/// behaviour denied the connection. +/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) +/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively +/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant +/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error +/// if (and only if) **this** behaviour denied the connection. /// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, +/// it may also be a different error. /// /// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive. -/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour. +/// If you need to employ both of them, +/// compose two instances of [Behaviour] into your custom behaviour. 
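The doc comment above notes that `with_max_bytes` and `with_max_percentage` are mutually exclusive constructors, and that two behaviour instances are needed to enforce both. A hedged sketch of the two entry points; the thresholds are invented for illustration:

```rust
use libp2p_memory_connection_limits as memory_connection_limits;

fn memory_limits() -> (
    memory_connection_limits::Behaviour,
    memory_connection_limits::Behaviour,
) {
    // Deny new connections once the process uses more than 1 GiB...
    let by_bytes = memory_connection_limits::Behaviour::with_max_bytes(1024 * 1024 * 1024);
    // ...or, alternatively, once it uses more than 90% of system memory.
    let by_share = memory_connection_limits::Behaviour::with_max_percentage(0.9);

    // Per the doc comment, compose two instances if both checks are needed.
    (by_bytes, by_share)
}
```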
/// /// # Example /// @@ -58,8 +63,8 @@ use sysinfo::MemoryRefreshKind; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// limits: memory_connection_limits::Behaviour +/// identify: identify::Behaviour, +/// limits: memory_connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -68,7 +73,8 @@ pub struct Behaviour { last_refreshed: Instant, } -/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale. +/// The maximum duration for which the retrieved memory-stats +/// of the process are allowed to be stale. /// /// Once exceeded, we will retrieve new stats. const MAX_STALE_DURATION: Duration = Duration::from_millis(100); @@ -94,7 +100,7 @@ impl Behaviour { use sysinfo::{RefreshKind, System}; let system_memory_bytes = System::new_with_specifics( - RefreshKind::new().with_memory(MemoryRefreshKind::new().with_ram()), + RefreshKind::default().with_memory(MemoryRefreshKind::default().with_ram()), ) .total_memory(); diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs index 7f89e2c7a9a..e82ad67d076 100644 --- a/misc/memory-connection-limits/tests/max_bytes.rs +++ b/misc/memory-connection-limits/tests/max_bytes.rs @@ -20,14 +20,14 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use util::*; - use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm}; use libp2p_swarm_test::SwarmExt; +use util::*; #[test] fn max_bytes() { @@ -69,7 +69,8 @@ fn max_bytes() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try + // to exceed it. match network .dial( diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs index bfb1b504af5..bdadad437b8 100644 --- a/misc/memory-connection-limits/tests/max_percentage.rs +++ b/misc/memory-connection-limits/tests/max_percentage.rs @@ -20,24 +20,24 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use sysinfo::{MemoryRefreshKind, RefreshKind}; -use util::*; - use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, DialError, Swarm, }; use libp2p_swarm_test::SwarmExt; +use sysinfo::{MemoryRefreshKind, RefreshKind}; +use util::*; #[test] fn max_percentage() { const CONNECTION_LIMIT: usize = 20; let system_info = sysinfo::System::new_with_specifics( - RefreshKind::new().with_memory(MemoryRefreshKind::new().with_ram()), + RefreshKind::default().with_memory(MemoryRefreshKind::default().with_ram()), ); let mut network = Swarm::new_ephemeral(|_| TestBehaviour { @@ -76,7 +76,9 @@ fn max_percentage() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + // Memory stats are only updated every 100ms internally, + // ensure they are up-to-date when we try to exceed it. 
+ std::thread::sleep(Duration::from_millis(100)); match network .dial( diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs index 333b0ee135f..205f4d13bc4 100644 --- a/misc/memory-connection-limits/tests/util/mod.rs +++ b/misc/memory-connection-limits/tests/util/mod.rs @@ -18,7 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; @@ -26,7 +29,6 @@ use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::convert::Infallible; #[derive(libp2p_swarm_derive::NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs index 8a0f54e5b65..b6308ed1b51 100644 --- a/misc/metrics/src/bandwidth.rs +++ b/misc/metrics/src/bandwidth.rs @@ -1,4 +1,10 @@ -use crate::protocol_stack; +use std::{ + convert::TryFrom as _, + io, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{MapOk, TryFutureExt}, io::{IoSlice, IoSliceMut}, @@ -16,12 +22,8 @@ use prometheus_client::{ metrics::{counter::Counter, family::Family}, registry::{Registry, Unit}, }; -use std::{ - convert::TryFrom as _, - io, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol_stack; #[derive(Debug, Clone)] #[pin_project::pin_project] diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 3e60dca2cab..6a0f27394e9 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs index 2d90b92fbc6..b3e2e11f0b0 100644 --- a/misc/metrics/src/gossipsub.rs +++ b/misc/metrics/src/gossipsub.rs @@ -18,8 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::metrics::counter::Counter; -use prometheus_client::registry::Registry; +use prometheus_client::{metrics::counter::Counter, registry::Registry}; pub(crate) struct Metrics { messages: Counter, diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 03ac3f9634e..b16c6a56ccf 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -18,17 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol_stack; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use prometheus_client::collector::Collector; -use prometheus_client::encoding::{DescriptorEncoder, EncodeMetric}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::gauge::ConstGauge; -use prometheus_client::metrics::MetricType; -use prometheus_client::registry::Registry; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use prometheus_client::{ + collector::Collector, + encoding::{DescriptorEncoder, EncodeMetric}, + metrics::{counter::Counter, gauge::ConstGauge, MetricType}, + registry::Registry, +}; + +use crate::protocol_stack; const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[ #[cfg(feature = "dcutr")] diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index bd5a6526737..0a2a8038511 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; pub(crate) struct Metrics { query_result_get_record_ok: Counter, diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 74fd15e2181..1fd79e7846f 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -67,8 +67,8 @@ impl Metrics { /// Create a new set of Swarm and protocol [`Metrics`]. /// /// ``` - /// use prometheus_client::registry::Registry; /// use libp2p_metrics::Metrics; + /// use prometheus_client::registry::Registry; /// let mut registry = Registry::default(); /// let metrics = Metrics::new(&mut registry); /// ``` diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index afdd05134a6..ce653c72ea1 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; #[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)] struct FailureLabels { diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 607daf3f1e1..d4c25b6eb3e 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
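The reordered doc example in `misc/metrics/src/lib.rs` above stops after creating the registry and the `Metrics` value. A hedged sketch of the other half, feeding swarm events into it through the `Recorder` trait; the generic driver function is invented for illustration:

```rust
use futures::StreamExt;
use libp2p_metrics::{Metrics, Recorder};
use prometheus_client::registry::Registry;

async fn drive_swarm_with_metrics<B>(mut swarm: libp2p_swarm::Swarm<B>)
where
    B: libp2p_swarm::NetworkBehaviour,
{
    let mut registry = Registry::default();
    let metrics = Metrics::new(&mut registry);

    loop {
        // Every swarm event is forwarded to the metrics collection;
        // `registry` can then be encoded and served, e.g. over HTTP.
        let event = swarm.select_next_some().await;
        metrics.record(&event);
    }
}
```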
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 51c0a0af253..6e95d082de6 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -18,18 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; -use crate::protocol_stack; use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; use web_time::Instant; +use crate::protocol_stack; + pub(crate) struct Metrics { connections_incoming: Family, connections_incoming_error: Family, diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 1bbe3642477..66ab434b613 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -22,9 +22,8 @@ unsigned-varint = { workspace = true } async-std = { version = "1.6.2", features = ["attributes"] } futures_ringbuf = "0.4.0" quickcheck = { workspace = true } -rand = "0.8" rw-stream-sink = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 83bb4909041..bd537e7fc7b 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -20,10 +20,6 @@ //! Protocol negotiation strategies for the peer acting as the dialer. -use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError, Version}; - -use futures::prelude::*; use std::{ convert::TryFrom as _, iter, mem, @@ -31,6 +27,13 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, NegotiationError, Version, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _dialer_ (or _initiator_). /// @@ -84,8 +87,9 @@ enum State { impl Future for DialerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. + // The Unpin bound here is required because we produce + // a `Negotiated` as the output. It also makes + // the implementation considerably easier to write. 
R: AsyncRead + AsyncWrite + Unpin, I: Iterator, I::Item: AsRef, @@ -204,14 +208,18 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::listener_select_proto; - use async_std::future::timeout; - use async_std::net::{TcpListener, TcpStream}; - use quickcheck::{Arbitrary, Gen, GenRange}; use std::time::Duration; + + use async_std::{ + future::timeout, + net::{TcpListener, TcpStream}, + }; + use libp2p_test_utils::EnvFilter; + use quickcheck::{Arbitrary, Gen, GenRange}; use tracing::metadata::LevelFilter; - use tracing_subscriber::EnvFilter; + + use super::*; + use crate::listener_select_proto; #[test] fn select_proto_basic() { @@ -267,13 +275,11 @@ mod tests { ListenerProtos(listen_protos): ListenerProtos, DialPayload(dial_payload): DialPayload, ) { - let _ = tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::DEBUG.into()) - .from_env_lossy(), - ) - .try_init(); + libp2p_test_utils::with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env_lossy(), + ); async_std::task::block_on(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); @@ -353,8 +359,8 @@ mod tests { .unwrap(); assert_eq!(proto, "/proto1"); - // client can close the connection even though protocol negotiation is not yet done, i.e. - // `_server_connection` had been untouched. + // client can close the connection even though protocol negotiation is not yet done, + // i.e. `_server_connection` had been untouched. io.close().await.unwrap(); }); diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index 3a7988d0548..8062455de46 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*}; use std::{ convert::TryFrom as _, io, @@ -27,6 +25,9 @@ use std::{ task::{Context, Poll}, }; +use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*}; + const MAX_LEN_BYTES: u16 = 2; const MAX_FRAME_SIZE: u16 = (1 << (MAX_LEN_BYTES * 8 - MAX_LEN_BYTES)) - 1; const DEFAULT_BUFFER_SIZE: usize = 64; @@ -383,10 +384,12 @@ where #[cfg(test)] mod tests { - use crate::length_delimited::LengthDelimited; + use std::io::ErrorKind; + use futures::{io::Cursor, prelude::*}; use quickcheck::*; - use std::io::ErrorKind; + + use crate::length_delimited::LengthDelimited; #[test] fn basic_read() { diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index 5565623f25e..96432de6cb0 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -70,20 +70,21 @@ //! //! ```no_run //! use async_std::net::TcpStream; -//! use multistream_select::{dialer_select_proto, Version}; //! use futures::prelude::*; +//! use multistream_select::{dialer_select_proto, Version}; //! //! async_std::task::block_on(async move { //! let socket = TcpStream::connect("127.0.0.1:10333").await.unwrap(); //! //! let protos = vec!["/echo/1.0.0", "/echo/2.5.0"]; -//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1).await.unwrap(); +//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1) +//! .await +//! .unwrap(); //! //! println!("Negotiated protocol: {:?}", protocol); //! 
// You can now use `_io` to communicate with the remote. //! }); //! ``` -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -93,10 +94,12 @@ mod listener_select; mod negotiated; mod protocol; -pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture}; -pub use self::listener_select::{listener_select_proto, ListenerSelectFuture}; -pub use self::negotiated::{Negotiated, NegotiatedComplete, NegotiationError}; -pub use self::protocol::ProtocolError; +pub use self::{ + dialer_select::{dialer_select_proto, DialerSelectFuture}, + listener_select::{listener_select_proto, ListenerSelectFuture}, + negotiated::{Negotiated, NegotiatedComplete, NegotiationError}, + protocol::ProtocolError, +}; /// Supported multistream-select versions. #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index b4236310a1d..cd5af72a9d0 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -21,11 +21,6 @@ //! Protocol negotiation strategies for the peer acting as the listener //! in a multistream-select protocol negotiation. -use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError}; - -use futures::prelude::*; -use smallvec::SmallVec; use std::{ convert::TryFrom as _, mem, @@ -33,6 +28,14 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use smallvec::SmallVec; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, NegotiationError, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _listener_ (or _responder_). /// @@ -109,8 +112,10 @@ enum State { impl Future for ListenerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. + // The Unpin bound here is required because + // we produce a `Negotiated` as the output. + // It also makes the implementation considerably + // easier to write. R: AsyncRead + AsyncWrite + Unpin, N: AsRef + Clone, { diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index a24014a4f5f..6693b3b5636 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -18,7 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; +use std::{ + error::Error, + fmt, io, mem, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ io::{IoSlice, IoSliceMut}, @@ -26,12 +31,8 @@ use futures::{ ready, }; use pin_project::pin_project; -use std::{ - error::Error, - fmt, io, mem, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; /// An I/O stream that has settled on an (application-layer) protocol to use. /// @@ -59,8 +60,10 @@ pub struct NegotiatedComplete { impl Future for NegotiatedComplete where - // `Unpin` is required not because of implementation details but because we produce the - // `Negotiated` as the output of the future. + // `Unpin` is required not because of + // implementation details but because we produce + // the `Negotiated` as the output of the + // future. 
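The crate-level docs above show the dialer side of a multistream-select negotiation. For symmetry, a hedged sketch of the listener side built from `listener_select_proto`; the address and protocol names mirror the dialer example and are illustrative only:

```rust
use async_std::net::TcpListener;
use futures::prelude::*;
use multistream_select::listener_select_proto;

fn main() {
    async_std::task::block_on(async move {
        // Accept one inbound connection and negotiate a protocol as the responder.
        let listener = TcpListener::bind("127.0.0.1:10333").await.unwrap();
        let (socket, _remote_addr) = listener.accept().await.unwrap();

        let protos = vec!["/echo/1.0.0", "/echo/2.5.0"];
        let (protocol, _io) = listener_select_proto(socket, protos).await.unwrap();

        println!("Negotiated protocol: {:?}", protocol);
        // `_io` can now be used to communicate with the remote.
    });
}
```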
TInner: AsyncRead + AsyncWrite + Unpin, { type Output = Result, NegotiationError>; @@ -250,13 +253,13 @@ where } // TODO: implement once method is stabilized in the futures crate - /*unsafe fn initializer(&self) -> Initializer { - match &self.state { - State::Completed { io, .. } => io.initializer(), - State::Expecting { io, .. } => io.inner_ref().initializer(), - State::Invalid => panic!("Negotiated: Invalid state"), - } - }*/ + // unsafe fn initializer(&self) -> Initializer { + // match &self.state { + // State::Completed { io, .. } => io.initializer(), + // State::Expecting { io, .. } => io.inner_ref().initializer(), + // State::Invalid => panic!("Negotiated: Invalid state"), + // } + // } fn poll_read_vectored( mut self: Pin<&mut Self>, diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index 92b6acedaeb..93cd4ac02b5 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -25,19 +25,22 @@ //! `Stream` and `Sink` implementations of `MessageIO` and //! `MessageReader`. -use crate::length_delimited::{LengthDelimited, LengthDelimitedReader}; -use crate::Version; - -use bytes::{BufMut, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*, ready}; use std::{ error::Error, fmt, io, pin::Pin, task::{Context, Poll}, }; + +use bytes::{BufMut, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*, ready}; use unsigned_varint as uvi; +use crate::{ + length_delimited::{LengthDelimited, LengthDelimitedReader}, + Version, +}; + /// The maximum number of supported protocols that can be processed. const MAX_PROTOCOLS: usize = 1000; @@ -461,10 +464,12 @@ impl fmt::Display for ProtocolError { #[cfg(test)] mod tests { - use super::*; - use quickcheck::*; use std::iter; + use quickcheck::*; + + use super::*; + impl Arbitrary for Protocol { fn arbitrary(g: &mut Gen) -> Protocol { let n = g.gen_range(1..g.size()); diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index c57b7da7db8..d49315a54c3 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,10 +1,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use std::{io, marker::PhantomData}; + use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, BufMut, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; -use std::io; -use std::marker::PhantomData; mod generated; @@ -182,12 +182,13 @@ impl From for io::Error { #[cfg(test)] mod tests { - use super::*; + use std::error::Error; + use asynchronous_codec::FramedRead; - use futures::io::Cursor; - use futures::{FutureExt, StreamExt}; + use futures::{io::Cursor, FutureExt, StreamExt}; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use std::error::Error; + + use super::*; #[test] fn honors_max_message_length() { diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs index 65dafe065d1..a434d3ce17f 100644 --- a/misc/quick-protobuf-codec/tests/large_message.rs +++ b/misc/quick-protobuf-codec/tests/large_message.rs @@ -1,7 +1,6 @@ use asynchronous_codec::Encoder; use bytes::BytesMut; -use quick_protobuf_codec::proto; -use quick_protobuf_codec::Codec; +use quick_protobuf_codec::{proto, Codec}; #[test] fn encode_large_message() { diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs index 4ada7e73ba1..9c2deec8743 100644 --- a/misc/quickcheck-ext/src/lib.rs +++ b/misc/quickcheck-ext/src/lib.rs @@ -1,9 +1,9 @@ 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use quickcheck::*; - use core::ops::Range; + use num_traits::sign::Unsigned; +pub use quickcheck::*; pub trait GenRange { fn gen_range(&mut self, _range: Range) -> T; diff --git a/misc/rw-stream-sink/src/lib.rs b/misc/rw-stream-sink/src/lib.rs index f10e683ad33..5fdf1987252 100644 --- a/misc/rw-stream-sink/src/lib.rs +++ b/misc/rw-stream-sink/src/lib.rs @@ -27,7 +27,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::{prelude::*, ready}; use std::{ io::{self, Read}, mem, @@ -35,6 +34,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{prelude::*, ready}; + static_assertions::const_assert!(mem::size_of::() <= mem::size_of::()); /// Wraps a [`Stream`] and [`Sink`] whose items are buffers. @@ -115,14 +116,16 @@ where #[cfg(test)] mod tests { - use super::RwStreamSink; - use async_std::task; - use futures::{channel::mpsc, prelude::*}; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_std::task; + use futures::{channel::mpsc, prelude::*}; + + use super::RwStreamSink; + // This struct merges a stream and a sink and is quite useful for tests. struct Wrapper(St, Si); diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md index fe48de0f553..53341baa9ab 100644 --- a/misc/server/CHANGELOG.md +++ b/misc/server/CHANGELOG.md @@ -1,25 +1,15 @@ -## 0.12.8 - -### Changed - -- Remove deprecated [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) from Dockerfile. - See [PR 5610](https://github.com/libp2p/rust-libp2p/pull/5610). - -## 0.12.7 +## 0.12.6 ### Changed +- Stop using kad default protocol. + See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122) - Use periodic and automatic bootstrap of Kademlia. See [PR 4838](https://github.com/libp2p/rust-libp2p/pull/4838). - Update to [`libp2p-identify` `v0.45.0`](protocols/identify/CHANGELOG.md#0450). See [PR 4981](https://github.com/libp2p/rust-libp2p/pull/4981). - -## 0.12.6 - -### Changed - -- Stop using kad default protocol. - See [PR 5122](https://github.com/libp2p/rust-libp2p/pull/5122) +- Remove deprecated [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) from Dockerfile. + See [PR 5610](https://github.com/libp2p/rust-libp2p/pull/5610). 
## 0.12.5 diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index 0954e2f38d8..b2b3d33ca1e 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-server" -version = "0.12.8" +version = "0.12.6" authors = ["Max Inden "] edition = "2021" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,6 @@ license = "MIT" base64 = "0.22" clap = { version = "4.5.6", features = ["derive"] } futures = { workspace = true } -futures-timer = "3" axum = "0.7" libp2p = { workspace = true, features = [ "autonat", @@ -34,8 +33,7 @@ libp2p = { workspace = true, features = [ "websocket", ] } prometheus-client = { workspace = true } -serde = "1.0.203" -serde_derive = "1.0.125" +serde = { version = "1", features = ["derive"] } serde_json = "1.0" tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true } diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile index 12a8982eb3f..8b5aac2ae82 100644 --- a/misc/server/Dockerfile +++ b/misc/server/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.81.0 as chef +FROM rust:1.83.0 as chef RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin WORKDIR /app diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs index 36b18c9798d..230d62a2ef3 100644 --- a/misc/server/src/behaviour.rs +++ b/misc/server/src/behaviour.rs @@ -1,13 +1,10 @@ -use libp2p::autonat; -use libp2p::identify; -use libp2p::kad; -use libp2p::ping; -use libp2p::relay; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{NetworkBehaviour, StreamProtocol}; -use libp2p::{identity, Multiaddr, PeerId}; -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; + +use libp2p::{ + autonat, identify, identity, kad, ping, relay, + swarm::{behaviour::toggle::Toggle, NetworkBehaviour, StreamProtocol}, + Multiaddr, PeerId, +}; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs index c3e3ec529c1..8f8c71369b2 100644 --- a/misc/server/src/config.rs +++ b/misc/server/src/config.rs @@ -1,7 +1,7 @@ +use std::{error::Error, path::Path}; + use libp2p::Multiaddr; -use serde_derive::Deserialize; -use std::error::Error; -use std::path::Path; +use serde::Deserialize; #[derive(Clone, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index cee1aa96e28..87a8adb94e0 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
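The server's `Cargo.toml` above replaces the separate `serde_derive` dependency with serde's own `derive` feature, which is why `config.rs` can now import the macro from `serde` directly. A hedged sketch of the pattern; the field names are illustrative, not the server's actual config schema:

```rust
// Requires `serde = { version = "1", features = ["derive"] }` in Cargo.toml;
// no separate `serde_derive` crate is needed.
use serde::Deserialize;

#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Config {
    identity: Identity,
}

#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Identity {
    peer_id: String,
    priv_key: String,
}
```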
-use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 820921beaed..a633a80207e 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -1,18 +1,18 @@ +use std::{error::Error, path::PathBuf, str::FromStr}; + use base64::Engine; use clap::Parser; use futures::stream::StreamExt; -use libp2p::identity; -use libp2p::identity::PeerId; -use libp2p::kad; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::SwarmEvent; -use libp2p::tcp; -use libp2p::{identify, noise, yamux}; -use prometheus_client::metrics::info::Info; -use prometheus_client::registry::Registry; -use std::error::Error; -use std::path::PathBuf; -use std::str::FromStr; +use libp2p::{ + identify, identity, + identity::PeerId, + kad, + metrics::{Metrics, Recorder}, + noise, + swarm::SwarmEvent, + tcp, yamux, +}; +use prometheus_client::{metrics::info::Info, registry::Registry}; use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; diff --git a/misc/test-utils/CHANGELOG.md b/misc/test-utils/CHANGELOG.md new file mode 100644 index 00000000000..0b8ed3ab931 --- /dev/null +++ b/misc/test-utils/CHANGELOG.md @@ -0,0 +1,4 @@ +## 0.1.0 + +- Introduce 'test-utils` crate. + See [PR 5725](https://github.com/libp2p/rust-libp2p/pull/5725). \ No newline at end of file diff --git a/misc/test-utils/Cargo.toml b/misc/test-utils/Cargo.toml new file mode 100644 index 00000000000..438bcabcf2a --- /dev/null +++ b/misc/test-utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "libp2p-test-utils" +version = "0.1.0" +edition = "2021" +authors = ["Krishang Shah "] +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +publish = false + +[package.metadata.release] +release = false + +[dependencies] +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +[lints] +workspace = true diff --git a/misc/test-utils/src/lib.rs b/misc/test-utils/src/lib.rs new file mode 100644 index 00000000000..1155c79b614 --- /dev/null +++ b/misc/test-utils/src/lib.rs @@ -0,0 +1,15 @@ +pub use tracing_subscriber::EnvFilter; + +/// Initializes logging with the default environment filter (`RUST_LOG`). +pub fn with_default_env_filter() { + with_env_filter(EnvFilter::from_default_env()); +} + +/// Initializes logging with a custom environment filter. +/// Logs are written to standard error (`stderr`). 
+pub fn with_env_filter(filter: impl Into) { + let _ = tracing_subscriber::fmt() + .with_env_filter(filter) + .with_writer(std::io::stderr) + .try_init(); +} diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 287388a49e7..2c50a2f8ab7 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -23,7 +23,6 @@ quick-protobuf-codec = { workspace = true } rand = "0.8" serde = { version = "1.0", features = ["derive"] } sha2 = "0.10.8" -thiserror = { workspace = true } tinytemplate = "1.2" tracing = { workspace = true } diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs index a02c4d1116d..c32d33d5bab 100644 --- a/misc/webrtc-utils/src/fingerprint.rs +++ b/misc/webrtc-utils/src/fingerprint.rs @@ -19,9 +19,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::fmt; + use libp2p_core::multihash; use sha2::Digest as _; -use std::fmt; pub const SHA256: &str = "sha-256"; const MULTIHASH_SHA256_CODE: u64 = 0x12; diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs index 9180acfc1ca..705db7f4697 100644 --- a/misc/webrtc-utils/src/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -19,16 +19,17 @@ // DEALINGS IN THE SOFTWARE. use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; +pub use noise::Error; use crate::fingerprint::Fingerprint; -pub use noise::Error; - pub async fn inbound( id_keys: identity::Keypair, stream: T, @@ -89,9 +90,10 @@ pub(crate) fn noise_prologue( #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn noise_prologue_tests() { let a = Fingerprint::raw(hex!( diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 0796548f449..96a07f5db95 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -18,13 +18,13 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::fingerprint::Fingerprint; -use serde::Serialize; use std::net::{IpAddr, SocketAddr}; + +use rand::{distributions::Alphanumeric, thread_rng, Rng}; +use serde::Serialize; use tinytemplate::TinyTemplate; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use crate::fingerprint::Fingerprint; pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String { let answer = render_description( @@ -71,7 +71,8 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & // the answerer is received, which adds additional latency. setup:active allows the answer and // the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. // -// a=candidate: +// a=candidate: +// // // A transport address for a candidate that can be used for connectivity checks (RFC8839). // diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs index 17f746a92a1..0ec420a103a 100644 --- a/misc/webrtc-utils/src/stream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -19,20 +19,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
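The two helpers in the new `libp2p-test-utils` crate above replace the per-test `tracing_subscriber::fmt()…try_init()` boilerplate. A rough usage sketch, assuming the crate is added as a dev-dependency and `tracing` is available in the test crate:

#[cfg(test)]
mod example_tests {
    #[test]
    fn with_default_filter() {
        // Reads the filter from `RUST_LOG`; calling it more than once is fine
        // because the helper discards the error from a repeated `try_init`.
        libp2p_test_utils::with_default_env_filter();
        tracing::debug!("only visible when RUST_LOG enables debug for this target");
    }

    #[test]
    fn with_explicit_filter() {
        // `&str` converts into `EnvFilter`, so a directive can be passed directly.
        libp2p_test_utils::with_env_filter("debug");
    }
}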
-use bytes::Bytes; -use futures::{channel::oneshot, prelude::*, ready}; - use std::{ io, pin::Pin, task::{Context, Poll}, }; -use crate::proto::{Flag, Message}; +use bytes::Bytes; +use futures::{channel::oneshot, prelude::*, ready}; + use crate::{ - stream::drop_listener::GracefullyClosed, - stream::framed_dc::FramedDc, - stream::state::{Closing, State}, + proto::{Flag, Message}, + stream::{ + drop_listener::GracefullyClosed, + framed_dc::FramedDc, + state::{Closing, State}, + }, }; mod drop_listener; @@ -69,7 +71,8 @@ impl Stream where T: AsyncRead + AsyncWrite + Unpin + Clone, { - /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped. + /// Returns a new [`Stream`] and a [`DropListener`], + /// which will notify the receiver when/if the stream is dropped. pub fn new(data_channel: T) -> (Self, DropListener) { let (sender, receiver) = oneshot::channel(); @@ -175,8 +178,9 @@ where buf: &[u8], ) -> Poll> { while self.state.read_flags_in_async_write() { - // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we will poll the - // underlying I/O resource once more. Is that allowed? How about introducing a state IoReadClosed? + // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we + // will poll the underlying I/O resource once more. Is that allowed? How + // about introducing a state IoReadClosed? let Self { read_buffer, @@ -265,11 +269,12 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::stream::framed_dc::codec; use asynchronous_codec::Encoder; use bytes::BytesMut; + use super::*; + use crate::stream::framed_dc::codec; + #[test] fn max_data_len() { // Largest possible message. diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index 9745e3d4364..ea3f19d2f57 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -18,17 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
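The `Stream::new` doc comment above describes pairing each stream with a `DropListener` that is notified when/if the stream is dropped. A self-contained sketch of that oneshot-channel pattern; the type names here are invented for illustration and are not the crate's actual types:

use futures::channel::oneshot;

struct Handle {
    // Dropping the handle drops the sender, which wakes the listener below.
    _on_drop: oneshot::Sender<()>,
}

struct DropListener {
    receiver: oneshot::Receiver<()>,
}

fn new_pair() -> (Handle, DropListener) {
    let (sender, receiver) = oneshot::channel();
    (Handle { _on_drop: sender }, DropListener { receiver })
}

async fn wait_until_dropped(listener: DropListener) {
    // The receiver resolves with `Err(Canceled)` once the sender is dropped.
    let _ = listener.receiver.await;
}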
-use futures::channel::oneshot; -use futures::channel::oneshot::Canceled; -use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + pin::Pin, + task::{Context, Poll}, +}; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + AsyncRead, AsyncWrite, FutureExt, SinkExt, +}; -use crate::proto::{Flag, Message}; -use crate::stream::framed_dc::FramedDc; +use crate::{ + proto::{Flag, Message}, + stream::framed_dc::FramedDc, +}; #[must_use] pub struct DropListener { diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs index 721178fdcd3..a7b9b6214e0 100644 --- a/misc/webrtc-utils/src/stream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -21,8 +21,10 @@ use asynchronous_codec::Framed; use futures::{AsyncRead, AsyncWrite}; -use crate::proto::Message; -use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; +use crate::{ + proto::Message, + stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}, +}; pub(crate) type FramedDc = Framed>; pub(crate) fn new(inner: T) -> FramedDc diff --git a/misc/webrtc-utils/src/stream/state.rs b/misc/webrtc-utils/src/stream/state.rs index 082325e4d47..006c1610d00 100644 --- a/misc/webrtc-utils/src/stream/state.rs +++ b/misc/webrtc-utils/src/stream/state.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::Bytes; - use std::io; +use bytes::Bytes; + use crate::proto::Flag; #[derive(Debug, Copy, Clone)] @@ -46,8 +46,8 @@ pub(crate) enum State { /// Represents the state of closing one half (either read or write) of the connection. /// -/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag respectively -/// and flushing the underlying connection. +/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag +/// respectively and flushing the underlying connection. #[derive(Debug, Copy, Clone)] pub(crate) enum Closing { Requested, @@ -181,8 +181,8 @@ impl State { /// Whether we should read from the stream in the [`futures::AsyncWrite`] implementation. /// - /// This is necessary for read-closed streams because we would otherwise not read any more flags from - /// the socket. + /// This is necessary for read-closed streams because we would otherwise + /// not read any more flags from the socket. pub(crate) fn read_flags_in_async_write(&self) -> bool { matches!(self, Self::ReadClosed) } @@ -324,9 +324,10 @@ impl State { #[cfg(test)] mod tests { - use super::*; use std::io::ErrorKind; + use super::*; + #[test] fn cannot_read_after_receiving_fin() { let mut open = State::Open; diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs index 440ad73ed02..60b1934082f 100644 --- a/misc/webrtc-utils/src/transport.rs +++ b/misc/webrtc-utils/src/transport.rs @@ -1,7 +1,9 @@ -use crate::fingerprint::Fingerprint; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; use std::net::{IpAddr, SocketAddr}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; + +use crate::fingerprint::Fingerprint; + /// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. 
pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { let mut iter = addr.iter(); @@ -38,9 +40,10 @@ pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerpri #[cfg(test)] mod tests { - use super::*; use std::net::{Ipv4Addr, Ipv6Addr}; + use super::*; + #[test] fn parse_valid_address_with_certhash_and_p2p() { let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 7f887c8b3b8..78650218f4b 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -32,7 +32,7 @@ libp2p-muxer-test-harness = { path = "../test-harness" } libp2p-plaintext = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [[bench]] name = "split_send_size" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 44eafa884ac..7a0e9780ca7 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -21,22 +21,23 @@ //! A benchmark for the `split_send_size` configuration option //! using different transports. +use std::{pin::Pin, time::Duration}; + use async_std::task; use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; -use futures::future::poll_fn; -use futures::prelude::*; -use futures::{channel::oneshot, future::join}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::transport::ListenerId; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport}; +use futures::{ + channel::oneshot, + future::{join, poll_fn}, + prelude::*, +}; +use libp2p_core::{ + multiaddr::multiaddr, muxing, muxing::StreamMuxerExt, transport, transport::ListenerId, + upgrade, Endpoint, Multiaddr, Transport, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; -use std::pin::Pin; -use std::time::Duration; -use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -53,9 +54,7 @@ const BENCH_SIZES: [usize; 8] = [ ]; fn prepare(c: &mut Criterion) { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let payload: Vec = vec![1; 1024 * 1024]; @@ -120,7 +119,8 @@ fn run( } transport::TransportEvent::Incoming { upgrade, .. } => { let (_peer, mut conn) = upgrade.await.unwrap(); - // Just calling `poll_inbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_inbound` without `poll` is fine here because mplex makes + // progress through all `poll_` functions. It is hacky though. let mut s = poll_fn(|cx| conn.poll_inbound_unpin(cx)) .await .expect("unexpected error"); @@ -158,7 +158,8 @@ fn run( .unwrap() .await .unwrap(); - // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress + // through all `poll_` functions. It is hacky though. 
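For reference, `parse_webrtc_dial_addr` above walks the multiaddr protocol stack to recover a socket address and certificate fingerprint. A simplified sketch of the same iteration for a plain `/ip4/…/udp/…` address, leaving out the certhash and `/p2p` handling; it assumes only `libp2p_core` as a dependency:

use std::net::{IpAddr, SocketAddr};

use libp2p_core::{multiaddr::Protocol, Multiaddr};

// Illustrative only: pick up the first IP and UDP port seen in the multiaddr.
fn socket_addr_of(addr: &Multiaddr) -> Option<SocketAddr> {
    let mut ip = None;
    let mut port = None;
    for proto in addr.iter() {
        match proto {
            Protocol::Ip4(v4) if ip.is_none() => ip = Some(IpAddr::V4(v4)),
            Protocol::Ip6(v6) if ip.is_none() => ip = Some(IpAddr::V6(v6)),
            Protocol::Udp(p) if port.is_none() => port = Some(p),
            _ => {}
        }
    }
    Some(SocketAddr::new(ip?, port?))
}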
let mut stream = poll_fn(|cx| conn.poll_outbound_unpin(cx)).await.unwrap(); let mut off = 0; loop { diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index 014ee899280..20ee6bb4ed6 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -18,14 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use asynchronous_codec::{Decoder, Encoder}; -use bytes::{BufMut, Bytes, BytesMut}; -use libp2p_core::Endpoint; use std::{ fmt, hash::{Hash, Hasher}, io, mem, }; + +use asynchronous_codec::{Decoder, Encoder}; +use bytes::{BufMut, Bytes, BytesMut}; +use libp2p_core::Endpoint; use unsigned_varint::{codec, encode}; // Maximum size for a packet: 1MB as per the spec. @@ -298,7 +299,7 @@ impl Encoder for Codec { role: Endpoint::Listener, }, data, - } => (num << 3 | 1, data), + } => ((num << 3) | 1, data), Frame::Data { stream_id: LocalStreamId { @@ -306,35 +307,35 @@ impl Encoder for Codec { role: Endpoint::Dialer, }, data, - } => (num << 3 | 2, data), + } => ((num << 3) | 2, data), Frame::Close { stream_id: LocalStreamId { num, role: Endpoint::Listener, }, - } => (num << 3 | 3, Bytes::new()), + } => ((num << 3) | 3, Bytes::new()), Frame::Close { stream_id: LocalStreamId { num, role: Endpoint::Dialer, }, - } => (num << 3 | 4, Bytes::new()), + } => ((num << 3) | 4, Bytes::new()), Frame::Reset { stream_id: LocalStreamId { num, role: Endpoint::Listener, }, - } => (num << 3 | 5, Bytes::new()), + } => ((num << 3) | 5, Bytes::new()), Frame::Reset { stream_id: LocalStreamId { num, role: Endpoint::Dialer, }, - } => (num << 3 | 6, Bytes::new()), + } => ((num << 3) | 6, Bytes::new()), }; let mut header_buf = encode::u64_buffer(); diff --git a/muxers/mplex/src/config.rs b/muxers/mplex/src/config.rs index 3bf5e703a18..45bb05b2240 100644 --- a/muxers/mplex/src/config.rs +++ b/muxers/mplex/src/config.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::MAX_FRAME_SIZE; use std::cmp; +use crate::codec::MAX_FRAME_SIZE; + pub(crate) const DEFAULT_MPLEX_PROTOCOL_NAME: &str = "/mplex/6.7.0"; /// Configuration for the multiplexer. diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 50fc0fc1d3f..eeea4ce734f 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -18,23 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId}; -use crate::{MaxBufferBehaviour, MplexConfig}; +pub(crate) use std::io::{Error, Result}; +use std::{ + cmp, + collections::VecDeque, + fmt, io, mem, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use asynchronous_codec::Framed; use bytes::Bytes; -use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; -use futures::{prelude::*, ready, stream::Fuse}; +use futures::{ + prelude::*, + ready, + stream::Fuse, + task::{waker_ref, ArcWake, AtomicWaker, WakerRef}, +}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; -use std::collections::VecDeque; -use std::{ - cmp, fmt, io, mem, - sync::Arc, - task::{Context, Poll, Waker}, -}; -pub(crate) use std::io::{Error, Result}; +use crate::{ + codec::{Codec, Frame, LocalStreamId, RemoteStreamId}, + MaxBufferBehaviour, MplexConfig, +}; /// A connection identifier. /// /// Randomly generated and mainly intended to improve log output @@ -302,13 +310,11 @@ where /// reading and writing immediately. 
The remote is informed /// based on the current state of the substream: /// - /// * If the substream was open, a `Reset` frame is sent at - /// the next opportunity. - /// * If the substream was half-closed, i.e. a `Close` frame - /// has already been sent, nothing further happens. - /// * If the substream was half-closed by the remote, i.e. - /// a `Close` frame has already been received, a `Close` - /// frame is sent at the next opportunity. + /// * If the substream was open, a `Reset` frame is sent at the next opportunity. + /// * If the substream was half-closed, i.e. a `Close` frame has already been sent, nothing + /// further happens. + /// * If the substream was half-closed by the remote, i.e. a `Close` frame has already been + /// received, a `Close` frame is sent at the next opportunity. /// /// If the multiplexed stream is closed or encountered /// an error earlier, or there is no known substream with @@ -1146,15 +1152,14 @@ const EXTRA_PENDING_FRAMES: usize = 1000; #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, num::NonZeroU8, ops::DerefMut, pin::Pin}; + use async_std::task; use asynchronous_codec::{Decoder, Encoder}; use bytes::BytesMut; use quickcheck::*; - use std::collections::HashSet; - use std::num::NonZeroU8; - use std::ops::DerefMut; - use std::pin::Pin; + + use super::*; impl Arbitrary for MaxBufferBehaviour { fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour { @@ -1226,10 +1231,7 @@ mod tests { #[test] fn max_buffer_behaviour() { - use tracing_subscriber::EnvFilter; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(cfg: MplexConfig, overflow: NonZeroU8) { let mut r_buf = BytesMut::new(); @@ -1364,10 +1366,7 @@ mod tests { #[test] fn close_on_error() { - use tracing_subscriber::EnvFilter; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(cfg: MplexConfig, num_streams: NonZeroU8) { let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize); diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index 17ca9ad46f6..1ef89dc283a 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -26,15 +26,22 @@ mod codec; mod config; mod io; -pub use config::{MaxBufferBehaviour, MplexConfig}; +use std::{ + cmp, iter, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use bytes::Bytes; use codec::LocalStreamId; +pub use config::{MaxBufferBehaviour, MplexConfig}; use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use parking_lot::Mutex; -use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; impl UpgradeInfo for MplexConfig { type Info = &'static str; diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index d03bdbdfed7..489d476f158 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -1,15 +1,20 @@ +use std::{ + fmt, + future::Future, + mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{future, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Stream, StreamExt}; +use libp2p_core::{ + muxing::StreamMuxerExt, + 
upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + StreamMuxer, UpgradeInfo, +}; + use crate::future::{BoxFuture, Either, FutureExt}; -use futures::{future, AsyncRead, AsyncWrite}; -use futures::{AsyncReadExt, Stream}; -use futures::{AsyncWriteExt, StreamExt}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::{StreamMuxer, UpgradeInfo}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, mem}; pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M) where @@ -41,7 +46,8 @@ where .unwrap() } -/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can use `read_to_end` to read the entire message. +/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can +/// use `read_to_end` to read the entire message. pub async fn close_implies_flush(alice: A, bob: B) where A: StreamMuxer + Unpin, @@ -99,7 +105,8 @@ where .await; } -/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can be the dialer and listener. +/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can +/// be the dialer and listener. async fn run_commutative( mut alice: A, mut bob: B, @@ -120,7 +127,8 @@ async fn run_commutative( /// Runs a given protocol between the two parties. /// /// The first party will open a new substream and the second party will wait for this. -/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the underlying connection can make progress at all times. +/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the +/// underlying connection can make progress at all times. async fn run( dialer: &mut A, listener: &mut B, diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index bcfeb62fccf..001eb6b0348 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,17 +22,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use either::Either; -use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; -use std::collections::VecDeque; -use std::io::{IoSlice, IoSliceMut}; -use std::task::Waker; use std::{ - io, iter, + collections::VecDeque, + io, + io::{IoSlice, IoSliceMut}, + iter, pin::Pin, - task::{Context, Poll}, + task::{Context, Poll, Waker}, +}; + +use either::Either; +use futures::{prelude::*, ready}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, }; use thiserror::Error; @@ -40,10 +43,12 @@ use thiserror::Error; #[derive(Debug)] pub struct Muxer { connection: Either, yamux013::Connection>, - /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. + /// Temporarily buffers inbound streams in case our node is + /// performing backpressure on the remote. /// - /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the - /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via + /// The only way how yamux can make progress is by calling + /// [`yamux013::Connection::poll_next_inbound`]. 
However, the [`StreamMuxer`] interface is + /// designed to allow a caller to selectively make progress via /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. /// @@ -57,7 +62,8 @@ pub struct Muxer { /// How many streams to buffer before we start resetting them. /// /// This is equal to the ACK BACKLOG in `rust-yamux`. -/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog. +/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset +/// streams because they'll voluntarily stop opening them once they hit the ACK backlog. const MAX_BUFFERED_INBOUND_STREAMS: usize = 256; impl Muxer diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index 9b2bc4cb2ea..75a40b8c5ad 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,9 +1,9 @@ ## 0.13.1 - Verify that an incoming AutoNAT dial comes from a connected peer. See [PR 5597](https://github.com/libp2p/rust-libp2p/pull/5597). - - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). +- Update to `libp2p-request-response` `v0.28.0`. ## 0.13.0 diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 92ca163d8ec..8ad4492fbff 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -18,7 +18,6 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = { version = "0.1", optional = true } asynchronous-codec = { workspace = true } -bytes = { version = "1", optional = true } either = { version = "1.9.0", optional = true } futures = { workspace = true } futures-bounded = { workspace = true, optional = true } @@ -38,14 +37,14 @@ thiserror = { workspace = true, optional = true } [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt", "sync"] } libp2p-swarm-test = { path = "../../swarm-test" } -tracing-subscriber = { version = "0.3", features = ["env-filter"] } +libp2p-test-utils = { workspace = true } libp2p-identify = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros"] } [features] default = ["v1", "v2"] v1 = ["dep:libp2p-request-response", "dep:web-time", "dep:async-trait"] -v2 = ["dep:bytes", "dep:either", "dep:futures-bounded", "dep:thiserror", "dep:rand_core"] +v2 = ["dep:either", "dep:futures-bounded", "dep:thiserror", "dep:rand_core"] # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/autonat/src/v1.rs b/protocols/autonat/src/v1.rs index c60e4805f40..4de601c5df5 100644 --- a/protocols/autonat/src/v1.rs +++ b/protocols/autonat/src/v1.rs @@ -29,6 +29,8 @@ pub(crate) mod behaviour; pub(crate) mod protocol; +pub use libp2p_request_response::{InboundFailure, OutboundFailure}; + pub use self::{ behaviour::{ Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, NatStatus, @@ -36,7 +38,6 @@ pub use self::{ }, protocol::{ResponseError, DEFAULT_PROTOCOL_NAME}, }; -pub use libp2p_request_response::{InboundFailure, OutboundFailure}; pub(crate) mod proto { #![allow(unreachable_pub)] diff --git a/protocols/autonat/src/v1/behaviour.rs b/protocols/autonat/src/v1/behaviour.rs index 7a717baed8d..24ec1b13be7 100644 --- a/protocols/autonat/src/v1/behaviour.rs +++ b/protocols/autonat/src/v1/behaviour.rs @@ -21,15 +21,19 @@ mod as_client; mod as_server; -use crate::protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}; -use crate::DEFAULT_PROTOCOL_NAME; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; + use as_client::AsClient; pub use as_client::{OutboundProbeError, OutboundProbeEvent}; use as_server::AsServer; pub use as_server::{InboundProbeError, InboundProbeEvent}; use futures_timer::Delay; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel, @@ -39,14 +43,13 @@ use libp2p_swarm::{ ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - iter, - task::{Context, Poll}, - time::Duration, -}; use web_time::Instant; +use crate::{ + protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}, + DEFAULT_PROTOCOL_NAME, +}; + /// Config for the [`Behaviour`]. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { @@ -148,17 +151,18 @@ pub enum Event { /// [`NetworkBehaviour`] for AutoNAT. /// -/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a firewall, or -/// publicly reachable. -/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of fixed servers and -/// connected peers. Upon receiving a dial-back request, the remote tries to dial the included addresses. When a -/// first address was successfully dialed, a status Ok will be send back together with the dialed address. If no address -/// can be reached a dial-error is send back. +/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a +/// firewall, or publicly reachable. +/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of +/// fixed servers and connected peers. Upon receiving a dial-back request, the remote tries to dial +/// the included addresses. When a first address was successfully dialed, a status Ok will be send +/// back together with the dialed address. If no address can be reached a dial-error is send back. /// Based on the received response, the sender assumes themselves to be public or private. 
-/// The status is retried in a frequency of [`Config::retry_interval`] or [`Config::retry_interval`], depending on whether -/// enough confidence in the assumed NAT status was reached or not. -/// The confidence increases each time a probe confirms the assumed status, and decreases if a different status is reported. -/// If the confidence is 0, the status is flipped and the Behaviour will report the new status in an `OutEvent`. +/// The status is retried in a frequency of [`Config::retry_interval`] or +/// [`Config::retry_interval`], depending on whether enough confidence in the assumed NAT status was +/// reached or not. The confidence increases each time a probe confirms the assumed status, and +/// decreases if a different status is reported. If the confidence is 0, the status is flipped and +/// the Behaviour will report the new status in an `OutEvent`. pub struct Behaviour { // Local peer id local_peer_id: PeerId, @@ -195,11 +199,12 @@ pub struct Behaviour { ongoing_outbound: HashMap, // Connected peers with the observed address of each connection. - // If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips), - // the observed address is `None`. + // If the endpoint of a connection is relayed or not global (in case of + // Config::only_global_ips), the observed address is `None`. connected: HashMap>>, - // Used servers in recent outbound probes that are throttled through Config::throttle_server_period. + // Used servers in recent outbound probes that are throttled through + // Config::throttle_server_period. throttled_servers: Vec<(PeerId, Instant)>, // Recent probes done for clients @@ -264,8 +269,8 @@ impl Behaviour { } /// Add a peer to the list over servers that may be used for probes. - /// These peers are used for dial-request even if they are currently not connection, in which case a connection will be - /// establish before sending the dial-request. + /// These peers are used for dial-request even if they are currently not connection, in which + /// case a connection will be establish before sending the dial-request. pub fn add_server(&mut self, peer: PeerId, address: Option) { self.servers.insert(peer); if let Some(addr) = address { @@ -564,7 +569,8 @@ impl NetworkBehaviour for Behaviour { type Action = ToSwarm<::ToSwarm, THandlerInEvent>; -// Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol. +// Trait implemented for `AsClient` and `AsServer` to handle events from the inner +// [`request_response::Behaviour`] Protocol. trait HandleInnerEvent { fn handle_event( &mut self, @@ -671,7 +677,8 @@ impl GlobalIp for std::net::Ipv6Addr { // Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that instead of the // `Ipv6MulticastScope` just returns if the scope is global or not. - // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, Ipv6MulticastScope::Global))`. + // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, + // Ipv6MulticastScope::Global))`. fn is_multicast_scope_global(addr: &std::net::Ipv6Addr) -> Option { match addr.segments()[0] & 0x000f { 14 => Some(true), // Global multicast scope. 
diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs index 385dee50ee1..ca8daf6e1ac 100644 --- a/protocols/autonat/src/v1/behaviour/as_client.rs +++ b/protocols/autonat/src/v1/behaviour/as_client.rs @@ -18,12 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::ResponseError; - -use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus, - ProbeId, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + task::{Context, Poll}, + time::Duration, }; + use futures::FutureExt; use futures_timer::Delay; use libp2p_core::Multiaddr; @@ -31,13 +31,14 @@ use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId}; use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm}; use rand::{seq::SliceRandom, thread_rng}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::{Context, Poll}, - time::Duration, -}; use web_time::Instant; +use super::{ + Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus, + ProbeId, +}; +use crate::ResponseError; + /// Outbound probe failed or was aborted. #[derive(Debug)] pub enum OutboundProbeError { @@ -111,6 +112,7 @@ impl HandleInnerEvent for AsClient<'_> { request_id, response, }, + .. } => { tracing::debug!(?response, "Outbound dial-back request returned response"); @@ -153,6 +155,7 @@ impl HandleInnerEvent for AsClient<'_> { peer, error, request_id, + .. } => { tracing::debug!( %peer, diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 01148add6e8..32b4120c552 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -17,10 +17,11 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId, - ResponseError, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + num::NonZeroU8, }; + use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ @@ -30,12 +31,13 @@ use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, ConnectionId, DialError, ToSwarm, }; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - num::NonZeroU8, -}; use web_time::Instant; +use super::{ + Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId, + ResponseError, +}; + /// Inbound probe failed. #[derive(Debug)] pub enum InboundProbeError { @@ -105,6 +107,7 @@ impl HandleInnerEvent for AsServer<'_> { request, channel, }, + .. } => { let probe_id = self.probe_id.next(); if !self.connected.contains_key(&peer) { @@ -181,6 +184,7 @@ impl HandleInnerEvent for AsServer<'_> { peer, error, request_id, + .. 
} => { tracing::debug!( %peer, @@ -379,10 +383,10 @@ impl AsServer<'_> { #[cfg(test)] mod test { - use super::*; - use std::net::Ipv4Addr; + use super::*; + fn random_ip<'a>() -> Protocol<'a> { Protocol::Ip4(Ipv4Addr::new( rand::random(), diff --git a/protocols/autonat/src/v1/protocol.rs b/protocols/autonat/src/v1/protocol.rs index 2ce538fddf4..6aa0c99167b 100644 --- a/protocols/autonat/src/v1/protocol.rs +++ b/protocols/autonat/src/v1/protocol.rs @@ -18,16 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use async_trait::async_trait; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::{SinkExt, StreamExt}; +use futures::{ + io::{AsyncRead, AsyncWrite}, + SinkExt, StreamExt, +}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use std::io; + +use crate::proto; /// The protocol name used for negotiating with multistream-select. pub const DEFAULT_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/autonat/1.0.0"); diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs index 48e9f25f890..94decf50a55 100644 --- a/protocols/autonat/src/v2.rs +++ b/protocols/autonat/src/v2.rs @@ -4,17 +4,17 @@ //! //! The new version fixes the issues of the first version: //! - The server now always dials back over a newly allocated port. This greatly reduces the risk of -//! false positives that often occurred in the first version, when the clinet-server connection -//! occurred over a hole-punched port. +//! false positives that often occurred in the first version, when the clinet-server connection +//! occurred over a hole-punched port. //! - The server protects against DoS attacks by requiring the client to send more data to the -//! server then the dial back puts on the client, thus making the protocol unatractive for an -//! attacker. +//! server then the dial back puts on the client, thus making the protocol unatractive for an +//! attacker. //! //! The protocol is separated into two parts: //! - The client part, which is implemented in the `client` module. (The client is the party that -//! wants to check if it is reachable from the outside.) +//! wants to check if it is reachable from the outside.) //! - The server part, which is implemented in the `server` module. (The server is the party -//! performing reachability checks on behalf of the client.) +//! performing reachability checks on behalf of the client.) //! //! The two can be used together. 
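Since the module documentation above notes that the client and server parts can be used together, here is a rough wiring sketch for a node that runs both roles. The constructor signatures and the `derive_prelude` attribute are assumptions about the wider API, not something this diff shows:

use libp2p_autonat::v2::{client, server};
use libp2p_swarm::NetworkBehaviour;
use rand_core::OsRng;

#[derive(NetworkBehaviour)]
#[behaviour(prelude = "libp2p_swarm::derive_prelude")]
struct Behaviour {
    autonat_client: client::Behaviour,
    autonat_server: server::Behaviour,
}

fn behaviour() -> Behaviour {
    Behaviour {
        // Assumed constructors: an RNG plus a config for the client, an RNG for the server.
        autonat_client: client::Behaviour::new(OsRng, client::Config::default()),
        autonat_server: server::Behaviour::new(OsRng),
    }
}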
diff --git a/protocols/autonat/src/v2/client.rs b/protocols/autonat/src/v2/client.rs index d3272512f35..11ddb792839 100644 --- a/protocols/autonat/src/v2/client.rs +++ b/protocols/autonat/src/v2/client.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Event; -pub use behaviour::{Behaviour, Config}; +pub use behaviour::{Behaviour, Config, Event}; diff --git a/protocols/autonat/src/v2/client/behaviour.rs b/protocols/autonat/src/v2/client/behaviour.rs index 97509c05443..8e238fc9be4 100644 --- a/protocols/autonat/src/v2/client/behaviour.rs +++ b/protocols/autonat/src/v2/client/behaviour.rs @@ -1,5 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, + fmt::{Debug, Display, Formatter}, task::{Context, Poll}, time::Duration, }; @@ -15,14 +16,12 @@ use libp2p_swarm::{ }; use rand::prelude::*; use rand_core::OsRng; -use std::fmt::{Debug, Display, Formatter}; - -use crate::v2::{protocol::DialRequest, Nonce}; use super::handler::{ dial_back::{self, IncomingNonce}, dial_request, }; +use crate::v2::{protocol::DialRequest, Nonce}; #[derive(Debug, Clone, Copy)] pub struct Config { @@ -281,10 +280,12 @@ where } } - /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested candidates. + /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested + /// candidates. /// /// In the current implementation, we only send a single address to each AutoNAT server. - /// This spreads our candidates out across all servers we are connected to which should give us pretty fast feedback on all of them. + /// This spreads our candidates out across all servers we are connected to which should give us + /// pretty fast feedback on all of them. fn issue_dial_requests_for_untested_candidates(&mut self) { for addr in self.untested_candidates() { let Some((conn_id, peer_id)) = self.random_autonat_server() else { @@ -311,7 +312,8 @@ where /// Returns all untested candidates, sorted by the frequency they were reported at. /// - /// More frequently reported candidates are considered to more likely be external addresses and thus tested first. + /// More frequently reported candidates are considered to more likely be external addresses and + /// thus tested first. fn untested_candidates(&self) -> impl Iterator { let mut entries = self .address_candidates @@ -333,7 +335,8 @@ where .map(|(addr, _)| addr) } - /// Chooses an active connection to one of our peers that reported support for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol. + /// Chooses an active connection to one of our peers that reported support for the + /// [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol. 
fn random_autonat_server(&mut self) -> Option<(ConnectionId, PeerId)> { let (conn_id, info) = self .peer_info diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs index b3b3a59c02d..7cdf194343a 100644 --- a/protocols/autonat/src/v2/client/handler/dial_back.rs +++ b/protocols/autonat/src/v2/client/handler/dial_back.rs @@ -1,4 +1,5 @@ use std::{ + convert::Infallible, io, task::{Context, Poll}, time::Duration, @@ -11,7 +12,6 @@ use libp2p_swarm::{ handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError}, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use std::convert::Infallible; use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL}; @@ -35,16 +35,14 @@ impl ConnectionHandler for Handler { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(ReadyUpgrade::new(DIAL_BACK_PROTOCOL), ()) } fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { loop { match self.inbound.poll_next_unpin(cx) { Poll::Pending => return Poll::Pending, @@ -68,12 +66,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -83,7 +76,7 @@ impl ConnectionHandler for Handler { tracing::warn!("Dial back request dropped, too many requests in flight"); } } - // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs + // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. 
}) => { libp2p_core::util::unreachable(error); diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs index 0f303167523..61f564505eb 100644 --- a/protocols/autonat/src/v2/client/handler/dial_request.rs +++ b/protocols/autonat/src/v2/client/handler/dial_request.rs @@ -1,10 +1,18 @@ +use std::{ + collections::VecDeque, + convert::Infallible, + io, + iter::{once, repeat}, + task::{Context, Poll}, + time::Duration, +}; + use futures::{channel::oneshot, AsyncWrite}; use futures_bounded::FuturesMap; use libp2p_core::{ upgrade::{DeniedUpgrade, ReadyUpgrade}, Multiaddr, }; - use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound, OutboundUpgradeSend, @@ -13,14 +21,6 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::{ - collections::VecDeque, - convert::Infallible, - io, - iter::{once, repeat}, - task::{Context, Poll}, - time::Duration, -}; use crate::v2::{ generated::structs::{mod_DialResponse::ResponseStatus, DialStatus}, @@ -72,7 +72,7 @@ pub struct Handler { queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, >, @@ -121,16 +121,14 @@ impl ConnectionHandler for Handler { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) } fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } @@ -161,12 +159,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. 
}) => { @@ -261,7 +254,9 @@ async fn start_stream_handle( Ok(_) => {} Err(err) => { if err.kind() == io::ErrorKind::ConnectionReset { - // The AutoNAT server may have already closed the stream (this is normal because the probe is finished), in this case we have this error: + // The AutoNAT server may have already closed the stream + // (this is normal because the probe is finished), + // in this case we have this error: // Err(Custom { kind: ConnectionReset, error: Stopped(0) }) // so we silently ignore this error } else { diff --git a/protocols/autonat/src/v2/protocol.rs b/protocols/autonat/src/v2/protocol.rs index 4077fd65f5d..70f9f8c37af 100644 --- a/protocols/autonat/src/v2/protocol.rs +++ b/protocols/autonat/src/v2/protocol.rs @@ -1,13 +1,10 @@ // change to quick-protobuf-codec -use std::io; -use std::io::ErrorKind; +use std::{io, io::ErrorKind}; use asynchronous_codec::{Framed, FramedRead, FramedWrite}; - use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::Multiaddr; - use quick_protobuf_codec::Codec; use rand::Rng; @@ -103,7 +100,10 @@ impl From for proto::Message { ); proto::Message { msg: proto::mod_Message::OneOfmsg::dialDataResponse(proto::DialDataResponse { - data: vec![0; val.data_count], // One could use Cow::Borrowed here, but it will require a modification of the generated code and that will fail the CI + // One could use Cow::Borrowed here, but it will + // require a modification of the generated code + // and that will fail the CI + data: vec![0; val.data_count], }), } } diff --git a/protocols/autonat/src/v2/server.rs b/protocols/autonat/src/v2/server.rs index 25819307784..cd9b1e46b18 100644 --- a/protocols/autonat/src/v2/server.rs +++ b/protocols/autonat/src/v2/server.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Behaviour; -pub use behaviour::Event; +pub use behaviour::{Behaviour, Event}; diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs index 027cfff7c13..125955cb53a 100644 --- a/protocols/autonat/src/v2/server/behaviour.rs +++ b/protocols/autonat/src/v2/server/behaviour.rs @@ -4,20 +4,19 @@ use std::{ task::{Context, Poll}, }; -use crate::v2::server::handler::dial_request::DialBackStatus; use either::Either; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; use libp2p_swarm::{ - dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, - FromSwarm, NetworkBehaviour, ToSwarm, + dial_opts::{DialOpts, PeerCondition}, + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, FromSwarm, + NetworkBehaviour, ToSwarm, }; use rand_core::{OsRng, RngCore}; use crate::v2::server::handler::{ dial_back, - dial_request::{self, DialBackCommand}, + dial_request::{self, DialBackCommand, DialBackStatus}, Handler, }; diff --git a/protocols/autonat/src/v2/server/handler/dial_back.rs b/protocols/autonat/src/v2/server/handler/dial_back.rs index 3cacd4ff32b..8adb33509ef 100644 --- a/protocols/autonat/src/v2/server/handler/dial_back.rs +++ b/protocols/autonat/src/v2/server/handler/dial_back.rs @@ -14,13 +14,12 @@ use libp2p_swarm::{ SubstreamProtocol, }; +use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; use crate::v2::{ protocol::{dial_back, recv_dial_back_response}, DIAL_BACK_PROTOCOL, }; -use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; - pub(crate) type ToBehaviour = io::Result<()>; pub struct Handler { @@ 
-47,16 +46,14 @@ impl ConnectionHandler for Handler { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) } fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if let Poll::Ready(result) = self.outbound.poll_unpin(cx) { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( result @@ -77,12 +74,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { diff --git a/protocols/autonat/src/v2/server/handler/dial_request.rs b/protocols/autonat/src/v2/server/handler/dial_request.rs index 5058e0f3f42..22cab2b9cab 100644 --- a/protocols/autonat/src/v2/server/handler/dial_request.rs +++ b/protocols/autonat/src/v2/server/handler/dial_request.rs @@ -81,16 +81,14 @@ where type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(ReadyUpgrade::new(DIAL_REQUEST_PROTOCOL), ()) } fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { loop { match self.inbound.poll_unpin(cx) { Poll::Ready(Ok(event)) => { @@ -117,12 +115,7 @@ where fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs index f22a2e51470..1e278f5554f 100644 --- a/protocols/autonat/tests/autonatv2.rs +++ b/protocols/autonat/tests/autonatv2.rs @@ -1,23 +1,20 @@ -use libp2p_autonat::v2::client::{self, Config}; -use libp2p_autonat::v2::server; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::TransportError; -use libp2p_core::Multiaddr; +use std::{sync::Arc, time::Duration}; + +use libp2p_autonat::v2::{ + client::{self, Config}, + server, +}; +use libp2p_core::{multiaddr::Protocol, transport::TransportError, Multiaddr}; use libp2p_swarm::{ DialError, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, Swarm, SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use rand_core::OsRng; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::oneshot; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn confirm_successful() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (mut alice, mut bob) = start_and_connect().await; let cor_server_peer = *alice.local_peer_id(); @@ -128,9 +125,7 @@ async fn confirm_successful() { #[tokio::test] async fn dial_back_to_unsupported_protocol() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (mut alice, mut bob) = bootstrap().await; let alice_peer_id = *alice.local_peer_id(); @@ -226,9 +221,7 @@ async fn dial_back_to_unsupported_protocol() { #[tokio::test] async fn dial_back_to_non_libp2p() { - let _ = tracing_subscriber::fmt() - 
.with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (mut alice, mut bob) = bootstrap().await; let alice_peer_id = *alice.local_peer_id(); @@ -314,9 +307,7 @@ async fn dial_back_to_non_libp2p() { #[tokio::test] async fn dial_back_to_not_supporting() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (mut alice, mut bob) = bootstrap().await; let alice_peer_id = *alice.local_peer_id(); diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index f5c18e3f34e..49c6c483514 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use libp2p_autonat::{ Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError, }; @@ -25,7 +27,6 @@ use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tokio::task::JoinHandle; const MAX_CONFIDENCE: usize = 3; @@ -116,7 +117,8 @@ async fn test_auto_probe() { // It can happen that the server observed the established connection and // returned a response before the inbound established connection was reported at the client. - // In this (rare) case the `ConnectionEstablished` event occurs after the `OutboundProbeEvent::Response`. + // In this (rare) case the `ConnectionEstablished` event + // occurs after the `OutboundProbeEvent::Response`. if !had_connection_event { match client.next_swarm_event().await { SwarmEvent::ConnectionEstablished { diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index d43d14198d4..944c4301b20 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -18,15 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{num::NonZeroU32, time::Duration}; + use libp2p_autonat::{ Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError, }; use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::DialError; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::{num::NonZeroU32, time::Duration}; #[tokio::test] async fn test_dial_back() { @@ -340,7 +340,8 @@ async fn test_global_ips_config() { client.listen().await; tokio::spawn(client.loop_on_next()); - // Expect the probe to be refused as both peers run on the same machine and thus in the same local network. + // Expect the probe to be refused as both peers run + // on the same machine and thus in the same local network. match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Error { error, .. 
}) => assert!(matches!( error, diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index a47f5400488..7bc05671aa2 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -27,19 +27,14 @@ lru = "0.12.3" futures-bounded = { workspace = true } [dev-dependencies] -clap = { version = "4.5.6", features = ["derive"] } -libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } -libp2p-noise = { workspace = true } -libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } libp2p-relay = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } -rand = "0.8" -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } tokio = { workspace = true, features = ["rt", "macros"] } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 7d0366c98bc..989635c02ba 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -20,27 +20,29 @@ //! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. -use crate::{handler, protocol}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::Infallible, + num::NonZeroUsize, + task::{Context, Poll}, +}; + use either::Either; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{ + connection::ConnectedPoint, multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr, +}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm}; -use libp2p_swarm::dial_opts::{self, DialOpts}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, - THandlerOutEvent, + behaviour::{ConnectionClosed, DialFailure, FromSwarm}, + dial_opts::{self, DialOpts}, + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, + NewExternalAddrCandidate, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; use lru::LruCache; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::Infallible; -use std::num::NonZeroUsize; -use std::task::{Context, Poll}; use thiserror::Error; +use crate::{handler, protocol}; + pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; /// The events produced by the [`Behaviour`]. @@ -184,7 +186,8 @@ impl NetworkBehaviour for Behaviour { handler::relayed::Handler::new(connected_point, self.observed_addresses()); handler.on_behaviour_event(handler::relayed::Command::Connect); - return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + return Ok(Either::Left(handler)); } self.direct_connections .entry(peer) @@ -217,7 +220,8 @@ impl NetworkBehaviour for Behaviour { port_use, }, self.observed_addresses(), - ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. 
+ ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one + // outbound. } self.direct_connections @@ -255,7 +259,8 @@ impl NetworkBehaviour for Behaviour { Either::Left(_) => connection_id, Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) { None => { - // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it. + // If the connection ID is unknown to us, it means we didn't create it so ignore + // any event coming from it. return; } Some(relayed_connection_id) => *relayed_connection_id, @@ -347,8 +352,9 @@ impl NetworkBehaviour for Behaviour { /// /// We use an [`LruCache`] to favor addresses that are reported more often. /// When attempting a hole-punch, we will try more frequent addresses first. -/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol). -/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch. +/// Most of these addresses will come from observations by other nodes (via e.g. the identify +/// protocol). More common observations mean a more likely stable port-mapping and thus a higher +/// chance of a successful hole-punch. struct Candidates { inner: LruCache, me: PeerId, diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index ad12a196cb9..47813493e9e 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -20,26 +20,31 @@ //! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. -use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; -use crate::{protocol, PROTOCOL_NAME}; +use std::{ + collections::VecDeque, + io, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; use futures::future; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; -use libp2p_core::ConnectedPoint; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, +use libp2p_core::{ + multiaddr::Multiaddr, + upgrade::{DeniedUpgrade, ReadyUpgrade}, + ConnectedPoint, }; use libp2p_swarm::{ + handler::{ + ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ListenUpgradeError, + }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; use protocol::{inbound, outbound}; -use std::collections::VecDeque; -use std::io; -use std::task::{Context, Poll}; -use std::time::Duration; + +use crate::{behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS, protocol, PROTOCOL_NAME}; #[derive(Debug)] pub enum Command { @@ -60,7 +65,7 @@ pub struct Handler { queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, >, @@ -93,10 +98,7 @@ impl Handler { &mut self, FullyNegotiatedInbound { protocol: output, .. - }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, + }: FullyNegotiatedInbound<::InboundProtocol>, ) { match output { future::Either::Left(stream) => { @@ -114,8 +116,8 @@ impl Handler { } self.attempts += 1; } - // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. - // TODO: remove when Rust 1.82 is MSRV + // A connection listener denies all incoming substreams, thus none can ever be fully + // negotiated. 
TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] future::Either::Right(output) => libp2p_core::util::unreachable(output), } @@ -125,10 +127,7 @@ impl Handler { &mut self, FullyNegotiatedOutbound { protocol: stream, .. - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, + }: FullyNegotiatedOutbound<::OutboundProtocol>, ) { assert!( self.endpoint.is_listener(), @@ -151,7 +150,7 @@ impl Handler { fn on_listen_upgrade_error( &mut self, ListenUpgradeError { error, .. }: ListenUpgradeError< - ::InboundOpenInfo, + (), ::InboundProtocol, >, ) { @@ -163,7 +162,7 @@ impl Handler { fn on_dial_upgrade_error( &mut self, DialUpgradeError { error, .. }: DialUpgradeError< - ::OutboundOpenInfo, + (), ::OutboundProtocol, >, ) { @@ -191,7 +190,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { match self.endpoint { ConnectedPoint::Dialer { .. } => { SubstreamProtocol::new(Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), ()) @@ -231,9 +230,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { // Return queued events. if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); @@ -290,12 +287,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 005d8394f5e..c5209930ca2 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -18,14 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::proto; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 8639ff4f053..cdd3d5fbf0b 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -18,17 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::PROTOCOL_NAME; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; use web_time::Instant; +use crate::{proto, PROTOCOL_NAME}; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 36f168fb04a..ce7119cebcf 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -18,9 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::upgrade::Version; -use libp2p_core::transport::{MemoryTransport, Transport}; +use std::time::Duration; + +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{upgrade::Version, MemoryTransport, Transport}, +}; use libp2p_dcutr as dcutr; use libp2p_identify as identify; use libp2p_identity as identity; @@ -29,14 +32,10 @@ use libp2p_plaintext as plaintext; use libp2p_relay as relay; use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn connect() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut relay = build_relay(); let mut dst = build_client(); diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 1a70d2213b2..477172b42c0 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -18,27 +18,36 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{ - FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, - FloodsubSubscriptionAction, +use std::{ + collections::{ + hash_map::{DefaultHasher, HashMap}, + VecDeque, + }, + iter, + task::{Context, Poll}, }; -use crate::topic::Topic; -use crate::FloodsubConfig; + use bytes::Bytes; use cuckoofilter::{CuckooError, CuckooFilter}; use fnv::FnvHashSet; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::{ - dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, - NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, + CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, + OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{DefaultHasher, HashMap}; -use std::task::{Context, Poll}; -use std::{collections::VecDeque, iter}; + +use crate::{ + protocol::{ + FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, + FloodsubSubscriptionAction, + }, + topic::Topic, + FloodsubConfig, +}; /// Network behaviour that handles the floodsub protocol. pub struct Floodsub { @@ -192,7 +201,8 @@ impl Floodsub { self.publish_many_inner(topic, data, true) } - /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics. + /// Publishes a message with multiple topics to the network, even if we're not subscribed to any + /// of the topics. 
pub fn publish_many_any( &mut self, topic: impl IntoIterator>, diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs index 94766d5fdca..d43b0c88788 100644 --- a/protocols/floodsub/src/lib.rs +++ b/protocols/floodsub/src/lib.rs @@ -35,9 +35,11 @@ mod proto { pub(crate) use self::floodsub::pb::{mod_RPC::SubOpts, Message, RPC}; } -pub use self::layer::{Floodsub, FloodsubEvent}; -pub use self::protocol::{FloodsubMessage, FloodsubRpc}; -pub use self::topic::Topic; +pub use self::{ + layer::{Floodsub, FloodsubEvent}, + protocol::{FloodsubMessage, FloodsubRpc}, + topic::Topic, +}; /// Configuration options for the Floodsub protocol. #[derive(Debug, Clone)] diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index edc842be8ce..69cfcbd9dc7 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -18,19 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::topic::Topic; +use std::{io, iter, pin::Pin}; + use asynchronous_codec::Framed; use bytes::Bytes; use futures::{ io::{AsyncRead, AsyncWrite}, - Future, + Future, SinkExt, StreamExt, }; -use futures::{SinkExt, StreamExt}; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::{io, iter, pin::Pin}; + +use crate::{proto, topic::Topic}; const MAX_MESSAGE_LEN_BYTES: usize = 2048; diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index ddbbc7fb552..94b9b922973 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,5 +1,14 @@ ## 0.48.0 +- Allow broadcasting `IDONTWANT` messages when publishing to avoid downloading data that is already available. + See [PR 5773](https://github.com/libp2p/rust-libp2p/pull/5773) + +- Add configurable `idontwant_message_size_threshold` parameter. + See [PR 5770](https://github.com/libp2p/rust-libp2p/pull/5770) + +- Introduce Gossipsub v1.2 [spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md). + See [PR 5697](https://github.com/libp2p/rust-libp2p/pull/5697) + - Correct state inconsistencies with the mesh and fanout when unsubscribing. See [PR 5690](https://github.com/libp2p/rust-libp2p/pull/5690) @@ -18,10 +27,17 @@ - Introduce back pressure and penalize slow peers. Drop stale messages that timeout before being delivered. See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). + - Change `Behaviour::unsubscribe` and `Behaviour::report_message_validation_result` to `bool` they don't need to be a `Result`. See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + +- Fix an issue where an `InsufficientPeers` error could occur under certain conditions, despite having peers subscribed to a topic. + See [PR 5793](https://github.com/libp2p/rust-libp2p/pull/5793). + ## 0.47.0 @@ -242,7 +258,7 @@ - Move from `open-metrics-client` to `prometheus-client` (see [PR 2442]). -- Emit gossip of all non empty topics (see [PR 2481]). +- Emit gossip of all non-empty topics (see [PR 2481]). - Merge NetworkBehaviour's inject_\* paired methods (see [PR 2445]).
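The changelog entries above describe the new gossipsub v1.2 `IDONTWANT` behaviour and its `idontwant_message_size_threshold` knob. As a rough, non-normative sketch of how a consumer might opt into these options (not part of the patch), assuming the `ConfigBuilder` exposes setter methods matching the config accessors introduced here (`idontwant_message_size_threshold` and `idontwant_on_publish`; check the crate docs for the exact builder API):

```rust
// Hypothetical consumer-side configuration; setter names are assumed to match
// the accessors introduced in this patch and may differ in the released API.
use libp2p_gossipsub::{Config, ConfigBuilder, ConfigBuilderError};

fn gossipsub_config() -> Result<Config, ConfigBuilderError> {
    ConfigBuilder::default()
        // Assumed setter: only broadcast IDONTWANT for messages larger than 1 KiB.
        .idontwant_message_size_threshold(1024)
        // Assumed setter: also broadcast IDONTWANT when publishing, not only when forwarding.
        .idontwant_on_publish(true)
        .build()
}
```

The 1 KiB threshold is an arbitrary example value, not the crate default; only peers advertising gossipsub v1.2 receive the resulting `IDONTWANT` control messages.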
diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index c09286c8aa0..328d4367204 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -23,7 +23,8 @@ either = "1.11" fnv = "1.0.7" futures = { workspace = true } futures-timer = "3.0.2" -getrandom = "0.2.15" +getrandom = { workspace = true } +hashlink = { workspace = true} hex_fmt = "0.3.0" web-time = { workspace = true } libp2p-core = { workspace = true } @@ -35,20 +36,16 @@ rand = "0.8" regex = "1.10.5" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" -smallvec = "1.13.2" tracing = { workspace = true } # Metrics dependencies prometheus-client = { workspace = true } [dev-dependencies] -hex = "0.4.2" libp2p-core = { workspace = true } -libp2p-yamux = { workspace = true } -libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index 4414ffb00e6..ee600d22098 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -19,15 +19,19 @@ // DEALINGS IN THE SOFTWARE. //! Data structure for efficiently storing known back-off's when pruning peers. -use crate::topic::TopicHash; -use libp2p_identity::PeerId; -use std::collections::{ - hash_map::{Entry, HashMap}, - HashSet, +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, + time::Duration, }; -use std::time::Duration; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::topic::TopicHash; + #[derive(Copy, Clone)] struct HeartbeatIndex(usize); @@ -68,8 +72,8 @@ impl BackoffStorage { } } - /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call - /// doesn't change anything). + /// Updates the backoff for a peer (if there is already a more restrictive backoff then this + /// call doesn't change anything). pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) { let instant = Instant::now() + time; let insert_into_backoffs_by_heartbeat = @@ -124,7 +128,7 @@ impl BackoffStorage { pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) - .map_or(false, |m| m.contains_key(peer)) + .is_some_and(|m| m.contains_key(peer)) } pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { @@ -155,7 +159,7 @@ impl BackoffStorage { None => false, }; if !keep { - //remove from backoffs + // remove from backoffs if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) { if m.get_mut().remove(peer).is_some() && m.get().is_empty() { m.remove(); diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 075a881db48..356f1d6cd77 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -19,11 +19,10 @@ // DEALINGS IN THE SOFTWARE. 
use std::{ - cmp::{max, Ordering}, - collections::HashSet, - collections::VecDeque, - collections::{BTreeSet, HashMap}, + cmp::{max, Ordering, Ordering::Equal}, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, fmt, + fmt::Debug, net::IpAddr, task::{Context, Poll}, time::Duration, @@ -31,56 +30,56 @@ use std::{ use futures::FutureExt; use futures_timer::Delay; -use prometheus_client::registry::Registry; -use rand::{seq::SliceRandom, thread_rng}; - +use hashlink::LinkedHashMap; use libp2p_core::{ - multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr, + multiaddr::Protocol::{Ip4, Ip6}, + transport::PortUse, + Endpoint, Multiaddr, }; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; +use libp2p_identity::{Keypair, PeerId}; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, dial_opts::DialOpts, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use prometheus_client::registry::Registry; +use quick_protobuf::{MessageWrite, Writer}; +use rand::{seq::SliceRandom, thread_rng}; use web_time::{Instant, SystemTime}; -use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; -use crate::protocol::SIGNING_PREFIX; -use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; -use crate::time_cache::DuplicateCache; -use crate::topic::{Hasher, Topic, TopicHash}; -use crate::transform::{DataTransform, IdentityTransform}; -use crate::types::{ - ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, - SubscriptionAction, -}; -use crate::types::{PeerConnections, PeerKind, RpcOut}; -use crate::{backoff::BackoffStorage, FailedMessages}; use crate::{ + backoff::BackoffStorage, config::{Config, ValidationMode}, - types::Graft, -}; -use crate::{gossip_promises::GossipPromises, types::Prune}; -use crate::{ + gossip_promises::GossipPromises, handler::{Handler, HandlerEvent, HandlerIn}, - types::IWant, -}; -use crate::{mcache::MessageCache, types::IHave}; -use crate::{ + mcache::MessageCache, metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}, + peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}, + protocol::SIGNING_PREFIX, rpc::Sender, + rpc_proto::proto, + subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, + time_cache::DuplicateCache, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{ + ControlAction, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, MessageId, + PeerConnections, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, + SubscriptionAction, + }, + FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; -use crate::{rpc_proto::proto, TopicScoreParams}; -use crate::{PublishError, SubscriptionError, ValidationError}; -use quick_protobuf::{MessageWrite, Writer}; -use std::{cmp::Ordering::Equal, fmt::Debug}; #[cfg(test)] mod tests; +/// IDONTWANT cache capacity. +const IDONTWANT_CAP: usize = 10_000; + +/// IDONTWANT timeout before removal. +const IDONTWANT_TIMEOUT: Duration = Duration::new(3, 0); + /// Determines if published messages should be signed or not. /// /// Without signing, a number of privacy preserving modes can be selected. 
@@ -221,8 +220,9 @@ impl From for PublishConfig { let public_key = keypair.public(); let key_enc = public_key.encode_protobuf(); let key = if key_enc.len() <= 42 { - // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it - // specifically in the [`rpc_proto::proto::Message::key`] field. + // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we + // don't include it specifically in the + // [`rpc_proto::proto::Message::key`] field. None } else { // Include the protobuf encoding of the public key in the message. @@ -289,7 +289,7 @@ pub struct Behaviour { /// The last publish time for fanout topics. fanout_last_pub: HashMap, - ///Storage for backoffs + /// Storage for backoffs backoffs: BackoffStorage, /// Message cache for the last few heartbeats. @@ -314,7 +314,7 @@ pub struct Behaviour { /// Stores optional peer score data together with thresholds, decay interval and gossip /// promises. - peer_score: Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>, + peer_score: Option<(PeerScore, PeerScoreThresholds, Delay)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. count_received_ihave: HashMap, @@ -339,6 +339,9 @@ pub struct Behaviour { /// Tracks the numbers of failed messages per peer-id. failed_messages: HashMap, + + /// Tracks recently sent `IWANT` messages and checks if peers respond to them. + gossip_promises: GossipPromises, } impl Behaviour @@ -472,6 +475,7 @@ where subscription_filter, data_transform, failed_messages: Default::default(), + gossip_promises: Default::default(), }) } } @@ -671,9 +675,14 @@ where // Gossipsub peers None => { tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // `fanout_peers` is always non-empty if it's `Some`. + let fanout_peers = self + .fanout + .get(&topic_hash) + .filter(|peers| !peers.is_empty()); // If we have fanout peers add them to the map. 
- if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + if let Some(peers) = fanout_peers { + for peer in peers { recipient_peers.insert(*peer); } } else { @@ -753,6 +762,13 @@ where return Err(PublishError::AllQueuesFull(recipient_peers.len())); } + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() + && self.config.idontwant_on_publish() + { + self.send_idontwant(&raw_message, &msg_id, raw_message.source.as_ref()); + } + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { @@ -905,7 +921,7 @@ where let interval = Delay::new(params.decay_interval); let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); - self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default())); + self.peer_score = Some((peer_score, threshold, interval)); Ok(()) } @@ -1169,7 +1185,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -1230,10 +1246,7 @@ where return false; } - self.peer_score - .as_ref() - .map(|(_, _, _, promises)| !promises.contains(id)) - .unwrap_or(true) + !self.gossip_promises.contains(id) }; for (topic, ids) in ihave_msgs { @@ -1280,13 +1293,11 @@ where iwant_ids_vec.truncate(iask); *iasked += iask; - if let Some((_, _, _, gossip_promises)) = &mut self.peer_score { - gossip_promises.add_promise( - *peer_id, - &iwant_ids_vec, - Instant::now() + self.config.iwant_followup_time(), - ); - } + self.gossip_promises.add_promise( + *peer_id, + &iwant_ids_vec, + Instant::now() + self.config.iwant_followup_time(), + ); tracing::trace!( peer=%peer_id, "IHAVE: Asking for the following messages from peer: {:?}", @@ -1415,7 +1426,7 @@ where + self.config.graft_flood_threshold()) - self.config.prune_backoff(); if flood_cutoff > now { - //extra penalty + // extra penalty peer_score.add_penalty(peer_id, 1); } } @@ -1436,15 +1447,16 @@ where topic=%topic_hash, "GRAFT: ignoring peer with negative score" ); - // we do send them PRUNE however, because it's a matter of protocol correctness + // we do send them PRUNE however, because it's a matter of protocol + // correctness to_prune_topics.insert(topic_hash.clone()); // but we won't PX to them do_px = false; continue; } - // check mesh upper bound and only allow graft if the upper bound is not reached or - // if it is an outbound peer + // check mesh upper bound and only allow graft if the upper bound is not reached + // or if it is an outbound peer if peers.len() >= self.config.mesh_n_high() && !self.outbound_peers.contains(peer_id) { @@ -1572,7 +1584,7 @@ where self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune); if self.mesh.contains_key(&topic_hash) { - //connect to px peers + // connect to px peers if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { @@ -1604,7 +1616,7 @@ where let n = self.config.prune_peers(); // Ignore peerInfo with no ID // - //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a + // TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a // signed peer record? 
px.retain(|p| p.peer_id.is_some()); if px.len() > n { @@ -1649,14 +1661,15 @@ where peer=%propagation_source, "Rejecting message from blacklisted peer" ); - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + self.gossip_promises + .reject_message(msg_id, &RejectReason::BlackListedPeer); + if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.reject_message( propagation_source, msg_id, &raw_message.topic, RejectReason::BlackListedPeer, ); - gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer); } return false; } @@ -1689,7 +1702,7 @@ where let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { own_id != propagation_source - && raw_message.source.as_ref().map_or(false, |s| s == own_id) + && raw_message.source.as_ref().is_some_and(|s| s == own_id) } else { self.published_message_ids.contains(msg_id) }; @@ -1738,6 +1751,11 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, Some(propagation_source)); + } + // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1753,6 +1771,7 @@ where self.mcache.observe_duplicate(&msg_id, propagation_source); return; } + tracing::debug!( message=%msg_id, "Put message in duplicate_cache and resolve promises" @@ -1765,9 +1784,11 @@ where // Tells score that message arrived (but is maybe not fully validated yet). // Consider the message as delivered for gossip promises. - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + self.gossip_promises.message_delivered(&msg_id); + + // Tells score that message arrived (but is maybe not fully validated yet). + if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.validate_message(propagation_source, &msg_id, &message.topic); - gossip_promises.message_delivered(&msg_id); } // Add the message to our memcache @@ -1809,12 +1830,14 @@ where raw_message: &RawMessage, reject_reason: RejectReason, ) { - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - if let Some(metrics) = self.metrics.as_mut() { - metrics.register_invalid_message(&raw_message.topic); - } + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_invalid_message(&raw_message.topic); + } - if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { + let message = self.data_transform.inbound_transform(raw_message.clone()); + + match (&mut self.peer_score, message) { + (Some((peer_score, ..)), Ok(message)) => { let message_id = self.config.message_id(&message); peer_score.reject_message( @@ -1824,13 +1847,22 @@ where reject_reason, ); - gossip_promises.reject_message(&message_id, &reject_reason); - } else { + self.gossip_promises + .reject_message(&message_id, &reject_reason); + } + (Some((peer_score, ..)), Err(_)) => { // The message is invalid, we reject it ignoring any gossip promises. If a peer is // advertising this message via an IHAVE and it's invalid it will be double // penalized, one for sending us an invalid and again for breaking a promise. 
peer_score.reject_invalid_message(propagation_source, &raw_message.topic); } + (None, Ok(message)) => { + // Valid transformation without peer scoring + let message_id = self.config.message_id(&message); + self.gossip_promises + .reject_message(&message_id, &reject_reason); + } + (None, Err(_)) => {} } } @@ -1897,7 +1929,7 @@ where // if the mesh needs peers add the peer to the mesh if !self.explicit_peers.contains(propagation_source) - && matches!(peer.kind, PeerKind::Gossipsubv1_1 | PeerKind::Gossipsub) + && peer.kind.is_gossipsub() && !Self::score_below_threshold_from_scores( &self.peer_score, propagation_source, @@ -2001,8 +2033,8 @@ where /// Applies penalties to peers that did not respond to our IWANT requests. fn apply_iwant_penalties(&mut self) { - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - for (peer, count) in gossip_promises.get_broken_promises() { + if let Some((peer_score, ..)) = &mut self.peer_score { + for (peer, count) in self.gossip_promises.get_broken_promises() { peer_score.add_penalty(&peer, count); if let Some(metrics) = self.metrics.as_mut() { metrics.register_score_penalty(Penalty::BrokenPromise); @@ -2223,7 +2255,7 @@ where && peers.len() > 1 && self.peer_score.is_some() { - if let Some((_, thresholds, _, _)) = &self.peer_score { + if let Some((_, thresholds, _)) = &self.peer_score { // Opportunistic grafting works as follows: we check the median score of peers // in the mesh; if this score is below the opportunisticGraftThreshold, we // select a few peers at random with score over the median. @@ -2316,7 +2348,7 @@ where for (topic_hash, peers) in self.fanout.iter_mut() { let mut to_remove_peers = Vec::new(); let publish_threshold = match &self.peer_score { - Some((_, thresholds, _, _)) => thresholds.publish_threshold, + Some((_, thresholds, _)) => thresholds.publish_threshold, _ => 0.0, }; for peer_id in peers.iter() { @@ -2409,6 +2441,17 @@ where } self.failed_messages.shrink_to_fit(); + // Flush stale IDONTWANTs. + for peer in self.connected_peers.values_mut() { + while let Some((_front, instant)) = peer.dont_send.front() { + if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { + break; + } else { + peer.dont_send.pop_front(); + } + } + } + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); @@ -2564,6 +2607,49 @@ where } } + /// Helper function which sends an IDONTWANT message to mesh\[topic\] peers. + fn send_idontwant( + &mut self, + message: &RawMessage, + msg_id: &MessageId, + propagation_source: Option<&PeerId>, + ) { + let Some(mesh_peers) = self.mesh.get(&message.topic) else { + return; + }; + + let iwant_peers = self.gossip_promises.peers_for_message(msg_id); + + let recipient_peers: Vec = mesh_peers + .iter() + .chain(iwant_peers.iter()) + .filter(|&peer_id| { + Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() + }) + .cloned() + .collect(); + + for peer_id in recipient_peers { + let Some(peer) = self.connected_peers.get_mut(&peer_id) else { + tracing::error!(peer = %peer_id, + "Could not IDONTWANT, peer doesn't exist in connected peer list"); + continue; + }; + + // Only gossipsub 1.2 peers support IDONTWANT. + if peer.kind != PeerKind::Gossipsubv1_2 { + continue; + } + + self.send_message( + peer_id, + RpcOut::IDontWant(IDontWant { + message_ids: vec![msg_id.clone()], + }), + ); + } + } + /// Helper function which forwards a message to mesh\[topic\] peers. 
/// /// Returns true if at least one peer was messaged. @@ -2619,13 +2705,23 @@ where } // forward the message to peers - for peer in recipient_peers.iter() { - let event = RpcOut::Forward { - message: message.clone(), - timeout: Delay::new(self.config.forward_queue_duration()), - }; - tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); - self.send_message(*peer, event); + for peer_id in recipient_peers.iter() { + if let Some(peer) = self.connected_peers.get_mut(peer_id) { + if peer.dont_send.contains_key(msg_id) { + tracing::debug!(%peer_id, message=%msg_id, "Peer doesn't want message"); + continue; + } + + tracing::debug!(%peer_id, message=%msg_id, "Sending message to peer"); + + self.send_message( + *peer_id, + RpcOut::Forward { + message: message.clone(), + timeout: Delay::new(self.config.forward_queue_duration()), + }, + ); + } } tracing::debug!("Completed forwarding message"); true @@ -2761,14 +2857,14 @@ where failed_messages.non_priority += 1; failed_messages.forward += 1; } - RpcOut::IWant(_) | RpcOut::IHave(_) => { + RpcOut::IWant(_) | RpcOut::IHave(_) | RpcOut::IDontWant(_) => { failed_messages.non_priority += 1; } RpcOut::Graft(_) | RpcOut::Prune(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { - unreachable!("Channel for highpriority contorl messages is unbounded and should always be open.") + unreachable!("Channel for highpriority control messages is unbounded and should always be open.") } } @@ -2867,8 +2963,8 @@ where .expect("Previously established connection to peer must be present"); peer.connections.remove(index); - // If there are more connections and this peer is in a mesh, inform the first connection - // handler. + // If there are more connections and this peer is in a mesh, inform the first + // connection handler. if !peer.connections.is_empty() { for topic in &peer.topics { if let Some(mesh_peers) = self.mesh.get(topic) { @@ -2921,7 +3017,7 @@ where // If metrics are enabled, register the disconnection of a peer based on its protocol. 
if let Some(metrics) = self.metrics.as_mut() { - metrics.peer_protocol_disconnected(connected_peer.kind.clone()); + metrics.peer_protocol_disconnected(connected_peer.kind); } self.connected_peers.remove(&peer_id); @@ -3001,6 +3097,7 @@ where connections: vec![], sender: Sender::new(self.config.connection_handler_queue_len()), topics: Default::default(), + dont_send: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3027,6 +3124,7 @@ where connections: vec![], sender: Sender::new(self.config.connection_handler_queue_len()), topics: Default::default(), + dont_send: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3048,7 +3146,7 @@ where // We have identified the protocol this peer is using if let Some(metrics) = self.metrics.as_mut() { - metrics.peer_protocol_connected(kind.clone()); + metrics.peer_protocol_connected(kind); } if let PeerKind::NotSupported = kind { @@ -3076,7 +3174,7 @@ where } HandlerEvent::MessageDropped(rpc) => { // Account for this in the scoring logic - if let Some((peer_score, _, _, _)) = &mut self.peer_score { + if let Some((peer_score, _, _)) = &mut self.peer_score { peer_score.failed_message_slow_peer(&propagation_source); } @@ -3162,7 +3260,8 @@ where } // Handle control messages - // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however) + // group some control messages, this minimises SendEvents (code is simplified to + // handle each event at a time however) let mut ihave_msgs = vec![]; let mut graft_msgs = vec![]; let mut prune_msgs = vec![]; @@ -3183,6 +3282,24 @@ where peers, backoff, }) => prune_msgs.push((topic_hash, peers, backoff)), + ControlAction::IDontWant(IDontWant { message_ids }) => { + let Some(peer) = self.connected_peers.get_mut(&propagation_source) + else { + tracing::error!(peer = %propagation_source, + "Could not handle IDONTWANT, peer doesn't exist in connected peer list"); + continue; + }; + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_idontwant(message_ids.len()); + } + for message_id in message_ids { + peer.dont_send.insert(message_id, Instant::now()); + // Don't exceed capacity. 
+ if peer.dont_send.len() > IDONTWANT_CAP { + peer.dont_send.pop_front(); + } + } + } } } if !ihave_msgs.is_empty() { @@ -3208,7 +3325,7 @@ where } // update scores - if let Some((peer_score, _, delay, _)) = &mut self.peer_score { + if let Some((peer_score, _, delay)) = &mut self.peer_score { if delay.poll_unpin(cx).is_ready() { peer_score.refresh_scores(); delay.reset(peer_score.params.decay_interval); @@ -3335,7 +3452,7 @@ fn get_random_peers_dynamic( .iter() .filter(|(_, p)| p.topics.contains(topic_hash)) .filter(|(peer_id, _)| f(peer_id)) - .filter(|(_, p)| p.kind == PeerKind::Gossipsub || p.kind == PeerKind::Gossipsubv1_1) + .filter(|(_, p)| p.kind.is_gossipsub()) .map(|(peer_id, _)| *peer_id) .collect::>(); diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 9567150382a..bed74ecdce7 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,25 +20,20 @@ // Collection of tests for the gossipsub network behaviour -use super::*; -use crate::rpc::Receiver; -use crate::subscription_filter::WhitelistSubscriptionFilter; -use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; +use std::{future, net::Ipv4Addr, thread::sleep}; + use byteorder::{BigEndian, ByteOrder}; use libp2p_core::ConnectedPoint; use rand::Rng; -use std::future; -use std::net::Ipv4Addr; -use std::thread::sleep; + +use super::*; +use crate::{ + config::ConfigBuilder, rpc::Receiver, subscription_filter::WhitelistSubscriptionFilter, + types::Rpc, IdentTopic as Topic, +}; #[derive(Default, Debug)] -struct InjectNodes -// TODO: remove trait bound Default when this issue is fixed: -// https://github.com/colin-kiegel/rust-derive-builder/issues/93 -where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, -{ +struct InjectNodes { peer_no: usize, topics: Vec, to_subscribe: bool, @@ -48,6 +43,7 @@ where scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, data_transform: D, subscription_filter: F, + peer_kind: Option, } impl InjectNodes @@ -95,7 +91,7 @@ where let empty = vec![]; for i in 0..self.peer_no { - let (peer, receiver) = add_peer( + let (peer, receiver) = add_peer_with_addr_and_kind( &mut gs, if self.to_subscribe { &topic_hashes @@ -104,6 +100,8 @@ where }, i < self.outbound, i < self.explicit, + Multiaddr::empty(), + self.peer_kind.or(Some(PeerKind::Gossipsubv1_1)), ); peers.push(peer); receivers.insert(peer, receiver); @@ -152,6 +150,11 @@ where self.subscription_filter = subscription_filter; self } + + fn peer_kind(mut self, peer_kind: PeerKind) -> Self { + self.peer_kind = Some(peer_kind); + self + } } fn inject_nodes() -> InjectNodes @@ -234,10 +237,11 @@ where gs.connected_peers.insert( peer, PeerConnections { - kind: kind.clone().unwrap_or(PeerKind::Floodsub), + kind: kind.unwrap_or(PeerKind::Floodsub), connections: vec![connection_id], topics: Default::default(), sender, + dont_send: LinkedHashMap::new(), }, ); @@ -311,7 +315,8 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc { messages.push(RawMessage { source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), data: message.data.unwrap_or_default(), - sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), /* don't inform the + * application */ topic: TopicHash::from_raw(message.topic), signature: message.signature, // don't inform the application key: None, @@ 
-623,6 +628,7 @@ fn test_join() { connections: vec![connection_id], topics: Default::default(), sender, + dont_send: LinkedHashMap::new(), }, ); receivers.insert(random_peer, receiver); @@ -677,7 +683,7 @@ fn test_publish_without_flood_publishing() { // - Send publish message to all peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test old behaviour + // turn off flood publish to test old behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -757,7 +763,7 @@ fn test_fanout() { // - Send publish message to fanout peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test fanout behaviour + // turn off flood publish to test fanout behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -1018,6 +1024,7 @@ fn test_get_random_peers() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); } @@ -1447,10 +1454,10 @@ fn test_explicit_peer_gets_connected() { .to_subscribe(true) .create_network(); - //create new peer + // create new peer let peer = PeerId::random(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(&peer); let num_events = gs @@ -1483,17 +1490,17 @@ fn test_explicit_peer_reconnects() { let peer = others.first().unwrap(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(peer); flush_events(&mut gs, receivers); - //disconnect peer + // disconnect peer disconnect_peer(&mut gs, peer); gs.heartbeat(); - //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` + // check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` assert_eq!( gs.events .iter() @@ -1508,7 +1515,7 @@ fn test_explicit_peer_reconnects() { gs.heartbeat(); - //check that there is a reconnect after second heartbeat + // check that there is a reconnect after second heartbeat assert!( gs.events .iter() @@ -1536,11 +1543,11 @@ fn test_handle_graft_explicit_peer() { gs.handle_graft(peer, topic_hashes.clone()); - //peer got not added to mesh + // peer got not added to mesh assert!(gs.mesh[&topic_hashes[0]].is_empty()); assert!(gs.mesh[&topic_hashes[1]].is_empty()); - //check prunes + // check prunes let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == peer && match m { @@ -1566,13 +1573,13 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { .explicit(1) .create_network(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!( gs.mesh[&topic_hashes[0]], vec![peers[1]].into_iter().collect() ); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1581,7 +1588,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1603,10 +1610,10 @@ fn do_not_graft_explicit_peer() { gs.heartbeat(); - //mesh stays empty + // mesh stays empty assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &others[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1663,7 +1670,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1676,13 +1683,13 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { ); } - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1691,7 +1698,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1711,7 +1718,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1724,16 +1731,16 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { ); } - //we send a message for this topic => this will initialize the fanout + // we send a message for this topic => this will initialize the fanout gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1742,7 +1749,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1774,15 +1781,15 @@ fn no_gossip_gets_sent_to_explicit_peers() { validated: true, }; - //forward the message + // forward the message gs.handle_received_message(message, &local_id); - //simulate multiple gossip calls (for randomness) + // simulate multiple gossip calls (for randomness) for _ in 0..3 { gs.emit_gossip(); } - //assert that no gossip gets sent to explicit peer + // assert that no gossip gets sent to explicit peer let receiver = receivers.remove(&peers[0]).unwrap(); let mut gossips = 0; let non_priority = receiver.non_priority.get_ref(); @@ -1835,7 +1842,7 @@ fn test_mesh_subtraction() { // Adds mesh_low peers and PRUNE 2 giving us a deficit. let n = config.mesh_n_high() + 10; - //make all outbound connections so that we allow grafting to all + // make all outbound connections so that we allow grafting to all let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -1866,10 +1873,10 @@ fn test_connect_to_px_peers_on_handle_prune() { .to_subscribe(true) .create_network(); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let mut px = Vec::new(); - //propose more px peers than config.prune_peers() + // propose more px peers than config.prune_peers() for _ in 0..config.prune_peers() + 5 { px.push(PeerInfo { peer_id: Some(PeerId::random()), @@ -1885,7 +1892,7 @@ fn test_connect_to_px_peers_on_handle_prune() { )], ); - //Check DialPeer events for px peers + // Check DialPeer events for px peers let dials: Vec<_> = gs .events .iter() @@ -1903,7 +1910,7 @@ fn test_connect_to_px_peers_on_handle_prune() { // No duplicates assert_eq!(dials_set.len(), config.prune_peers()); - //all dial peers must be in px + // all dial peers must be in px assert!(dials_set.is_subset( &px.iter() .map(|i| *i.peer_id.as_ref().unwrap()) @@ -1915,14 +1922,14 @@ fn test_connect_to_px_peers_on_handle_prune() { fn test_send_px_and_backoff_in_prune() { let config: Config = Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //send prune to peer + // send prune to peer gs.send_graft_prune( HashMap::new(), vec![(peers[0], vec![topics[0].clone()])] @@ -1931,7 +1938,7 @@ fn test_send_px_and_backoff_in_prune() { HashSet::new(), ); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -1957,14 +1964,14 @@ fn test_send_px_and_backoff_in_prune() { fn test_prune_backoffed_peer_on_graft() { let config: Config = Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //remove peer from mesh and send prune to peer => this adds a backoff for this peer + // remove peer from mesh and send prune to peer => this adds a backoff for this peer gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); gs.send_graft_prune( HashMap::new(), @@ -1974,13 +1981,13 @@ fn test_prune_backoffed_peer_on_graft() { HashSet::new(), ); - //ignore all messages until now + // ignore all messages until now let receivers = flush_events(&mut gs, receivers); - //handle graft + // handle graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - 
//check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2007,7 +2014,7 @@ fn test_do_not_graft_within_backoff_period() { .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to regraft as early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2015,22 +2022,22 @@ fn test_do_not_graft_within_backoff_period() { .gs_config(config) .create_network(); - //handle prune from peer with backoff of one second + // handle prune from peer with backoff of one second gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). + // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). for _ in 0..10 { sleep(Duration::from_millis(100)); gs.heartbeat(); } - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); @@ -2039,11 +2046,11 @@ fn test_do_not_graft_within_backoff_period() { "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, @@ -2053,14 +2060,14 @@ fn test_do_not_graft_within_backoff_period() { #[test] fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { - //set default backoff period to 1 second + // set default backoff period to 1 second let config = ConfigBuilder::default() .prune_backoff(Duration::from_millis(90)) .backoff_slack(1) .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to regraft as early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2068,20 +2075,20 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without .gs_config(config) .create_network(); - //handle prune from peer without a specified backoff + // handle prune from peer without a specified backoff gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Apply one more heartbeat + // Apply one more heartbeat sleep(Duration::from_millis(100)); gs.heartbeat(); - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). 
let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); @@ -2090,11 +2097,11 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, @@ -2181,7 +2188,7 @@ fn test_flood_publish() { .to_subscribe(true) .create_network(); - //publish message + // publish message let publish_data = vec![0; 42]; gs.publish(Topic::new(topic), publish_data).unwrap(); @@ -2228,15 +2235,15 @@ fn test_flood_publish() { fn test_gossip_to_at_least_gossip_lazy_peers() { let config: Config = Config::default(); - //add more peers than in mesh to test gossipping - //by default only mesh_n_low peers will get added to mesh + // add more peers than in mesh to test gossipping + // by default only mesh_n_low peers will get added to mesh let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2248,7 +2255,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip gs.emit_gossip(); // Transform the inbound message @@ -2256,7 +2263,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2271,7 +2278,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { fn test_gossip_to_at_most_gossip_factor_peers() { let config: Config = Config::default(); - //add a lot of peers + // add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(m) @@ -2279,7 +2286,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2291,14 +2298,14 @@ fn test_gossip_to_at_most_gossip_factor_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip gs.emit_gossip(); // Transform the inbound message let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. 
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2316,7 +2323,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { fn test_accept_only_outbound_peer_grafts_when_mesh_full() { let config: Config = Config::default(); - //enough peers to fill the mesh + // enough peers to fill the mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2328,30 +2335,30 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { gs.handle_graft(&peer, topics.clone()); } - //assert current mesh size + // assert current mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - //create an outbound and an inbound peer + // create an outbound and an inbound peer let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false); let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false); - //send grafts + // send grafts gs.handle_graft(&inbound, vec![topics[0].clone()]); gs.handle_graft(&outbound, vec![topics[0].clone()]); - //assert mesh size + // assert mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - //inbound is not in mesh + // inbound is not in mesh assert!(!gs.mesh[&topics[0]].contains(&inbound)); - //outbound is in mesh + // outbound is in mesh assert!(gs.mesh[&topics[0]].contains(&outbound)); } #[test] fn test_do_not_remove_too_many_outbound_peers() { - //use an extreme case to catch errors with high probability + // use an extreme case to catch errors with high probability let m = 50; let n = 2 * m; let config = ConfigBuilder::default() @@ -2362,7 +2369,7 @@ fn test_do_not_remove_too_many_outbound_peers() { .build() .unwrap(); - //fill the mesh with inbound connections + // fill the mesh with inbound connections let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -2375,7 +2382,7 @@ fn test_do_not_remove_too_many_outbound_peers() { gs.handle_graft(&peer, topics.clone()); } - //create m outbound connections and graft (we will accept the graft) + // create m outbound connections and graft (we will accept the graft) let mut outbound = HashSet::new(); for _ in 0..m { let (peer, _) = add_peer(&mut gs, &topics, true, false); @@ -2383,7 +2390,7 @@ fn test_do_not_remove_too_many_outbound_peers() { gs.handle_graft(&peer, topics.clone()); } - //mesh is overly full + // mesh is overly full assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); // run a heartbeat @@ -2392,7 +2399,7 @@ fn test_do_not_remove_too_many_outbound_peers() { // Peers should be removed to reach n assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - //all outbound peers are still in the mesh + // all outbound peers are still in the mesh assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); } @@ -2412,7 +2419,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { gs.handle_graft(&peer, topics.clone()); } - //create config.mesh_outbound_min() many outbound connections without grafting + // create config.mesh_outbound_min() many outbound connections without grafting let mut peers = vec![]; for _ in 0..config.mesh_outbound_min() { peers.push(add_peer(&mut gs, &topics, true, false)); @@ -2435,7 +2442,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { fn test_prune_negative_scored_peers() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) 
.topics(vec!["test".into()]) @@ -2449,16 +2456,16 @@ fn test_prune_negative_scored_peers() { ))) .create_network(); - //add penalty to peer + // add penalty to peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //execute heartbeat + // execute heartbeat gs.heartbeat(); - //peer should not be in mesh anymore + // peer should not be in mesh anymore assert!(gs.mesh[&topics[0]].is_empty()); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2481,7 +2488,7 @@ fn test_prune_negative_scored_peers() { #[test] fn test_dont_graft_to_negative_scored_peers() { let config = Config::default(); - //init full mesh + // init full mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2493,34 +2500,34 @@ fn test_dont_graft_to_negative_scored_peers() { ))) .create_network(); - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 to negative + // reduce score of p1 to negative gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); - //handle prunes of all other peers + // handle prunes of all other peers for p in peers { gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); } - //heartbeat + // heartbeat gs.heartbeat(); - //assert that mesh only contains p2 + // assert that mesh only contains p2 assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); } -///Note that in this test also without a penalty the px would be ignored because of the +/// Note that in this test also without a penalty the px would be ignored because of the /// acceptPXThreshold, but the spec still explicitly states the rule that px from negative /// peers should get ignored, therefore we test it here. 
#[test] fn test_ignore_px_from_negative_scored_peer() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2532,10 +2539,10 @@ fn test_ignore_px_from_negative_scored_peer() { ))) .create_network(); - //penalize peer + // penalize peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -2549,7 +2556,7 @@ fn test_ignore_px_from_negative_scored_peer() { )], ); - //assert no dials + // assert no dials assert_eq!( gs.events .iter() @@ -2760,7 +2767,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { collected_messages }); - //the message got sent to p2 + // the message got sent to p2 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2768,7 +2775,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.data_transform.inbound_transform(msg.clone()).unwrap() )) .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); - //the message got not sent to p1 + // the message got not sent to p1 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2786,7 +2793,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build full mesh + // build full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2802,21 +2809,21 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_graft(&peer, topics.clone()); } - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.gossip_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //message that other peers have + // message that other peers have let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2863,31 +2870,31 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers and no subscribed topics + // build mesh with no peers and no subscribed topics let (mut gs, _, mut receivers, _) = inject_nodes1() .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //create a new topic for which we are not subscribed + // create a new topic for which we are not subscribed let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2907,7 +2914,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert_eq!(publishes[0].0, p2); } @@ -2921,28 +2928,28 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, mut receivers, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2962,7 +2969,7 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert!(publishes[0].0 == p2); } @@ -2978,23 +2985,23 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, _, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config.clone()) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 below peer_score_thresholds.graylist_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.graylist_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below publish_threshold but not below graylist_threshold + // reduce score of p2 below publish_threshold but not below graylist_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let raw_message1 = RawMessage { @@ -3053,10 +3060,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message2)], }); - //clear events + // clear events gs.events.clear(); - //receive from p1 + // receive from p1 gs.on_connection_handler_event( p1, ConnectionId::new_unchecked(0), @@ -3070,7 +3077,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //only the subscription event gets processed, the rest is dropped + // only the subscription event gets processed, the rest is dropped assert_eq!(gs.events.len(), 1); assert!(matches!( gs.events[0], @@ -3082,7 +3089,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message4)], }); - //receive from p2 + // receive from p2 gs.on_connection_handler_event( p2, ConnectionId::new_unchecked(0), @@ -3096,7 +3103,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //events got processed + // events got processed assert!(gs.events.len() > 1); } @@ -3145,7 +3152,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { 0 ); - //handle prune from peer peers[1] with px peers + // handle prune from peer peers[1] with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -3158,7 +3165,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { )], ); - //assert there are dials now + // assert there are dials now assert!( gs.events .iter() @@ -3178,7 +3185,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { .build() .unwrap(); - //build mesh with more peers than mesh can hold + // build mesh with more peers than mesh can hold let n = 
config.mesh_n_high() + 1; let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) @@ -3198,21 +3205,21 @@ fn test_keep_best_scoring_peers_on_oversubscription() { gs.handle_graft(peer, topics.clone()); } - //assign scores to peers equalling their index + // assign scores to peers equalling their index - //set random positive scores + // set random positive scores for (index, peer) in peers.iter().enumerate() { gs.set_application_score(peer, index as f64); } assert_eq!(gs.mesh[&topics[0]].len(), n); - //heartbeat to prune some peers + // heartbeat to prune some peers gs.heartbeat(); assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); - //mesh contains retain_scores best peers + // mesh contains retain_scores best peers assert!(gs.mesh[&topics[0]].is_superset( &peers[(n - config.retain_scores())..] .iter() @@ -3239,7 +3246,7 @@ fn test_scoring_p1() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3250,9 +3257,9 @@ fn test_scoring_p1() { .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //sleep for 2 times the mesh_quantum + // sleep for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3265,9 +3272,9 @@ fn test_scoring_p1() { "score should be less than 3 * time_in_mesh_weight * topic_weight" ); - //sleep again for 2 times the mesh_quantum + // sleep again for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3275,9 +3282,9 @@ fn test_scoring_p1() { "score should be at least 4 * time_in_mesh_weight * topic_weight" ); - //sleep for enough periods to reach maximum + // sleep for enough periods to reach maximum sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3309,7 +3316,7 @@ fn test_scoring_p2() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh + time_in_mesh_weight: 0.0, // deactivate time in mesh first_message_deliveries_weight: 2.0, first_message_deliveries_cap: 10.0, first_message_deliveries_decay: 0.9, @@ -3321,7 +3328,7 @@ fn test_scoring_p2() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3338,9 +3345,9 @@ fn test_scoring_p2() { }; let m1 = random_message(&mut seq, &topics); - //peer 0 delivers message first + // peer 0 delivers message first deliver_message(&mut gs, 0, m1.clone()); - //peer 1 delivers message second + // peer 1 delivers message second deliver_message(&mut gs, 1, m1); assert_eq!( @@ -3355,7 +3362,7 @@ fn test_scoring_p2() { "there should be no score for second message deliveries * topic_weight" ); - //peer 2 delivers two new messages + // peer 2 delivers two new messages 
deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); assert_eq!( @@ -3364,7 +3371,7 @@ fn test_scoring_p2() { "score should be exactly 2 * first_message_deliveries_weight * topic_weight" ); - //test decaying + // test decaying gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -3385,7 +3392,7 @@ fn test_scoring_p2() { first_message_deliveries_weight * topic_weight" ); - //test cap + // test cap for _ in 0..topic_params.first_message_deliveries_cap as u64 { deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); } @@ -3407,8 +3414,8 @@ fn test_scoring_p3() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3421,7 +3428,7 @@ fn test_scoring_p3() { peer_score_params.topics.insert(topic_hash, topic_params); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3439,35 +3446,35 @@ fn test_scoring_p3() { let mut expected_message_deliveries = 0.0; - //messages used to test window + // messages used to test window let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); - //peer 1 delivers m1 + // peer 1 delivers m1 deliver_message(&mut gs, 1, m1.clone()); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(60)); - //peer 1 delivers m2 + // peer 1 delivers m2 deliver_message(&mut gs, 1, m2.clone()); sleep(Duration::from_millis(70)); - //peer 0 delivers m1 and m2 only m2 gets counted + // peer 0 delivers m1 and m2 only m2 gets counted deliver_message(&mut gs, 0, m1); deliver_message(&mut gs, 0, m2); expected_message_deliveries += 1.0; sleep(Duration::from_millis(900)); - //message deliveries penalties get activated, peer 0 has only delivered 3 messages and + // message deliveries penalties get activated, peer 0 has only delivered 3 messages and // therefore gets a penalty gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3483,10 +3490,10 @@ fn test_scoring_p3() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //apply 10 decays + // apply 10 decays for _ in 0..10 { gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay } assert_eq!( @@ -3505,8 +3512,8 @@ fn test_scoring_p3b() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time 
deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3522,7 +3529,7 @@ fn test_scoring_p3b() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3540,49 +3547,49 @@ fn test_scoring_p3b() { let mut expected_message_deliveries = 0.0; - //add some positive score + // add some positive score gs.peer_score .as_mut() .unwrap() .0 .set_application_score(&peers[0], 100.0); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(1050)); - //activation kicks in + // activation kicks in gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay - //prune peer + // prune peer gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); - //wait backoff + // wait backoff sleep(Duration::from_millis(130)); - //regraft peer + // regraft peer gs.handle_graft(&peers[0], topics.clone()); - //the score should now consider p3b + // the score should now consider p3b let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 100.0 + expected_b3 * -3.0 * 0.7 ); - //we can also add a new p3 to the score + // we can also add a new p3 to the score - //peer 0 delivers one message + // peer 0 delivers one message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 1.0; sleep(Duration::from_millis(1050)); gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay expected_b3 *= 0.95; assert_eq!( @@ -3601,10 +3608,14 @@ fn test_scoring_p4_valid_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3614,7 +3625,7 @@ fn test_scoring_p4_valid_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3630,7 +3641,7 @@ fn test_scoring_p4_valid_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers valid message + // peer 0 delivers valid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3639,7 +3650,7 @@ fn test_scoring_p4_valid_message() { 
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets validated + // message m1 gets validated gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3659,10 +3670,14 @@ fn test_scoring_p4_invalid_signature() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3672,7 +3687,7 @@ fn test_scoring_p4_invalid_signature() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3685,7 +3700,7 @@ fn test_scoring_p4_invalid_signature() { let mut seq = 0; - //peer 0 delivers message with invalid signature + // peer 0 delivers message with invalid signature let m = random_message(&mut seq, &topics); gs.on_connection_handler_event( @@ -3717,10 +3732,14 @@ fn test_scoring_p4_message_from_self() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3730,7 +3749,7 @@ fn test_scoring_p4_message_from_self() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3746,7 +3765,7 @@ fn test_scoring_p4_message_from_self() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message from self + // peer 0 delivers invalid message from self let mut m = random_message(&mut seq, &topics); m.source = Some(*gs.publish_config.get_own_id().unwrap()); @@ -3767,10 +3786,14 @@ fn test_scoring_p4_ignored_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // 
deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3780,7 +3803,7 @@ fn test_scoring_p4_ignored_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3796,7 +3819,7 @@ fn test_scoring_p4_ignored_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers ignored message + // peer 0 delivers ignored message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3805,7 +3828,7 @@ fn test_scoring_p4_ignored_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets ignored + // message m1 gets ignored gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3825,10 +3848,14 @@ fn test_scoring_p4_application_invalidated_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3838,7 +3865,7 @@ fn test_scoring_p4_application_invalidated_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3854,7 +3881,7 @@ fn test_scoring_p4_application_invalidated_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3863,7 +3890,7 @@ fn test_scoring_p4_application_invalidated_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3886,10 +3913,14 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + 
first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3899,7 +3930,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3915,20 +3946,20 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - //peer 1 delivers same message + // peer 1 delivers same message deliver_message(&mut gs, 1, m1); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3955,10 +3986,14 @@ fn test_scoring_p4_three_application_invalid_messages() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3968,7 +4003,7 @@ fn test_scoring_p4_three_application_invalid_messages() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3984,7 +4019,7 @@ fn test_scoring_p4_three_application_invalid_messages() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers two invalid message + // peer 0 delivers two invalid message let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); let m3 = random_message(&mut seq, &topics); @@ -4002,7 +4037,7 @@ fn test_scoring_p4_three_application_invalid_messages() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //messages gets rejected + // messages gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4021,7 +4056,7 @@ fn test_scoring_p4_three_application_invalid_messages() { MessageAcceptance::Reject, ); - //number of invalid messages gets squared + // number of invalid messages gets squared assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 9.0 * -2.0 * 0.7 @@ -4038,10 +4073,14 @@ fn test_scoring_p4_decay() { let topic = 
Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -4051,7 +4090,7 @@ fn test_scoring_p4_decay() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4067,7 +4106,7 @@ fn test_scoring_p4_decay() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -4075,7 +4114,7 @@ fn test_scoring_p4_decay() { let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4087,7 +4126,7 @@ fn test_scoring_p4_decay() { -2.0 * 0.7 ); - //we decay + // we decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); // the number of invalids gets decayed to 0.9 and then squared in the score @@ -4104,7 +4143,7 @@ fn test_scoring_p5() { ..PeerScoreParams::default() }; - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4141,7 +4180,7 @@ fn test_scoring_p6() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) .create_network(); - //create 5 peers with the same ip + // create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); let peers = vec![ add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, @@ -4151,7 +4190,7 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0, ]; - //create 4 other peers with other ip + // create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); let others = vec![ add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, @@ -4160,12 +4199,12 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, ]; - //no penalties yet + // no penalties yet for peer in peers.iter().chain(others.iter()) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } - //add additional connection for 3 others with addr + // add additional connection for 3 others with addr for id in others.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *id, @@ -4180,14 +4219,14 @@ fn test_scoring_p6() { })); } - //penalties apply squared + // penalties apply squared for peer in peers.iter().chain(others.iter().take(3)) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } - //fourth other peer still no penalty + // fourth other peer still 
no penalty assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - //add additional connection for 3 of the peers to addr2 + // add additional connection for 3 of the peers to addr2 for peer in peers.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *peer, @@ -4202,7 +4241,7 @@ fn test_scoring_p6() { })); } - //double penalties for the first three of each + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4210,7 +4249,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4219,7 +4258,7 @@ fn test_scoring_p6() { 4.0 * -2.0 ); - //two times same ip doesn't count twice + // two times same ip doesn't count twice gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: peers[0], connection_id: ConnectionId::new_unchecked(0), @@ -4232,8 +4271,8 @@ fn test_scoring_p6() { other_established: 2, })); - //nothing changed - //double penalties for the first three of each + // nothing changed + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4241,7 +4280,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4274,7 +4313,7 @@ fn test_scoring_p7_grafts_before_backoff() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) .create_network(); - //remove peers from mesh and send prune to them => this adds a backoff for the peers + // remove peers from mesh and send prune to them => this adds a backoff for the peers for peer in peers.iter().take(2) { gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); gs.send_graft_prune( @@ -4284,31 +4323,31 @@ fn test_scoring_p7_grafts_before_backoff() { ); } - //wait 50 millisecs + // wait 50 millisecs sleep(Duration::from_millis(50)); - //first peer tries to graft + // first peer tries to graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - //double behaviour penalty for first peer (squared) + // double behaviour penalty for first peer (squared) assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 4.0 * -2.0 ); - //wait 100 millisecs + // wait 100 millisecs sleep(Duration::from_millis(100)); - //second peer tries to graft + // second peer tries to graft gs.handle_graft(&peers[1], vec![topics[0].clone()]); - //single behaviour penalty for second peer + // single behaviour penalty for second peer assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 1.0 * -2.0 ); - //test decay + // test decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -4327,7 +4366,7 @@ fn test_opportunistic_grafting() { .mesh_n_low(3) .mesh_n(5) .mesh_n_high(7) - .mesh_outbound_min(0) //deactivate outbound handling + .mesh_outbound_min(0) // deactivate outbound handling .opportunistic_graft_ticks(2) .opportunistic_graft_peers(2) .build() @@ -4351,30 +4390,30 @@ fn test_opportunistic_grafting() { .scoring(Some((peer_score_params, thresholds))) .create_network(); - //fill mesh with 5 peers + // fill mesh with 5 peers for peer in &peers { gs.handle_graft(peer, topics.clone()); } - //add 
additional 5 peers + // add additional 5 peers let others: Vec<_> = (0..5) .map(|_| add_peer(&mut gs, &topics, false, false)) .collect(); - //currently mesh equals peers + // currently mesh equals peers assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); - //give others high scores (but the first two have not high enough scores) + // give others high scores (but the first two have not high enough scores) for (i, peer) in peers.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //set scores for peers in the mesh + // set scores for peers in the mesh for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //this gives a median of exactly 2.0 => should not apply opportunistic grafting + // this gives a median of exactly 2.0 => should not apply opportunistic grafting gs.heartbeat(); gs.heartbeat(); @@ -4384,10 +4423,10 @@ fn test_opportunistic_grafting() { "should not apply opportunistic grafting" ); - //reduce middle score to 1.0 giving a median of 1.0 + // reduce middle score to 1.0 giving a median of 1.0 gs.set_application_score(&peers[2], 1.0); - //opportunistic grafting after two heartbeats + // opportunistic grafting after two heartbeats gs.heartbeat(); assert_eq!( @@ -4417,17 +4456,17 @@ fn test_opportunistic_grafting() { #[test] fn test_ignore_graft_from_unknown_topic() { - //build gossipsub without subscribing to any topics + // build gossipsub without subscribing to any topics let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(1) .topics(vec![]) .to_subscribe(false) .create_network(); - //handle an incoming graft for some topic + // handle an incoming graft for some topic gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); - //assert that no prune got created + // assert that no prune got created let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. })); assert_eq!( control_msgs, 0, @@ -4438,18 +4477,18 @@ fn test_ignore_graft_from_unknown_topic() { #[test] fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = Config::default(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //receive a message + // receive a message let mut seq = 0; let m1 = random_message(&mut seq, &topics); @@ -4460,10 +4499,10 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { gs.handle_received_message(m1, &PeerId::random()); - //clear events + // clear events let receivers = flush_events(&mut gs, receivers); - //the first gossip_retransimission many iwants return the valid message, all others are + // the first gossip_retransimission many iwants return the valid message, all others are // ignored. 
for _ in 0..(2 * config.gossip_retransimission() + 10) { gs.handle_iwant(&peer, vec![id.clone()]); @@ -4490,7 +4529,7 @@ fn test_ignore_too_many_ihaves() { .max_ihave_messages(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4498,15 +4537,15 @@ fn test_ignore_too_many_ihaves() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 20 messages let mut seq = 0; let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); - //peer sends us one ihave for each message in order + // peer sends us one ihave for each message in order for raw_message in &messages { // Transform the inbound message let message = &gs @@ -4527,7 +4566,7 @@ fn test_ignore_too_many_ihaves() { .map(|m| config.message_id(&m)) .collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) @@ -4537,7 +4576,7 @@ fn test_ignore_too_many_ihaves() { "exactly the first ten ihaves should be processed and one iwant for each created" ); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); for raw_message in messages[10..].iter() { @@ -4553,7 +4592,7 @@ fn test_ignore_too_many_ihaves() { ); } - //we sent iwant for all 10 messages + // we sent iwant for all 10 messages let (control_msgs, _) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1) @@ -4568,7 +4607,7 @@ fn test_ignore_too_many_messages_in_ihave() { .max_ihave_length(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4576,19 +4615,19 @@ fn test_ignore_too_many_messages_in_ihave() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 30 messages let mut seq = 0; - let message_ids: Vec<_> = (0..20) + let message_ids: Vec<_> = (0..30) .map(|_| random_message(&mut seq, &topics)) .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) .map(|msg| config.message_id(&msg)) .collect(); - //peer sends us three ihaves + // peer sends us three ihaves gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); gs.handle_ihave( &peer, @@ -4601,7 +4640,7 @@ fn test_ignore_too_many_messages_in_ihave() { let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let mut sum = 0; let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4620,14 +4659,14 @@ fn test_ignore_too_many_messages_in_ihave() { assert_eq!(sum, 10, "exactly the first 
ten ihaves should be processed"); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); gs.handle_ihave( &peer, - vec![(topics[0].clone(), message_ids[10..20].to_vec())], + vec![(topics[0].clone(), message_ids[20..30].to_vec())], ); - //we sent 10 iwant messages ids via a IWANT rpc. + // we sent 10 iwant messages ids via a IWANT rpc. let mut sum = 0; let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4649,7 +4688,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { .max_ihave_length(100) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4657,24 +4696,24 @@ fn test_limit_number_of_message_ids_inside_ihave() { .gs_config(config) .create_network(); - //graft to all peers to really fill the mesh with all the peers + // graft to all peers to really fill the mesh with all the peers for peer in peers { gs.handle_graft(&peer, topics.clone()); } - //add two other peers not in the mesh + // add two other peers not in the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //receive 200 messages from another peer + // receive 200 messages from another peer let mut seq = 0; for _ in 0..200 { gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); } - //emit gossip + // emit gossip gs.emit_gossip(); // both peers should have gotten 100 random ihave messages, to assert the randomness, we @@ -4727,12 +4766,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - /* - use tracing_subscriber::EnvFilter; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - */ + libp2p_test_utils::with_default_env_filter(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) .build() @@ -4862,7 +4896,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .gs_config(config) .create_network(); - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4877,10 +4911,10 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //p1 and p2 are not in the mesh + // p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); @@ -4921,7 +4955,7 @@ fn test_do_not_use_floodsub_in_fanout() { let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4936,7 +4970,7 @@ fn test_do_not_use_floodsub_in_fanout() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), 
publish_data).unwrap(); @@ -4977,7 +5011,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5004,7 +5038,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() { .to_subscribe(false) .create_network(); - //add an old gossipsub peer + // add an old gossipsub peer let (p1, _receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5014,14 +5048,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { Some(PeerKind::Gossipsub), ); - //prune the peer + // prune the peer gs.send_graft_prune( HashMap::new(), vec![(p1, topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that prune does not contain px + // check that prune does not contain px let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), _ => false, @@ -5031,14 +5065,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { #[test] fn test_dont_send_floodsub_peers_in_px() { - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //add two floodsub peers + // add two floodsub peers let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5049,14 +5083,14 @@ fn test_dont_send_floodsub_peers_in_px() { ); let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - //prune only mesh node + // prune only mesh node gs.send_graft_prune( HashMap::new(), vec![(peers[0], topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that px in prune message is empty + // check that px in prune message is empty let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. 
}) => !px.is_empty(), _ => false, @@ -5072,7 +5106,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { .to_subscribe(false) .create_network(); - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5139,7 +5173,7 @@ fn test_subscribe_to_invalid_topic() { #[test] fn test_subscribe_and_graft_with_negative_score() { - //simulate a communication between two gossipsub instances + // simulate a communication between two gossipsub instances let (mut gs1, _, _, topic_hashes) = inject_nodes1() .topics(vec!["test".into()]) .scoring(Some(( @@ -5157,12 +5191,12 @@ fn test_subscribe_and_graft_with_negative_score() { let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); - //add penalty to peer p2 + // add penalty to peer p2 gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); - //subscribe to topic in gs2 + // subscribe to topic in gs2 gs2.subscribe(&topic).unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, @@ -5191,17 +5225,17 @@ fn test_subscribe_and_graft_with_negative_score() { new_receivers }; - //forward the subscribe message + // forward the subscribe message let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //heartbeats on both + // heartbeats on both gs1.heartbeat(); gs2.heartbeat(); - //forward messages again + // forward messages again forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //nobody got penalized + // nobody got penalized assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); } @@ -5237,6 +5271,236 @@ fn test_graft_without_subscribe() { let _ = gs.unsubscribe(&Topic::new(topic)); } +/// Test that a node sends IDONTWANT messages to the mesh peers +/// that run Gossipsub v1.2. 
+#[test] +fn sends_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12u8; 1024], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 3, + "IDONTWANT was not sent" + ); +} + +#[test] +fn doesnt_sends_idontwant_for_lower_message_size() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT was sent" + ); +} + +/// Test that a node doesn't send IDONTWANT messages to the mesh peers +/// that don't run Gossipsub v1.2. +#[test] +fn doesnt_send_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::IDontWant(_)) if peer_id != peers[1]) { + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT were sent" + ); +} + +/// Test that a node doesn't forward a messages to the mesh peers +/// that sent IDONTWANT. 
+#[test] +fn doesnt_forward_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let raw_message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + let message = gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + let message_id = gs.config.message_id(&message); + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + peer.dont_send.insert(message_id, Instant::now()); + + gs.handle_received_message(raw_message.clone(), &local_id); + assert_eq!( + receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { .. }) = non_priority.try_recv() { + assert_ne!(peer_id, peers[2]); + fwds += 1; + } + } + fwds + }), + 2, + "message was not forwarded to the expected mesh peers" + ); +} + +/// Test that a node parses an +/// IDONTWANT message and records it for the respective peer. +#[test] +fn parses_idontwant() { + let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let message_id = MessageId::new(&[0, 1, 2, 3]); + let rpc = Rpc { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![ControlAction::IDontWant(IDontWant { + message_ids: vec![message_id.clone()], + })], + }; + gs.on_connection_handler_event( + peers[1], + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc, + invalid_messages: vec![], + }, + ); + let peer = gs.connected_peers.get_mut(&peers[1]).unwrap(); + assert!(peer.dont_send.get(&message_id).is_some()); +} + +/// Test that a node clears stale IDONTWANT messages.
+#[test] +fn clear_stale_idontwant() { + let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + peer.dont_send + .insert(MessageId::new(&[1, 2, 3, 4]), Instant::now()); + std::thread::sleep(Duration::from_secs(3)); + gs.heartbeat(); + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + assert!(peer.dont_send.is_empty()); +} + #[test] fn test_all_queues_full() { let gs_config = ConfigBuilder::default() @@ -5260,6 +5524,7 @@ fn test_all_queues_full() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); @@ -5294,6 +5559,7 @@ fn test_slow_peer_returns_failed_publish() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); let peer_id = PeerId::random(); @@ -5305,6 +5571,7 @@ fn test_slow_peer_returns_failed_publish() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); @@ -5357,7 +5624,6 @@ fn test_slow_peer_returns_failed_ihave_handling() { topics.insert(topic_hash.clone()); let slow_peer_id = PeerId::random(); - peers.push(slow_peer_id); gs.connected_peers.insert( slow_peer_id, PeerConnections { @@ -5365,6 +5631,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); peers.push(slow_peer_id); @@ -5380,9 +5647,11 @@ fn test_slow_peer_returns_failed_ihave_handling() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); + // First message. let publish_data = vec![1; 59]; let transformed = gs .data_transform @@ -5402,6 +5671,22 @@ fn test_slow_peer_returns_failed_ihave_handling() { &slow_peer_id, vec![(topic_hash.clone(), vec![msg_id.clone()])], ); + + // Second message. 
+ let publish_data = vec![2; 59]; + let transformed = gs + .data_transform + .outbound_transform(&topic_hash, publish_data.clone()) + .unwrap(); + let raw_message = gs + .build_raw_message(topic_hash.clone(), transformed) + .unwrap(); + let msg_id = gs.config.message_id(&Message { + source: raw_message.source, + data: publish_data, + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); gs.handle_ihave(&slow_peer_id, vec![(topic_hash, vec![msg_id.clone()])]); gs.heartbeat(); @@ -5458,6 +5743,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); peers.push(slow_peer_id); @@ -5473,6 +5759,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); @@ -5548,6 +5835,7 @@ fn test_slow_peer_returns_failed_forward() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); peers.push(slow_peer_id); @@ -5563,6 +5851,7 @@ fn test_slow_peer_returns_failed_forward() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); @@ -5643,6 +5932,7 @@ fn test_slow_peer_is_downscored_on_publish() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(2), + dont_send: LinkedHashMap::new(), }, ); gs.peer_score.as_mut().unwrap().0.add_peer(slow_peer_id); @@ -5655,6 +5945,7 @@ fn test_slow_peer_is_downscored_on_publish() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 6e7861bae10..3b0eeafcbb6 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -18,22 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::borrow::Cow; -use std::sync::Arc; -use std::time::Duration; - -use crate::error::ConfigBuilderError; -use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}; -use crate::types::{Message, MessageId, PeerKind}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; +use crate::{ + error::ConfigBuilderError, + protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}, + types::{Message, MessageId, PeerKind}, +}; + /// The types of message validation that can be employed by gossipsub. #[derive(Debug, Clone)] pub enum ValidationMode { - /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to - /// be present as well as the sequence number. All messages must have valid signatures. + /// This is the default setting. This requires the message author to be a valid [`PeerId`] and + /// to be present as well as the sequence number. All messages must have valid signatures. 
/// /// NOTE: This setting will reject messages from nodes using /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have @@ -98,6 +98,8 @@ pub struct Config { connection_handler_queue_len: usize, connection_handler_publish_duration: Duration, connection_handler_forward_duration: Duration, + idontwant_message_size_threshold: usize, + idontwant_on_publish: bool, } impl Config { @@ -134,8 +136,8 @@ impl Config { /// Affects how peers are selected when pruning a mesh due to over subscription. /// - /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder + /// are chosen randomly (D_score in the spec, default is 4). pub fn retain_scores(&self) -> usize { self.retain_scores } @@ -371,6 +373,23 @@ impl Config { pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } + + /// The message size threshold for which IDONTWANT messages are sent. + /// Sending IDONTWANT messages for small messages can have a negative effect to the overall + /// traffic and CPU load. This acts as a lower bound cutoff for the message size to which + /// IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2 + /// (see ) + /// default is 1kB + pub fn idontwant_message_size_threshold(&self) -> usize { + self.idontwant_message_size_threshold + } + + /// Send IDONTWANT messages after publishing message on gossip. This is an optimisation + /// to avoid bandwidth consumption by downloading the published message over gossip. + /// By default it is false. + pub fn idontwant_on_publish(&self) -> bool { + self.idontwant_on_publish + } } impl Default for Config { @@ -423,7 +442,9 @@ impl Default for ConfigBuilder { }), allow_self_origin: false, do_px: false, - prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. + // NOTE: Increasing this currently has little effect until Signed + // records are implemented. + prune_peers: 0, prune_backoff: Duration::from_secs(60), unsubscribe_backoff: Duration::from_secs(10), backoff_slack: 1, @@ -441,6 +462,8 @@ impl Default for ConfigBuilder { connection_handler_queue_len: 5000, connection_handler_publish_duration: Duration::from_secs(5), connection_handler_forward_duration: Duration::from_secs(1), + idontwant_message_size_threshold: 1000, + idontwant_on_publish: false, }, invalid_protocol: false, } @@ -457,7 +480,8 @@ impl From for ConfigBuilder { } impl ConfigBuilder { - /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`). + /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and + /// `/meshsub/1.0.0`). pub fn protocol_id_prefix( &mut self, protocol_id_prefix: impl Into>, @@ -547,8 +571,8 @@ impl ConfigBuilder { /// Affects how peers are selected when pruning a mesh due to over subscription. /// - /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the + /// remainder are chosen randomly (D_score in the spec, default is 4). 
pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self { self.config.retain_scores = retain_scores; self @@ -826,6 +850,25 @@ impl ConfigBuilder { self } + /// The message size threshold for which IDONTWANT messages are sent. + /// Sending IDONTWANT messages for small messages can have a negative effect on the overall + /// traffic and CPU load. This acts as a lower bound cutoff: IDONTWANT is not sent to peers + /// for messages smaller than this threshold. Only works if the peers support Gossipsub v1.2 + /// (see ) + /// The default is 1kB. + pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self { + self.config.idontwant_message_size_threshold = size; + self + } + + /// Send IDONTWANT messages after publishing a message on gossip. This is an optimisation + /// that avoids the bandwidth cost of receiving our own published message back over gossip. + /// By default it is false. + pub fn idontwant_on_publish(&mut self, idontwant_on_publish: bool) -> &mut Self { + self.config.idontwant_on_publish = idontwant_on_publish; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. pub fn build(&self) -> Result { // check all constraints on config @@ -896,18 +939,26 @@ impl std::fmt::Debug for Config { "published_message_ids_cache_time", &self.published_message_ids_cache_time, ); + let _ = builder.field( + "idontwant_message_size_threshold", + &self.idontwant_message_size_threshold, + ); + let _ = builder.field("idontwant_on_publish", &self.idontwant_on_publish); builder.finish() } } #[cfg(test)] mod test { - use super::*; - use crate::topic::IdentityHash; - use crate::Topic; + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; + use libp2p_core::UpgradeInfo; - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; + + use super::*; + use crate::{topic::IdentityHash, Topic}; #[test] fn create_config_with_message_id_as_plain_function() { diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 047d50f2338..eae4c51214e 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -36,8 +36,8 @@ pub enum PublishError { MessageTooLarge, /// The compression algorithm failed. TransformFailed(std::io::Error), - /// Messages could not be sent because the queues for all peers were full. The usize represents the - /// number of peers that were attempted. + /// Messages could not be sent because the queues for all peers were full. The usize represents + /// the number of peers that were attempted.
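As an aside to the two builder methods added above, a minimal sketch of how they might be used together with the new getters; the 4 KiB value is purely illustrative, and only the method names are taken from this change.

fn main() {
    // Hypothetical values: raise the IDONTWANT size cutoff to 4 KiB and opt in
    // to sending IDONTWANT for messages we publish ourselves.
    let config = libp2p_gossipsub::ConfigBuilder::default()
        .idontwant_message_size_threshold(4096)
        .idontwant_on_publish(true)
        .build()
        .expect("valid gossipsub config");

    assert_eq!(config.idontwant_message_size_threshold(), 4096);
    assert!(config.idontwant_on_publish());
}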
AllQueuesFull(usize), } diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 9a074fd61fc..24ac80d2755 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -154,6 +154,7 @@ pub struct ControlMessage { pub iwant: Vec, pub graft: Vec, pub prune: Vec, + pub idontwant: Vec, } impl<'a> MessageRead<'a> for ControlMessage { @@ -165,6 +166,7 @@ impl<'a> MessageRead<'a> for ControlMessage { Ok(18) => msg.iwant.push(r.read_message::(bytes)?), Ok(26) => msg.graft.push(r.read_message::(bytes)?), Ok(34) => msg.prune.push(r.read_message::(bytes)?), + Ok(42) => msg.idontwant.push(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -180,6 +182,7 @@ impl MessageWrite for ControlMessage { + self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.idontwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() } fn write_message(&self, w: &mut Writer) -> Result<()> { @@ -187,6 +190,7 @@ impl MessageWrite for ControlMessage { for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; } for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } + for s in &self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } Ok(()) } } @@ -331,6 +335,38 @@ impl MessageWrite for ControlPrune { } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIDontWant { + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIDontWant { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIDontWant { + fn get_size(&self) -> usize { + 0 + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] pub struct PeerInfo { diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index 2ce12f3f37f..fe4d3bc9366 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -19,8 +19,8 @@ message Message { optional bytes data = 2; optional bytes seqno = 3; required string topic = 4; - optional bytes signature = 5; - optional bytes key = 6; + optional bytes signature = 5; + optional bytes key = 6; } message ControlMessage { @@ -28,6 +28,7 @@ message ControlMessage { repeated ControlIWant iwant = 2; repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; } message ControlIHave { @@ -49,6 +50,10 @@ message ControlPrune { optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds) } +message ControlIDontWant { + repeated bytes message_ids = 1; +} + message PeerInfo { optional bytes peer_id = 1; optional bytes signed_peer_record = 2; diff --git 
a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index bdf58b74fc2..284ba7cab01 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -18,13 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::peer_score::RejectReason; -use crate::MessageId; -use crate::ValidationError; -use libp2p_identity::PeerId; use std::collections::HashMap; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::{peer_score::RejectReason, MessageId, ValidationError}; + /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] pub(crate) struct GossipPromises { @@ -41,6 +41,14 @@ impl GossipPromises { self.promises.contains_key(message) } + /// Get the peers we sent IWANT the input message id. + pub(crate) fn peers_for_message(&self, message_id: &MessageId) -> Vec { + self.promises + .get(message_id) + .map(|peers| peers.keys().copied().collect()) + .unwrap_or_default() + } + /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting. pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) { for message_id in messages { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 5f9669c02c2..f93e993a854 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -18,27 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{GossipsubCodec, ProtocolConfig}; -use crate::rpc::Receiver; -use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; -use crate::ValidationError; -use asynchronous_codec::Framed; -use futures::future::Either; -use futures::prelude::*; -use futures::StreamExt; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, -}; -use libp2p_swarm::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + +use asynchronous_codec::Framed; +use futures::{future::Either, prelude::*, StreamExt}; +use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, + }, + Stream, +}; use web_time::Instant; +use crate::{ + protocol::{GossipsubCodec, ProtocolConfig}, + rpc::Receiver, + rpc_proto::proto, + types::{PeerKind, RawMessage, Rpc, RpcOut}, + ValidationError, +}; + /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. #[derive(Debug)] @@ -111,7 +115,6 @@ pub struct EnabledHandler { peer_kind: Option, /// Keeps track on whether we have sent the peer kind to the behaviour. - // // NOTE: Use this flag rather than checking the substream count each poll. peer_kind_sent: bool, @@ -195,7 +198,6 @@ impl EnabledHandler { &mut self, FullyNegotiatedOutbound { protocol, .. 
}: FullyNegotiatedOutbound< ::OutboundProtocol, - ::OutboundOpenInfo, >, ) { let (substream, peer_kind) = protocol; @@ -218,7 +220,7 @@ impl EnabledHandler { ) -> Poll< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, > { @@ -226,7 +228,7 @@ impl EnabledHandler { if let Some(peer_kind) = self.peer_kind.as_ref() { self.peer_kind_sent = true; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::PeerKind(peer_kind.clone()), + HandlerEvent::PeerKind(*peer_kind), )); } } @@ -424,7 +426,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type OutboundProtocol = ProtocolConfig; - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { match self { Handler::Enabled(handler) => { SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ()) @@ -459,9 +461,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { match self { Handler::Enabled(handler) => handler.poll(cx), Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => { @@ -480,12 +480,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match self { Handler::Enabled(handler) => { diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index f6a51da4a51..87db1b771d1 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -43,22 +43,23 @@ //! implementations, due to undefined elements in the current specification. //! //! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. -//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this -//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 +//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and +//! this is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 //! encoded) by setting the `hash_topics` configuration parameter to true. //! //! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in -//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned -//! integers. When messages are signed, they are monotonically increasing integers starting from a -//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. -//! NOTE: These numbers are sequential in the current go implementation. +//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence +//! numbers in this implementation are sent as raw bytes across the wire. They are 64-bit +//! big-endian unsigned integers. When messages are signed, they are monotonically increasing +//! integers starting from a random value and wrapping around u64::MAX. When messages are +//! unsigned, they are chosen at random. NOTE: These numbers are sequential in the current go +//! implementation. //! //! # Peer Discovery //! //! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which -//! 
peers in a p2p network exchange information about each other among other reasons to become resistant -//! against the failure or replacement of the +//! peers in a p2p network exchange information about each other among other reasons to become +//! resistant against the failure or replacement of the //! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network. //! //! Peer @@ -111,22 +112,24 @@ mod topic; mod transform; mod types; -pub use self::behaviour::{Behaviour, Event, MessageAuthenticity}; -pub use self::config::{Config, ConfigBuilder, ValidationMode, Version}; -pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}; -pub use self::metrics::Config as MetricsConfig; -pub use self::peer_score::{ - score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, - TopicScoreParams, +pub use self::{ + behaviour::{Behaviour, Event, MessageAuthenticity}, + config::{Config, ConfigBuilder, ValidationMode, Version}, + error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}, + metrics::Config as MetricsConfig, + peer_score::{ + score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, + PeerScoreThresholds, TopicScoreParams, + }, + subscription_filter::{ + AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters, + MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter, + WhitelistSubscriptionFilter, + }, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}, }; -pub use self::subscription_filter::{ - AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters, - MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter, - WhitelistSubscriptionFilter, -}; -pub use self::topic::{Hasher, Topic, TopicHash}; -pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}; #[deprecated(note = "Will be removed from the public API.")] pub type Rpc = self::types::Rpc; diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index aa65e3b7f1d..8ed71ea07f2 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::topic::TopicHash; -use crate::types::{MessageId, RawMessage}; -use libp2p_identity::PeerId; -use std::collections::hash_map::Entry; -use std::fmt::Debug; use std::{ - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, fmt, + fmt::Debug, +}; + +use libp2p_identity::PeerId; + +use crate::{ + topic::TopicHash, + types::{MessageId, RawMessage}, }; /// CacheEntry stored in the history. @@ -210,7 +213,7 @@ impl MessageCache { &mut self, message_id: &MessageId, ) -> Option<(RawMessage, HashSet)> { - //We only remove the message from msgs and iwant_count and keep the message_id in the + // We only remove the message from msgs and iwant_count and keep the message_id in the // history vector. Zhe id in the history vector will simply be ignored on popping. 
self.iwant_counts.remove(message_id); diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs index 40af1af2cac..42dedc000b7 100644 --- a/protocols/gossipsub/src/metrics.rs +++ b/protocols/gossipsub/src/metrics.rs @@ -23,15 +23,21 @@ use std::collections::HashMap; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::{Family, MetricConstructor}; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{linear_buckets, Histogram}; -use prometheus_client::registry::Registry; - -use crate::topic::TopicHash; -use crate::types::{MessageAcceptance, PeerKind}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::{Family, MetricConstructor}, + gauge::Gauge, + histogram::{linear_buckets, Histogram}, + }, + registry::Registry, +}; + +use crate::{ + topic::TopicHash, + types::{MessageAcceptance, PeerKind}, +}; // Default value that limits for how many topics do we store metrics. const DEFAULT_MAX_TOPICS: usize = 300; @@ -100,7 +106,7 @@ type EverSubscribed = bool; /// A collection of metrics used throughout the Gossipsub behaviour. pub(crate) struct Metrics { - /* Configuration parameters */ + // Configuration parameters /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded. max_topics: usize, /// Maximum number of topics for which we store metrics, where the topic in not one to which we @@ -108,11 +114,11 @@ pub(crate) struct Metrics { /// from received messages and not explicit application subscriptions. max_never_subscribed_topics: usize, - /* Auxiliary variables */ + // Auxiliary variables /// Information needed to decide if a topic is allowed or not. topic_info: HashMap, - /* Metrics per known topic */ + // Metrics per known topic /// Status of our subscription to this topic. This metric allows analyzing other topic metrics /// filtered by our current subscription status. topic_subscription_status: Family, @@ -134,7 +140,7 @@ pub(crate) struct Metrics { /// The number of messages that timed out and could not be sent. timedout_messages_dropped: Family, - /* Metrics regarding mesh state */ + // Metrics regarding mesh state /// Number of peers in our mesh. This metric should be updated with the count of peers for a /// topic in the mesh regardless of inclusion and churn events. mesh_peer_counts: Family, @@ -143,7 +149,7 @@ pub(crate) struct Metrics { /// Number of times we remove peers in a topic mesh for different reasons. mesh_peer_churn_events: Family, - /* Metrics regarding messages sent/received */ + // Metrics regarding messages sent/received /// Number of gossip messages sent to each topic. topic_msg_sent_counts: Family, /// Bytes from gossip messages sent to each topic. @@ -158,13 +164,13 @@ pub(crate) struct Metrics { /// Bytes received from gossip messages for each topic. topic_msg_recv_bytes: Family, - /* Metrics related to scoring */ + // Metrics related to scoring /// Histogram of the scores for each mesh topic. score_per_mesh: Family, /// A counter of the kind of penalties being applied to peers. scoring_penalties: Family, - /* General Metrics */ + // General Metrics /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based /// on which protocol they support. This metric keeps track of the number of peers that are /// connected of each type. 
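The per-topic metrics documented above all follow the same prometheus_client pattern: a labelled Family is registered once and then incremented per topic. A self-contained sketch of that pattern, with an invented label type and metric name, might look as follows.

use prometheus_client::{
    encoding::EncodeLabelSet,
    metrics::{counter::Counter, family::Family},
    registry::Registry,
};

// Invented label type; the crate keys its families by topic hash and protocol instead.
#[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)]
struct TopicLabel {
    topic: String,
}

fn main() {
    let mut registry = Registry::default();
    let msgs_sent = Family::<TopicLabel, Counter>::default();
    registry.register(
        "topic_msg_sent_counts",
        "Number of gossip messages sent to each topic",
        msgs_sent.clone(),
    );

    // Bump the counter for a single topic.
    msgs_sent
        .get_or_create(&TopicLabel { topic: "test".into() })
        .inc();
}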
@@ -172,7 +178,7 @@ pub(crate) struct Metrics { /// The time it takes to complete one iteration of the heartbeat. heartbeat_duration: Histogram, - /* Performance metrics */ + // Performance metrics /// When the user validates a message, it tries to re propagate it to its mesh peers. If the /// message expires from the memcache before it can be validated, we count this a cache miss /// and it is an indicator that the memcache size should be increased. @@ -181,6 +187,12 @@ /// topic. A very high metric might indicate an underperforming network. topic_iwant_msgs: Family, + /// The number of times we have received an IDONTWANT control message. + idontwant_msgs: Counter, + + /// The total number of message ids we have received across all IDONTWANT control messages. + idontwant_msgs_ids: Counter, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -318,6 +330,27 @@ impl Metrics { "topic_iwant_msgs", "Number of times we have decided an IWANT is required for this topic" ); + + let idontwant_msgs = { + let metric = Counter::default(); + registry.register( + "idontwant_msgs", + "The number of times we have received an IDONTWANT control message", + metric.clone(), + ); + metric + }; + + let idontwant_msgs_ids = { + let metric = Counter::default(); + registry.register( + "idontwant_msgs_ids", + "The total number of message ids received across all IDONTWANT control messages.", + metric.clone(), + ); + metric + }; + let memcache_misses = { + let metric = Counter::default(); + registry.register( @@ -370,6 +403,8 @@ impl Metrics { heartbeat_duration, memcache_misses, topic_iwant_msgs, + idontwant_msgs, + idontwant_msgs_ids, priority_queue_size, non_priority_queue_size, } @@ -414,7 +449,7 @@ impl Metrics { } } - /* Mesh related methods */ + // Mesh related methods /// Registers the subscription to a topic if the configured limits allow it. /// Sets the registered number of peers in the mesh to 0. @@ -568,6 +603,12 @@ impl Metrics { } } + /// Register receiving an IDONTWANT control message and the number of message ids it carried. + pub(crate) fn register_idontwant(&mut self, msgs: usize) { + self.idontwant_msgs.inc(); + self.idontwant_msgs_ids.inc_by(msgs as u64); + } + /// Observes a heartbeat duration. pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) { self.heartbeat_duration.observe(millis as f64); diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index e8d1a6e5f97..33573ebeacc 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -18,25 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! //! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour.
-use crate::metrics::{Metrics, Penalty}; -use crate::time_cache::TimeCache; -use crate::{MessageId, TopicHash}; +use std::{ + collections::{hash_map, HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + use libp2p_identity::PeerId; -use std::collections::{hash_map, HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; use web_time::Instant; +use crate::{ + metrics::{Metrics, Penalty}, + time_cache::TimeCache, + MessageId, TopicHash, +}; + mod params; -use crate::ValidationError; pub use params::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, }; +use crate::ValidationError; + #[cfg(test)] mod tests; @@ -96,8 +102,9 @@ impl Default for PeerStats { } impl PeerStats { - /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied parameters score the - /// topic, inserts the default stats and returns a reference to those. If neither apply, returns None. + /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied + /// parameters score the topic, inserts the default stats and returns a reference to those. + /// If neither apply, returns None. pub(crate) fn stats_or_default_mut( &mut self, topic_hash: TopicHash, @@ -285,12 +292,14 @@ impl PeerScore { } // P3b: - // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so + // this detracts. let p3b = topic_stats.mesh_failure_penalty; topic_score += p3b * topic_params.mesh_failure_penalty_weight; // P4: invalid messages - // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so + // this detracts. let p4 = topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries; topic_score += p4 * topic_params.invalid_message_deliveries_weight; @@ -391,8 +400,8 @@ impl PeerScore { } // we don't decay retained scores, as the peer is not active. - // this way the peer cannot reset a negative score by simply disconnecting and reconnecting, - // unless the retention period has elapsed. + // this way the peer cannot reset a negative score by simply disconnecting and + // reconnecting, unless the retention period has elapsed. // similarly, a well behaved peer does not lose its score by getting disconnected. return true; } @@ -638,7 +647,8 @@ impl PeerScore { } } - /// Similar to `reject_message` except does not require the message id or reason for an invalid message. + /// Similar to `reject_message` except does not require the message id or reason for an invalid + /// message. 
pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { tracing::debug!( peer=%from, @@ -679,8 +689,8 @@ impl PeerScore { } if let RejectReason::ValidationIgnored = reason { - // we were explicitly instructed by the validator to ignore the message but not penalize - // the peer + // we were explicitly instructed by the validator to ignore the message but not + // penalize the peer record.status = DeliveryStatus::Ignored; record.peers.clear(); return; @@ -882,13 +892,14 @@ impl PeerScore { .get(topic_hash) .expect("Topic must exist if there are known topic_stats"); - // check against the mesh delivery window -- if the validated time is passed as 0, then - // the message was received before we finished validation and thus falls within the mesh + // check against the mesh delivery window -- if the validated time is passed as + // 0, then the message was received before we finished + // validation and thus falls within the mesh // delivery window. let mut falls_in_mesh_deliver_window = true; if let Some(validated_time) = validated_time { if let Some(now) = &now { - //should always be true + // should always be true let window_time = validated_time .checked_add(topic_params.mesh_message_deliveries_window) .unwrap_or(*now); diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index ae70991f7fb..cc48df8f61b 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ b/protocols/gossipsub/src/peer_score/params.rs @@ -18,10 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + use crate::TopicHash; -use std::collections::{HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; /// The default number of seconds for a decay interval. const DEFAULT_DECAY_INTERVAL: u64 = 1; @@ -117,12 +120,13 @@ pub struct PeerScoreParams { /// P6: IP-colocation factor. /// The parameter has an associated counter which counts the number of peers with the same IP. - /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the value - /// is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`. - /// If the number of peers in the same IP is less than the threshold, then the value is 0. - /// The weight of the parameter MUST be negative, unless you want to disable for testing. - /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0 - /// thus disabling the IP colocation penalty. + /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the + /// value is the square of the difference, ie `(peers_in_same_ip - + /// ip_colocation_threshold)^2`. If the number of peers in the same IP is less than the + /// threshold, then the value is 0. The weight of the parameter MUST be negative, unless + /// you want to disable for testing. Note: In order to simulate many IPs in a manageable + /// manner when testing, you can set the weight to 0 thus disabling the IP + /// colocation penalty. pub ip_colocation_factor_weight: f64, pub ip_colocation_factor_threshold: f64, pub ip_colocation_factor_whitelist: HashSet, @@ -239,16 +243,16 @@ pub struct TopicScoreParams { /// P1: time in the mesh /// This is the time the peer has been grafted in the mesh. 
- /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap` - /// The weight of the parameter must be positive (or zero to disable). + /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by + /// `time_in_mesh_cap` The weight of the parameter must be positive (or zero to disable). pub time_in_mesh_weight: f64, pub time_in_mesh_quantum: Duration, pub time_in_mesh_cap: f64, /// P2: first message deliveries /// This is the number of message deliveries in the topic. - /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped - /// by `first_message_deliveries_cap`. + /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, + /// and capped by `first_message_deliveries_cap`. /// The weight of the parameter MUST be positive (or zero to disable). pub first_message_deliveries_weight: f64, pub first_message_deliveries_decay: f64, @@ -264,8 +268,8 @@ pub struct TopicScoreParams { /// before we have forwarded it to them. /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`. /// If the counter exceeds the threshold, its value is 0. - /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of - /// the deficit, ie (`message_deliveries_threshold - counter)^2` + /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square + /// of the deficit, ie (`message_deliveries_threshold - counter)^2` /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh. /// The weight of the parameter MUST be negative (or zero to disable). pub mesh_message_deliveries_weight: f64, diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 064e277eed7..9e20cea2dde 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -20,9 +20,7 @@ /// A collection of unit tests mostly ported from the go implementation. use super::*; - -use crate::types::RawMessage; -use crate::{IdentTopic as Topic, Message}; +use crate::{types::RawMessage, IdentTopic as Topic, Message}; // estimates a value within variance fn within_variance(value: f64, expected: f64, variance: f64) -> bool { @@ -447,7 +445,8 @@ fn test_score_mesh_message_deliveries_decay() { } let score_a = peer_score.score(&peer_id_a); - // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + // the penalty is the difference between the threshold and the (decayed) + // mesh deliveries, squared. let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count; let penalty = deficit * deficit; let expected = diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 8d33fe51a90..7ee6d5c8245 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -18,15 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
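The squared-deficit shape used by the mesh-delivery (P3) and IP-colocation (P6) parameters described above can be made concrete with a tiny standalone sketch; none of this is the crate's code, and the threshold, counter and weight values are made up.

// Zero at or above the threshold, squared deficit below it, scaled by a weight
// that the parameter docs above require to be negative (so the result detracts from the score).
fn deficit_penalty(counter: f64, threshold: f64, weight: f64) -> f64 {
    if counter >= threshold {
        return 0.0;
    }
    let deficit = threshold - counter;
    weight * deficit * deficit
}

fn main() {
    // Meeting the threshold costs nothing.
    assert_eq!(deficit_penalty(5.0, 5.0, -1.0), 0.0);
    // Two deliveries short of a threshold of 5 gives a deficit of 2, squared to 4.
    assert_eq!(deficit_penalty(3.0, 5.0, -1.0), -4.0);
}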
-use crate::config::ValidationMode; -use crate::handler::HandlerEvent; -use crate::rpc_proto::proto; -use crate::topic::TopicHash; -use crate::types::{ - ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, - Subscription, SubscriptionAction, -}; -use crate::ValidationError; +use std::{convert::Infallible, pin::Pin}; + use asynchronous_codec::{Decoder, Encoder, Framed}; use byteorder::{BigEndian, ByteOrder}; use bytes::BytesMut; @@ -35,11 +28,26 @@ use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::Writer; -use std::convert::Infallible; -use std::pin::Pin; + +use crate::{ + config::ValidationMode, + handler::HandlerEvent, + rpc_proto::proto, + topic::TopicHash, + types::{ + ControlAction, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, + RawMessage, Rpc, Subscription, SubscriptionAction, + }, + ValidationError, +}; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; +pub(crate) const GOSSIPSUB_1_2_0_PROTOCOL: ProtocolId = ProtocolId { + protocol: StreamProtocol::new("/meshsub/1.2.0"), + kind: PeerKind::Gossipsubv1_2, +}; + pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId { protocol: StreamProtocol::new("/meshsub/1.1.0"), kind: PeerKind::Gossipsubv1_1, @@ -69,7 +77,11 @@ impl Default for ProtocolConfig { Self { max_transmit_size: 65536, validation_mode: ValidationMode::Strict, - protocol_ids: vec![GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL], + protocol_ids: vec![ + GOSSIPSUB_1_2_0_PROTOCOL, + GOSSIPSUB_1_1_0_PROTOCOL, + GOSSIPSUB_1_0_0_PROTOCOL, + ], } } } @@ -136,7 +148,7 @@ where } } -/* Gossip codec for the framing */ +// Gossip codec for the framing pub struct GossipsubCodec { /// Determines the level of validation performed on incoming messages. @@ -476,10 +488,25 @@ impl Decoder for GossipsubCodec { })); } + let idontwant_msgs: Vec = rpc_control + .idontwant + .into_iter() + .map(|idontwant| { + ControlAction::IDontWant(IDontWant { + message_ids: idontwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + }) + .collect(); + control_msgs.extend(ihave_msgs); control_msgs.extend(iwant_msgs); control_msgs.extend(graft_msgs); control_msgs.extend(prune_msgs); + control_msgs.extend(idontwant_msgs); } Ok(Some(HandlerEvent::Message { @@ -506,13 +533,14 @@ impl Decoder for GossipsubCodec { #[cfg(test)] mod tests { - use super::*; - use crate::config::Config; - use crate::{Behaviour, ConfigBuilder, MessageAuthenticity}; - use crate::{IdentTopic as Topic, Version}; use libp2p_identity::Keypair; use quickcheck::*; + use super::*; + use crate::{ + config::Config, Behaviour, ConfigBuilder, IdentTopic as Topic, MessageAuthenticity, Version, + }; + #[derive(Clone, Debug)] struct Message(RawMessage); diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index c90e46a85da..41b338267e9 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{stream::Peekable, Stream, StreamExt}; use std::{ future::Future, pin::Pin, @@ -29,6 +28,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{stream::Peekable, Stream, StreamExt}; + use crate::types::RpcOut; /// `RpcOut` sender that is priority aware. 
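A side note on the protocol list change above: because /meshsub/1.2.0 is now placed first among the defaults, version selection degrades gracefully and a peer that only speaks 1.1 or 1.0 is never sent IDONTWANT. A rough standalone sketch of that preference-ordered matching follows (this is not the actual multistream-select code).

// Pick the first of our preference-ordered protocols that the remote also supports.
fn pick_protocol(ours: &[&'static str], theirs: &[&'static str]) -> Option<&'static str> {
    ours.iter().copied().find(|p| theirs.contains(p))
}

fn main() {
    let ours = ["/meshsub/1.2.0", "/meshsub/1.1.0", "/meshsub/1.0.0"];

    // A v1.2-capable peer lands on /meshsub/1.2.0 ...
    assert_eq!(
        pick_protocol(&ours, &["/meshsub/1.2.0", "/meshsub/1.1.0"]),
        Some("/meshsub/1.2.0")
    );
    // ... while a v1.1-only peer falls back and will never receive IDONTWANT.
    assert_eq!(
        pick_protocol(&ours, &["/meshsub/1.1.0", "/meshsub/1.0.0"]),
        Some("/meshsub/1.1.0")
    );
}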
@@ -88,7 +89,7 @@ impl Sender { | RpcOut::Prune(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => &self.priority_sender, - RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) => { + RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) | RpcOut::IDontWant(_) => { &self.non_priority_sender } }; diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index 94c7aafbc3e..2f6832a01a1 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -26,12 +26,12 @@ pub(crate) mod proto { #[cfg(test)] mod test { - use crate::rpc_proto::proto::compat; - use crate::IdentTopic as Topic; use libp2p_identity::PeerId; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use rand::Rng; + use crate::{rpc_proto::proto::compat, IdentTopic as Topic}; + #[test] fn test_multi_topic_message_compatibility() { let topic1 = Topic::new("t1").hash(); diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 02bb9b4eab6..c051b6c333b 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::types::Subscription; -use crate::TopicHash; use std::collections::{BTreeSet, HashMap, HashSet}; +use crate::{types::Subscription, TopicHash}; + pub trait TopicSubscriptionFilter { /// Returns true iff the topic is of interest and we can subscribe to it. fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool; @@ -82,7 +82,7 @@ pub trait TopicSubscriptionFilter { } } -//some useful implementers +// some useful implementers /// Allows all subscriptions #[derive(Default, Clone)] @@ -199,7 +199,7 @@ where } } -///A subscription filter that filters topics based on a regular expression. +/// A subscription filter that filters topics based on a regular expression. pub struct RegexSubscriptionFilter(pub regex::Regex); impl TopicSubscriptionFilter for RegexSubscriptionFilter { diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index a3e5c01ac4c..ace02606e88 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -20,13 +20,18 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. -use fnv::FnvHashMap; -use std::collections::hash_map::{ - self, - Entry::{Occupied, Vacant}, +use std::{ + collections::{ + hash_map::{ + self, + Entry::{Occupied, Vacant}, + }, + VecDeque, + }, + time::Duration, }; -use std::collections::VecDeque; -use std::time::Duration; + +use fnv::FnvHashMap; use web_time::Instant; struct ExpiringElement { @@ -206,7 +211,7 @@ mod test { cache.insert("t"); assert!(!cache.insert("t")); cache.insert("e"); - //assert!(!cache.insert("t")); + // assert!(!cache.insert("t")); assert!(!cache.insert("e")); // sleep until cache expiry std::thread::sleep(Duration::from_millis(101)); diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index a73496b53f2..4793c23a8e1 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -18,12 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::rpc_proto::proto; +use std::fmt; + use base64::prelude::*; use prometheus_client::encoding::EncodeLabelSet; use quick_protobuf::Writer; use sha2::{Digest, Sha256}; -use std::fmt; + +use crate::rpc_proto::proto; /// A generic trait that can be extended for various hashing types for a topic. pub trait Hasher { diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index bb1916fefd0..6681eca1d93 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,19 +19,19 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. -use crate::rpc::Sender; -use crate::TopicHash; +use std::{collections::BTreeSet, fmt, fmt::Debug}; + use futures_timer::Delay; +use hashlink::LinkedHashMap; use libp2p_identity::PeerId; use libp2p_swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; use quick_protobuf::MessageWrite; -use std::fmt::Debug; -use std::{collections::BTreeSet, fmt}; - -use crate::rpc_proto::proto; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use web_time::Instant; + +use crate::{rpc::Sender, rpc_proto::proto, TopicHash}; /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default)] @@ -42,7 +42,8 @@ pub struct FailedMessages { pub forward: usize, /// The number of messages that were failed to be sent to the priority queue as it was full. pub priority: usize, - /// The number of messages that were failed to be sent to the non-priority queue as it was full. + /// The number of messages that were failed to be sent to the non-priority queue as it was + /// full. pub non_priority: usize, /// The number of messages that timed out and could not be sent. pub timeout: usize, @@ -110,11 +111,15 @@ pub(crate) struct PeerConnections { pub(crate) topics: BTreeSet, /// The rpc sender to the connection handler(s). pub(crate) sender: Sender, + /// Don't send messages. + pub(crate) dont_send: LinkedHashMap, } /// Describes the types of peers that can exist in the gossipsub context. -#[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Hash, EncodeLabelValue, Eq)] pub enum PeerKind { + /// A gossipsub 1.2 peer. + Gossipsubv1_2, /// A gossipsub 1.1 peer. Gossipsubv1_1, /// A gossipsub 1.0 peer. @@ -150,6 +155,16 @@ pub struct RawMessage { pub validated: bool, } +impl PeerKind { + /// Returns true if peer speaks any gossipsub version. + pub(crate) fn is_gossipsub(&self) -> bool { + matches!( + self, + Self::Gossipsubv1_2 | Self::Gossipsubv1_1 | Self::Gossipsub + ) + } +} + impl RawMessage { /// Calculates the encoded length of this message (used for calculating metrics). pub fn raw_protobuf_len(&self) -> usize { @@ -230,9 +245,9 @@ pub enum SubscriptionAction { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub(crate) struct PeerInfo { pub(crate) peer_id: Option, - //TODO add this when RFC: Signed Address Records got added to the spec (see pull request + // TODO add this when RFC: Signed Address Records got added to the spec (see pull request // https://github.com/libp2p/specs/pull/217) - //pub signed_peer_record: ?, + // pub signed_peer_record: ?, } /// A Control message received by the gossipsub system. @@ -240,12 +255,16 @@ pub(crate) struct PeerInfo { pub enum ControlAction { /// Node broadcasts known messages per topic - IHave control message. IHave(IHave), - /// The node requests specific message ids (peer_id + sequence _number) - IWant control message. 
+ /// The node requests specific message ids (peer_id + sequence _number) - IWant control + /// message. IWant(IWant), /// The node has been added to the mesh - Graft control message. Graft(Graft), /// The node has been removed from the mesh - Prune control message. Prune(Prune), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. + IDontWant(IDontWant), } /// Node broadcasts known messages per topic - IHave control message. @@ -282,6 +301,13 @@ pub struct Prune { pub(crate) backoff: Option, } +/// The node requests us to not forward message ids - IDontWant control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IDontWant { + /// A list of known message ids. + pub(crate) message_ids: Vec, +} + /// A Gossipsub RPC message sent. #[derive(Debug)] pub enum RpcOut { @@ -303,6 +329,9 @@ pub enum RpcOut { IHave(IHave), /// Send a IWant control message. IWant(IWant), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. + IDontWant(IDontWant), } impl RpcOut { @@ -363,6 +392,7 @@ impl From for proto::RPC { iwant: vec![], graft: vec![], prune: vec![], + idontwant: vec![], }), }, RpcOut::IWant(IWant { message_ids }) => proto::RPC { @@ -375,6 +405,7 @@ impl From for proto::RPC { }], graft: vec![], prune: vec![], + idontwant: vec![], }), }, RpcOut::Graft(Graft { topic_hash }) => proto::RPC { @@ -387,6 +418,7 @@ impl From for proto::RPC { topic_id: Some(topic_hash.into_string()), }], prune: vec![], + idontwant: vec![], }), }, RpcOut::Prune(Prune { @@ -413,9 +445,23 @@ impl From for proto::RPC { .collect(), backoff, }], + idontwant: vec![], }), } } + RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + }), + }, } } } @@ -474,6 +520,7 @@ impl From for proto::RPC { iwant: Vec::new(), graft: Vec::new(), prune: Vec::new(), + idontwant: Vec::new(), }; let empty_control_msg = rpc.control_msgs.is_empty(); @@ -522,6 +569,12 @@ impl From for proto::RPC { }; control.prune.push(rpc_prune); } + ControlAction::IDontWant(IDontWant { message_ids }) => { + let rpc_idontwant = proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }; + control.idontwant.push(rpc_idontwant); + } } } @@ -560,6 +613,7 @@ impl PeerKind { Self::Floodsub => "Floodsub", Self::Gossipsub => "Gossipsub v1.0", Self::Gossipsubv1_1 => "Gossipsub v1.1", + Self::Gossipsubv1_2 => "Gossipsub v1.2", } } } diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index 3b6261afa54..d5fec2c1985 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -18,17 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
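To round off the types.rs changes above: the new dont_send field pairs each unwanted MessageId with the Instant it was received and keeps insertion order, which is what lets a heartbeat drop stale entries and evict the oldest ones first. A rough sketch of that bookkeeping with hashlink; the capacity and age cutoff here are invented, and std::time::Instant stands in for the web_time type the crate uses.

use std::time::{Duration, Instant};

use hashlink::LinkedHashMap;
use libp2p_gossipsub::MessageId;

fn main() {
    const MAX_IDONTWANT_ENTRIES: usize = 1_000; // invented bound
    let idontwant_ttl = Duration::from_secs(3); // invented cutoff

    let mut dont_send: LinkedHashMap<MessageId, Instant> = LinkedHashMap::new();
    dont_send.insert(MessageId::new(&[0, 1, 2, 3]), Instant::now());

    // Cap the map by evicting the oldest entries first.
    while dont_send.len() > MAX_IDONTWANT_ENTRIES {
        dont_send.pop_front();
    }

    // Drop anything older than the cutoff, as a heartbeat sweep would.
    let now = Instant::now();
    let expired: Vec<MessageId> = dont_send
        .iter()
        .filter(|(_, received)| now.duration_since(**received) >= idontwant_ttl)
        .map(|(id, _)| id.clone())
        .collect();
    for id in expired {
        dont_send.remove(&id);
    }

    assert!(dont_send.contains_key(&MessageId::new(&[0, 1, 2, 3])));
}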
-use futures::stream::{FuturesUnordered, SelectAll}; -use futures::StreamExt; +use std::{task::Poll, time::Duration}; + +use futures::{ + stream::{FuturesUnordered, SelectAll}, + StreamExt, +}; use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; -use std::{task::Poll, time::Duration}; use tokio::{runtime::Runtime, time}; -use tracing_subscriber::EnvFilter; struct Graph { nodes: SelectAll>, @@ -129,9 +131,7 @@ async fn build_node() -> Swarm { #[test] fn multi_hop_propagation() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(num_nodes: u8, seed: u64) -> TestResult { if !(2..=50).contains(&num_nodes) { diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 9051c331bbc..66c839dfa98 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,12 +1,11 @@ ## 0.46.0 -- Make `identify::Config` fields private and add getter functions. - See [PR 5663](https://github.com/libp2p/rust-libp2p/pull/5663). - -## 0.45.1 - - Add `hide_listen_addrs` option to prevent leaking (local) listen addresses. See [PR 5507](https://github.com/libp2p/rust-libp2p/pull/5507). +- Make `identify::Config` fields private and add getter functions. + See [PR 5663](https://github.com/libp2p/rust-libp2p/pull/5663). +- Discard `Info`s received from remote peers that contain a public key that doesn't match their peer ID. + See [PR 5707](https://github.com/libp2p/rust-libp2p/pull/5707). ## 0.45.0 diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index d7f6b6eca76..9c4f8ea3707 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -18,7 +18,6 @@ futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -lru = "0.12.3" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" smallvec = "1.13.2" @@ -30,7 +29,7 @@ either = "1.12.0" async-std = { version = "1.6.2", features = ["attributes"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-swarm = { workspace = true, features = ["macros"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index b69f2014d81..0cd27d90717 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -18,28 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::handler::{self, Handler, InEvent}; -use crate::protocol::{Info, UpgradeError}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr, ConnectedPoint, Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + num::NonZeroUsize, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::{ + multiaddr, multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr, +}; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ - ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, - NotifyHandler, PeerAddresses, StreamUpgradeError, THandlerInEvent, ToSwarm, - _address_translation, + behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + ConnectionDenied, ConnectionId, DialError, ExternalAddresses, ListenAddresses, + NetworkBehaviour, NotifyHandler, PeerAddresses, StreamUpgradeError, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, _address_translation, }; -use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; -use std::collections::hash_map::Entry; -use std::num::NonZeroUsize; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::Context, - task::Poll, - time::Duration, +use crate::{ + handler::{self, Handler, InEvent}, + protocol::{Info, UpgradeError}, }; /// Whether an [`Multiaddr`] is a valid for the QUIC transport. @@ -323,7 +322,8 @@ impl Behaviour { .contains(&connection_id) { // Apply address translation to the candidate address. - // For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address. + // For TCP without port-reuse, the observed address contains an ephemeral port which + // needs to be replaced by the port of a listen address. let translated_addresses = { let mut addrs: Vec<_> = self .listen_addresses @@ -398,7 +398,8 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { // Contrary to inbound events, outbound events are full-p2p qualified // so we remove /p2p/ in order to be homogeneous - // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not fully-p2p-qualified) + // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not + // fully-p2p-qualified) let mut addr = addr.clone(); if matches!(addr.iter().last(), Some(multiaddr::Protocol::P2p(_))) { addr.pop(); @@ -415,7 +416,9 @@ impl NetworkBehaviour for Behaviour { self.config.local_public_key.clone(), self.config.protocol_version.clone(), self.config.agent_version.clone(), - addr.clone(), // TODO: This is weird? That is the public address we dialed, shouldn't need to tell the other party? + // TODO: This is weird? That is the public address we dialed, + // shouldn't need to tell the other party? + addr.clone(), self.all_addresses(), )) } diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index dd073d50ed6..7acdfceb0a6 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -18,29 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{Info, PushInfo, UpgradeError}; -use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +use std::{ + collections::HashSet, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; use futures::prelude::*; use futures_bounded::Timeout; use futures_timer::Delay; -use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ProtocolSupport, +use libp2p_core::{ + upgrade::{ReadyUpgrade, SelectUpgrade}, + Multiaddr, }; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ + handler::{ + ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ProtocolSupport, + }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; use smallvec::SmallVec; -use std::collections::HashSet; -use std::{task::Context, task::Poll, time::Duration}; use tracing::Level; +use crate::{ + protocol, + protocol::{Info, PushInfo, UpgradeError}, + PROTOCOL_NAME, PUSH_PROTOCOL_NAME, +}; + const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; @@ -150,10 +159,7 @@ impl Handler { &mut self, FullyNegotiatedInbound { protocol: output, .. - }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, + }: FullyNegotiatedInbound<::InboundProtocol>, ) { match output { future::Either::Left(stream) => { @@ -189,10 +195,7 @@ impl Handler { &mut self, FullyNegotiatedOutbound { protocol: output, .. - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, + }: FullyNegotiatedOutbound<::OutboundProtocol>, ) { match output { future::Either::Left(stream) => { @@ -233,10 +236,17 @@ impl Handler { } } - fn handle_incoming_info(&mut self, info: &Info) { + /// If the public key matches the remote peer, handles the given `info` and returns `true`. 
+ fn handle_incoming_info(&mut self, info: &Info) -> bool { + let derived_peer_id = info.public_key.to_peer_id(); + if self.remote_peer_id != derived_peer_id { + return false; + } + self.remote_info.replace(info.clone()); self.update_supported_protocols_for_remote(info); + true } fn update_supported_protocols_for_remote(&mut self, remote_info: &Info) { @@ -287,7 +297,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new( SelectUpgrade::new( ReadyUpgrade::new(PROTOCOL_NAME), @@ -318,7 +328,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { if let Some(event) = self.events.pop() { return Poll::Ready(event); } @@ -335,45 +345,61 @@ impl ConnectionHandler for Handler { return Poll::Ready(event); } - match self.active_streams.poll_unpin(cx) { - Poll::Ready(Ok(Ok(Success::ReceivedIdentify(remote_info)))) => { - self.handle_incoming_info(&remote_info); - - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Identified( - remote_info, - ))); - } - Poll::Ready(Ok(Ok(Success::SentIdentifyPush(info)))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationPushed(info), - )); - } - Poll::Ready(Ok(Ok(Success::SentIdentify))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::Identification, - )); - } - Poll::Ready(Ok(Ok(Success::ReceivedIdentifyPush(remote_push_info)))) => { - if let Some(mut info) = self.remote_info.clone() { - info.merge(remote_push_info); - self.handle_incoming_info(&info); - + while let Poll::Ready(ready) = self.active_streams.poll_unpin(cx) { + match ready { + Ok(Ok(Success::ReceivedIdentify(remote_info))) => { + if self.handle_incoming_info(&remote_info) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identified(remote_info), + )); + } else { + tracing::warn!( + %self.remote_peer_id, + ?remote_info.public_key, + derived_peer_id=%remote_info.public_key.to_peer_id(), + "Discarding received identify message as public key does not match remote peer ID", + ); + } + } + Ok(Ok(Success::SentIdentifyPush(info))) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::Identified(info), + Event::IdentificationPushed(info), )); - }; - } - Poll::Ready(Ok(Err(e))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationError(StreamUpgradeError::Apply(e)), - )); - } - Poll::Ready(Err(Timeout { .. 
})) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationError(StreamUpgradeError::Timeout), - )); + } + Ok(Ok(Success::SentIdentify)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identification, + )); + } + Ok(Ok(Success::ReceivedIdentifyPush(remote_push_info))) => { + if let Some(mut info) = self.remote_info.clone() { + info.merge(remote_push_info); + + if self.handle_incoming_info(&info) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identified(info), + )); + } else { + tracing::warn!( + %self.remote_peer_id, + ?info.public_key, + derived_peer_id=%info.public_key.to_peer_id(), + "Discarding received identify message as public key does not match remote peer ID", + ); + } + } + } + Ok(Err(e)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Apply(e)), + )); + } + Err(Timeout { .. }) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Timeout), + )); + } } - Poll::Pending => {} } Poll::Pending @@ -381,12 +407,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs index 7d28e5b5cc7..868ace87aeb 100644 --- a/protocols/identify/src/lib.rs +++ b/protocols/identify/src/lib.rs @@ -28,10 +28,10 @@ //! //! # Important Discrepancies //! -//! - **Using Identify with other protocols** Unlike some other libp2p implementations, -//! rust-libp2p does not treat Identify as a core protocol. This means that other protocols cannot -//! rely upon the existence of Identify, and need to be manually hooked up to Identify in order to -//! make use of its capabilities. +//! - **Using Identify with other protocols** Unlike some other libp2p implementations, rust-libp2p +//! does not treat Identify as a core protocol. This means that other protocols cannot rely upon +//! the existence of Identify, and need to be manually hooked up to Identify in order to make use +//! of its capabilities. //! //! # Usage //! @@ -41,8 +41,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use self::behaviour::{Behaviour, Config, Event}; -pub use self::protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +pub use self::{ + behaviour::{Behaviour, Config, Event}, + protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}, +}; mod behaviour; mod handler; diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index f4dfd544dd1..257ec1f88d2 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -18,16 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; +use std::io; + use asynchronous_codec::{FramedRead, FramedWrite}; use futures::prelude::*; use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use std::io; use thiserror::Error; +use crate::proto; + const MAX_MESSAGE_SIZE_BYTES: usize = 4096; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0"); @@ -37,7 +39,7 @@ pub const PUSH_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/pus /// Identify information of a peer sent in protocol messages. #[derive(Debug, Clone)] pub struct Info { - /// The public key of the local peer. + /// The public key of the peer. pub public_key: PublicKey, /// Application-specific version of the protocol family used by the peer, /// e.g. `ipfs/1.0.0` or `polkadot/1.0.0`. @@ -77,7 +79,8 @@ impl Info { } /// Identify push information of a peer sent in protocol messages. -/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed. +/// Note that missing fields should be ignored, as peers may choose to send partial updates +/// containing only the fields whose values have changed. #[derive(Debug, Clone)] pub struct PushInfo { pub public_key: Option, @@ -264,9 +267,10 @@ pub enum UpgradeError { #[cfg(test)] mod tests { - use super::*; use libp2p_identity as identity; + use super::*; + #[test] fn skip_invalid_multiaddr() { let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap(); diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index d624005408e..a152bd75b19 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,17 +1,18 @@ +use std::{ + collections::HashSet, + iter, + time::{Duration, Instant}, +}; + use futures::StreamExt; use libp2p_identify as identify; +use libp2p_identity::Keypair; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::collections::HashSet; -use std::iter; -use std::time::{Duration, Instant}; -use tracing_subscriber::EnvFilter; #[async_std::test] async fn periodic_identify() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -34,8 +35,7 @@ async fn periodic_identify() { let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; swarm2.connect(&mut swarm1).await; - use identify::Event::Received; - use identify::Event::Sent; + use identify::Event::{Received, Sent}; match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { ( @@ -67,7 +67,8 @@ async fn periodic_identify() { assert_eq!(s2_info.agent_version, "b"); assert!(!s2_info.protocols.is_empty()); - // Cannot assert observed address of dialer because memory transport uses ephemeral, outgoing ports. + // Cannot assert observed address of dialer because memory transport uses ephemeral, + // outgoing ports. 
// assert_eq!( // s2_info.observed_addr, // swarm2_memory_listen.with(Protocol::P2p(swarm2_peer_id.into())) @@ -80,9 +81,7 @@ async fn periodic_identify() { } #[async_std::test] async fn only_emits_address_candidate_once_per_connection() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -152,9 +151,7 @@ async fn only_emits_address_candidate_once_per_connection() { #[async_std::test] async fn emits_unique_listen_addresses() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -224,9 +221,7 @@ async fn emits_unique_listen_addresses() { #[async_std::test] async fn hides_listen_addresses() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -295,9 +290,7 @@ async fn hides_listen_addresses() { #[async_std::test] async fn identify_push() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -347,9 +340,7 @@ async fn identify_push() { #[async_std::test] async fn discover_peer_after_disconnect() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -400,9 +391,7 @@ async fn discover_peer_after_disconnect() { #[async_std::test] async fn configured_interval_starts_after_first_identify() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let identify_interval = Duration::from_secs(5); @@ -437,3 +426,43 @@ async fn configured_interval_starts_after_first_identify() { assert!(time_to_first_identify < identify_interval) } + +#[async_std::test] +async fn reject_mismatched_public_key() { + libp2p_test_utils::with_default_env_filter(); + + let mut honest_swarm = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_interval(Duration::from_secs(1)), + ) + }); + let mut spoofing_swarm = Swarm::new_ephemeral(|_unused_identity| { + let arbitrary_public_key = Keypair::generate_ed25519().public(); + identify::Behaviour::new( + identify::Config::new("a".to_string(), arbitrary_public_key) + .with_interval(Duration::from_secs(1)), + ) + }); + + honest_swarm.listen().with_memory_addr_external().await; + spoofing_swarm.connect(&mut honest_swarm).await; + + spoofing_swarm + .wait(|event| { + matches!(event, SwarmEvent::Behaviour(identify::Event::Sent { .. })).then_some(()) + }) + .await; + + let honest_swarm_events = futures::stream::poll_fn(|cx| honest_swarm.poll_next_unpin(cx)) + .take(4) + .collect::>() + .await; + + assert!( + !honest_swarm_events + .iter() + .any(|e| matches!(e, SwarmEvent::Behaviour(identify::Event::Received { .. 
}))), + "should emit no received events as received public key won't match remote peer", + ); +} diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 55d269bf98f..0c6e460afcd 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -6,6 +6,14 @@ See [PR 5573](https://github.com/libp2p/rust-libp2p/pull/5573). - Add `Behavior::find_closest_local_peers()`. See [PR 5645](https://github.com/libp2p/rust-libp2p/pull/5645). +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). +- Expose Distance private field U256 to public. + See [PR 5705](https://github.com/libp2p/rust-libp2p/pull/5705). +- Fix systematic memory allocation when iterating over `KBuckets`. + See [PR 5715](https://github.com/libp2p/rust-libp2p/pull/5715). +- Remove deprecated default constructor for `ProtocolConfig`. + See [PR 5774](https://github.com/libp2p/rust-libp2p/pull/5774). ## 0.46.2 diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 295414f6ddd..757c0aed189 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -11,7 +11,6 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -arrayvec = "0.7.4" bytes = "1" either = "1.11" fnv = "1.0" @@ -26,7 +25,7 @@ libp2p-identity = { workspace = true, features = ["rand"] } rand = "0.8" sha2 = "0.10.8" smallvec = "1.13.2" -uint = "0.9" +uint = "0.10" futures-timer = "3.0.3" web-time = { workspace = true } serde = { version = "1.0", optional = true, features = ["derive"] } @@ -42,7 +41,7 @@ libp2p-swarm = { path = "../../swarm", features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [features] serde = ["dep:serde", "bytes/serde"] diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 0b3dc71e649..c2168be661e 100644 --- a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::fmt; + use libp2p_core::Multiaddr; use smallvec::SmallVec; -use std::fmt; /// A non-empty list of (unique) addresses of a peer in the routing table. /// Every address must be a fully-qualified /p2p address. 
diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index f577971167f..04ebe7d8174 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -22,41 +22,46 @@ mod test; -use crate::addresses::Addresses; -use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; -use crate::kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}; -use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; -use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}; -use crate::record::{ - self, - store::{self, RecordStore}, - ProviderRecord, Record, +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + fmt, + num::NonZeroUsize, + task::{Context, Poll, Waker}, + time::Duration, + vec, }; -use crate::{bootstrap, K_VALUE}; -use crate::{jobs::*, protocol}; + use fnv::FnvHashSet; use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, -}; use libp2p_swarm::{ + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, dial_opts::{self, DialOpts}, ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::fmt; -use std::num::NonZeroUsize; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; -use std::vec; use thiserror::Error; use tracing::Level; use web_time::Instant; pub use crate::query::QueryStats; +use crate::{ + addresses::Addresses, + bootstrap, + handler::{Handler, HandlerEvent, HandlerIn, RequestId}, + jobs::*, + kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}, + protocol, + protocol::{ConnectionType, KadPeer, ProtocolConfig}, + query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}, + record::{ + self, + store::{self, RecordStore}, + ProviderRecord, Record, + }, + K_VALUE, +}; /// `Behaviour` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. @@ -157,8 +162,9 @@ pub enum StoreInserts { /// the record is forwarded immediately to the [`RecordStore`]. Unfiltered, /// Whenever a (provider) record is received, an event is emitted. - /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`], - /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. + /// Provider records generate a [`InboundRequest::AddProvider`] under + /// [`Event::InboundRequest`], normal records generate a [`InboundRequest::PutRecord`] + /// under [`Event::InboundRequest`]. /// /// When deemed valid, a (provider) record needs to be explicitly stored in /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`], @@ -205,9 +211,10 @@ pub enum Caching { /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty. Disabled, /// Up to `max_peers` peers not returning a record that are closest to the key - /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`]. - /// The write-back operation must be performed explicitly, if - /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`]. + /// being looked up are tracked and returned in + /// [`GetRecordOk::FinishedWithNoAdditionalRecord`]. 
The write-back operation must be + /// performed explicitly, if desired and after choosing a record from the results, via + /// [`Behaviour::put_record_to`]. Enabled { max_peers: u16 }, } @@ -231,29 +238,6 @@ impl Config { } } - /// Returns the default configuration. - #[deprecated(note = "Use `Config::new` instead")] - #[allow(clippy::should_implement_trait)] - pub fn default() -> Self { - Default::default() - } - - /// Sets custom protocol names. - /// - /// Kademlia nodes only communicate with other nodes using the same protocol - /// name. Using custom name(s) therefore allows to segregate the DHT from - /// others, if that is desired. - /// - /// More than one protocol name can be supplied. In this case the node will - /// be able to talk to other nodes supporting any of the provided names. - /// Multiple names must be used with caution to avoid network partitioning. - #[deprecated(note = "Use `Config::new` instead")] - #[allow(deprecated)] - pub fn set_protocol_names(&mut self, names: Vec) -> &mut Self { - self.protocol_config.set_protocol_names(names); - self - } - /// Sets the timeout for a single query. /// /// > **Note**: A single query usually comprises at least as many requests @@ -427,6 +411,8 @@ impl Config { /// Sets the configuration for the k-buckets. /// /// * Default to K_VALUE. + /// + /// **WARNING**: setting a `size` higher that `K_VALUE` may imply additional memory allocations. pub fn set_kbucket_size(&mut self, size: NonZeroUsize) -> &mut Self { self.kbucket_config.set_bucket_size(size); self @@ -442,16 +428,17 @@ impl Config { self } - /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted in the routing table. - /// This prevent cascading bootstrap requests when multiple peers are inserted into the routing table "at the same time". - /// This also allows to wait a little bit for other potential peers to be inserted into the routing table before - /// triggering a bootstrap, giving more context to the future bootstrap request. + /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted + /// in the routing table. This prevent cascading bootstrap requests when multiple peers are + /// inserted into the routing table "at the same time". This also allows to wait a little + /// bit for other potential peers to be inserted into the routing table before triggering a + /// bootstrap, giving more context to the future bootstrap request. /// /// * Default to `500` ms. - /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a new peer - /// is inserted in the routing table. - /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when a new - /// peer is inserted in the routing table). + /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a + /// new peer is inserted in the routing table. + /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when + /// a new peer is inserted in the routing table). #[cfg(test)] pub(crate) fn set_automatic_bootstrap_throttle( &mut self, @@ -573,15 +560,13 @@ where /// /// Explicitly adding addresses of peers serves two purposes: /// - /// 1. In order for a node to join the DHT, it must know about at least - /// one other node of the DHT. + /// 1. In order for a node to join the DHT, it must know about at least one other node of the + /// DHT. /// - /// 2. 
When a remote peer initiates a connection and that peer is not - /// yet in the routing table, the `Kademlia` behaviour must be - /// informed of an address on which that peer is listening for - /// connections before it can be added to the routing table - /// from where it can subsequently be discovered by all peers - /// in the DHT. + /// 2. When a remote peer initiates a connection and that peer is not yet in the routing + /// table, the `Kademlia` behaviour must be informed of an address on which that peer is + /// listening for connections before it can be added to the routing table from where it can + /// subsequently be discovered by all peers in the DHT. /// /// If the routing table has been updated as a result of this operation, /// a [`Event::RoutingUpdated`] event is emitted. @@ -983,7 +968,8 @@ where /// /// > **Note**: Bootstrap does not require to be called manually. It is periodically /// > invoked at regular intervals based on the configured `periodic_bootstrap_interval` (see - /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically invoked + /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically + /// > invoked /// > when a new peer is inserted in the routing table. /// > This parameter is used to call [`Behaviour::bootstrap`] periodically and automatically /// > to ensure a healthy routing table. @@ -1107,10 +1093,12 @@ where /// Set the [`Mode`] in which we should operate. /// - /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. + /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we + /// have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. /// - /// Setting a mode via this function disables this automatic behaviour and unconditionally operates in the specified mode. - /// To reactivate the automatic configuration, pass [`None`] instead. + /// Setting a mode via this function disables this automatic behaviour and unconditionally + /// operates in the specified mode. To reactivate the automatic configuration, pass [`None`] + /// instead. pub fn set_mode(&mut self, mode: Option) { match mode { Some(mode) => { @@ -1191,8 +1179,8 @@ where "Previous match arm handled empty list" ); - // Previously, server-mode, now also server-mode because > 1 external address. Don't log anything to avoid spam. - + // Previously, server-mode, now also server-mode because > 1 external address. + // Don't log anything to avoid spam. Mode::Server } }; @@ -2157,7 +2145,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are waiting + /// to be sent to the newly connected peer. fn preload_new_handler( &mut self, handler: &mut Handler, @@ -2755,7 +2744,6 @@ pub struct PeerRecord { #[allow(clippy::large_enum_variant)] pub enum Event { /// An inbound request has been received and handled. - // // Note on the difference between 'request' and 'query': A request is a // single request-response style exchange with a single remote peer. A query // is made of multiple requests across multiple remote peers. 
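The `Config` methods touched in the hunks above (`set_kbucket_size`, `set_mode`, the bootstrap throttling) are used together roughly as follows. This is a minimal sketch, not part of the patch, assuming the `Behaviour::with_config` constructor and the `MemoryStore` record store from `libp2p-kad`; the concrete values are arbitrary examples:

    use std::num::NonZeroUsize;

    use libp2p_identity::PeerId;
    use libp2p_kad::{store::MemoryStore, Behaviour, Config, Mode, PROTOCOL_NAME};

    fn build_kad(local_peer_id: PeerId) -> Behaviour<MemoryStore> {
        let mut config = Config::new(PROTOCOL_NAME);
        // 20 is K_VALUE, i.e. the default bucket size. Per the warning added above,
        // choosing a size greater than K_VALUE may imply extra allocations when
        // iterating over the closest peers.
        config.set_kbucket_size(NonZeroUsize::new(20).expect("20 > 0"));

        let store = MemoryStore::new(local_peer_id);
        let mut behaviour = Behaviour::with_config(local_peer_id, store, config);

        // Operate unconditionally as a server instead of waiting for a confirmed
        // external address, as described in the `set_mode` docs above.
        behaviour.set_mode(Some(Mode::Server));
        behaviour
    }

Leaving the mode as `None` restores the automatic client/server switching driven by `FromSwarm::ExternalAddrConfirmed`.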
diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 7409168ac2a..ab8c980c30c 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -20,10 +20,6 @@ #![cfg(test)] -use super::*; - -use crate::record::{store::MemoryStore, Key}; -use crate::{K_VALUE, PROTOCOL_NAME, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; use libp2p_core::{ @@ -39,6 +35,12 @@ use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; +use super::*; +use crate::{ + record::{store::MemoryStore, Key}, + K_VALUE, PROTOCOL_NAME, SHA_256_MH, +}; + type TestSwarm = Swarm>; fn build_node() -> (Multiaddr, TestSwarm) { @@ -62,8 +64,7 @@ fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) { transport, behaviour, local_id, - swarm::Config::with_async_std_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), + swarm::Config::with_async_std_executor(), ); let address: Multiaddr = Protocol::Memory(random::()).into(); @@ -164,7 +165,8 @@ fn bootstrap() { let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); let mut cfg = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. cfg.set_periodic_bootstrap_interval(None); cfg.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -246,7 +248,8 @@ fn query_iter() { fn run(rng: &mut impl Rng) { let num_total = rng.gen_range(2..20); let mut config = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); let mut swarms = build_connected_nodes_with_config(num_total, 1, config) @@ -320,9 +323,7 @@ fn query_iter() { #[test] fn unresponsive_not_returned_direct() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a // random peer. We make sure that no fake address is returned. @@ -561,7 +562,8 @@ fn put_record() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -933,7 +935,8 @@ fn add_provider() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. 
config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -1161,7 +1164,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. config.set_parallelism(NonZeroUsize::new(2).unwrap()); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering + // automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); @@ -1375,7 +1379,7 @@ fn network_behaviour_on_address_change() { port_use: PortUse::Reuse, }; - // Mimick a connection being established. + // Mimic a connection being established. kademlia.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: remote_peer_id, connection_id, @@ -1397,7 +1401,7 @@ fn network_behaviour_on_address_change() { .unwrap() .is_empty()); - // Mimick the connection handler confirming the protocol for + // Mimic the connection handler confirming the protocol for // the test connection, so that the peer is added to the routing table. kademlia.on_connection_handler_event( remote_peer_id, diff --git a/protocols/kad/src/bootstrap.rs b/protocols/kad/src/bootstrap.rs index 40acdfd88ee..d6576a3ef54 100644 --- a/protocols/kad/src/bootstrap.rs +++ b/protocols/kad/src/bootstrap.rs @@ -1,7 +1,9 @@ -use futures::FutureExt; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; +use std::{ + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::FutureExt; use futures_timer::Delay; /// Default value chosen at ``. @@ -9,18 +11,18 @@ pub(crate) const DEFAULT_AUTOMATIC_THROTTLE: Duration = Duration::from_millis(50 #[derive(Debug)] pub(crate) struct Status { - /// If the user did not disable periodic bootstrap (by providing `None` for `periodic_interval`) - /// this is the periodic interval and the delay of the current period. When `Delay` finishes, - /// a bootstrap will be triggered and the `Delay` will be reset. + /// If the user did not disable periodic bootstrap (by providing `None` for + /// `periodic_interval`) this is the periodic interval and the delay of the current period. + /// When `Delay` finishes, a bootstrap will be triggered and the `Delay` will be reset. interval_and_delay: Option<(Duration, Delay)>, /// Configured duration to wait before triggering a bootstrap when a new peer /// is inserted in the routing table. `None` if automatic bootstrap is disabled. automatic_throttle: Option, /// Timer that will be set (if automatic bootstrap is not disabled) when a new peer is inserted - /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to `None` - /// again. If an other new peer is inserted in the routing table before this timer finishes, - /// the timer is reset. + /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to + /// `None` again. If an other new peer is inserted in the routing table before this timer + /// finishes, the timer is reset. throttle_timer: Option, /// Number of bootstrap requests currently in progress. We ensure neither periodic bootstrap @@ -108,16 +110,19 @@ impl Status { // A `throttle_timer` has been registered. It means one or more peers have been // inserted into the routing table and that a bootstrap request should be triggered. 
// However, to not risk cascading bootstrap requests, we wait a little time to ensure - // the user will not add more peers in the routing table in the next "throttle_timer" remaining. + // the user will not add more peers in the routing table in the next "throttle_timer" + // remaining. if throttle_delay.poll_unpin(cx).is_ready() { // The `throttle_timer` is finished, triggering bootstrap right now. // The call to `on_started` will reset `throttle_delay`. return Poll::Ready(()); } - // The `throttle_timer` is not finished but the periodic interval for triggering bootstrap might be reached. + // The `throttle_timer` is not finished but the periodic interval for triggering + // bootstrap might be reached. } else { - // No new peer has recently been inserted into the routing table or automatic bootstrap is disabled. + // No new peer has recently been inserted into the routing table or automatic bootstrap + // is disabled. } // Checking if the user has enabled the periodic bootstrap feature. @@ -131,7 +136,8 @@ impl Status { // The user disabled periodic bootstrap. } - // Registering the `waker` so that we can wake up when calling `on_new_peer_in_routing_table`. + // Registering the `waker` so that we can wake up when calling + // `on_new_peer_in_routing_table`. self.waker = Some(cx.waker().clone()); Poll::Pending } @@ -175,9 +181,10 @@ impl futures::Future for ThrottleTimer { #[cfg(test)] mod tests { - use super::*; use web_time::Instant; + use super::*; + const MS_5: Duration = Duration::from_millis(5); const MS_100: Duration = Duration::from_millis(100); @@ -296,7 +303,8 @@ mod tests { let elapsed = Instant::now().duration_since(start); - assert!(elapsed > (i * MS_100 - Duration::from_millis(10))); // Subtract 10ms to avoid flakes. + // Subtract 10ms to avoid flakes. + assert!(elapsed > (i * MS_100 - Duration::from_millis(10))); } } @@ -308,7 +316,8 @@ mod tests { status.trigger(); for _ in 0..10 { Delay::new(MS_100 / 2).await; - status.trigger(); // should reset throttle_timer + // should reset throttle_timer + status.trigger(); } assert!( status.next().now_or_never().is_none(), @@ -330,9 +339,12 @@ mod tests { ) { let mut status = Status::new(Some(MS_100), None); - status.on_started(); // first manually triggering - status.on_started(); // second manually triggering - status.on_finish(); // one finishes + // first manually triggering + status.on_started(); + // second manually triggering + status.on_started(); + // one finishes + status.on_finish(); assert!( async_std::future::timeout(10 * MS_100, status.next()) diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 384ebc3f2b1..b848cf94410 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -18,27 +18,33 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::Mode; -use crate::protocol::{ - KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, +use std::{ + collections::VecDeque, + error, fmt, io, + marker::PhantomData, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, }; -use crate::record::{self, Record}; -use crate::QueryId; + use either::Either; -use futures::channel::oneshot; -use futures::prelude::*; -use futures::stream::SelectAll; +use futures::{channel::oneshot, prelude::*, stream::SelectAll}; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ + handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; -use std::collections::VecDeque; -use std::task::Waker; -use std::time::Duration; -use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; + +use crate::{ + behaviour::Mode, + protocol::{ + KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, + }, + record::{self, Record}, + QueryId, +}; const MAX_NUM_STREAMS: usize = 32; @@ -470,10 +476,7 @@ impl Handler { FullyNegotiatedOutbound { protocol: stream, info: (), - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, + }: FullyNegotiatedOutbound<::OutboundProtocol>, ) { if let Some(sender) = self.pending_streams.pop_front() { let _ = sender.send(Ok(stream)); @@ -494,7 +497,6 @@ impl Handler { &mut self, FullyNegotiatedInbound { protocol, .. }: FullyNegotiatedInbound< ::InboundProtocol, - ::InboundOpenInfo, >, ) { // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and `protocol` @@ -550,7 +552,8 @@ impl Handler { }); } - /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol + /// handshake using a [`oneshot::channel`]. 
fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { let (sender, receiver) = oneshot::channel(); @@ -601,7 +604,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { match self.mode { Mode::Server => SubstreamProtocol::new(Either::Left(self.protocol_config.clone()), ()), Mode::Client => SubstreamProtocol::new(Either::Right(upgrade::DeniedUpgrade), ()), @@ -712,9 +715,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { loop { match &mut self.protocol_status { Some(status) if !status.reported => { @@ -781,12 +782,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { @@ -1060,9 +1056,9 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven #[cfg(test)] mod tests { - use super::*; use quickcheck::{Arbitrary, Gen}; - use tracing_subscriber::EnvFilter; + + use super::*; impl Arbitrary for ProtocolStatus { fn arbitrary(g: &mut Gen) -> Self { @@ -1075,9 +1071,7 @@ mod tests { #[test] fn compute_next_protocol_status_test() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(now_supported: bool, current: Option) { let new = compute_new_protocol_status(now_supported, current); diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index 537f652b7a4..56b3e080d96 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -25,12 +25,11 @@ //! To ensure persistence of records in the DHT, a Kademlia node //! must periodically (re-)publish and (re-)replicate its records: //! -//! 1. (Re-)publishing: The original publisher or provider of a record -//! must regularly re-publish in order to prolong the expiration. +//! 1. (Re-)publishing: The original publisher or provider of a record must regularly re-publish +//! in order to prolong the expiration. //! -//! 2. (Re-)replication: Every node storing a replica of a record must -//! regularly re-replicate it to the closest nodes to the key in -//! order to ensure the record is present at these nodes. +//! 2. (Re-)replication: Every node storing a replica of a record must regularly re-replicate it +//! to the closest nodes to the key in order to ensure the record is present at these nodes. //! //! Re-publishing primarily ensures persistence of the record beyond its //! initial TTL, for as long as the publisher stores (or provides) the record, @@ -41,11 +40,10 @@ //! //! This module implements two periodic jobs: //! -//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of -//! regular (value-)records. +//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of regular (value-)records. //! -//! * [`AddProviderJob`]: For (re-)publication of provider records. -//! Provider records currently have no separate replication mechanism. +//! * [`AddProviderJob`]: For (re-)publication of provider records. Provider records currently +//! have no separate replication mechanism. //! //! A periodic job is driven like a `Future` or `Stream` by `poll`ing it. //! 
Once a job starts running it emits records to send to the `k` closest @@ -61,17 +59,21 @@ //! > to the size of all stored records. As a job runs, the records are moved //! > out of the job to the consumer, where they can be dropped after being sent. -use crate::record::{self, store::RecordStore, ProviderRecord, Record}; +use std::{ + collections::HashSet, + pin::Pin, + task::{Context, Poll}, + time::Duration, + vec, +}; + use futures::prelude::*; use futures_timer::Delay; use libp2p_identity::PeerId; -use std::collections::HashSet; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::vec; use web_time::Instant; +use crate::record::{self, store::RecordStore, ProviderRecord, Record}; + /// The maximum number of queries towards which background jobs /// are allowed to start new queries on an invocation of /// `Behaviour::poll`. @@ -203,7 +205,7 @@ impl PutRecordJob { T: RecordStore, { if self.inner.check_ready(cx, now) { - let publish = self.next_publish.map_or(false, |t_pub| now >= t_pub); + let publish = self.next_publish.is_some_and(|t_pub| now >= t_pub); let records = store .records() .filter_map(|r| { @@ -335,12 +337,13 @@ impl AddProviderJob { #[cfg(test)] mod tests { - use super::*; - use crate::record::store::MemoryStore; use futures::{executor::block_on, future::poll_fn}; use quickcheck::*; use rand::Rng; + use super::*; + use crate::record::store::MemoryStore; + fn rand_put_record_job() -> PutRecordJob { let mut rng = rand::thread_rng(); let id = PeerId::random(); diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 28d7df03917..f32c34e9bb7 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -72,13 +72,12 @@ mod entry; #[allow(clippy::assign_op_pattern)] mod key; -pub use bucket::NodeStatus; -pub use entry::*; +use std::{collections::VecDeque, num::NonZeroUsize, time::Duration}; use bucket::KBucket; -use std::collections::VecDeque; -use std::num::NonZeroUsize; -use std::time::Duration; +pub use bucket::NodeStatus; +pub use entry::*; +use smallvec::SmallVec; use web_time::Instant; /// Maximum number of k-buckets. @@ -177,7 +176,7 @@ impl BucketIndex { let lower = usize::pow(2, rem); let upper = usize::pow(2, rem + 1); bytes[31 - quot] = rng.gen_range(lower..upper) as u8; - Distance(U256::from(bytes)) + Distance(U256::from_big_endian(bytes.as_slice())) } } @@ -284,11 +283,8 @@ where iter: None, table: self, buckets_iter: ClosestBucketsIter::new(distance), - fmap: move |b: &KBucket| -> Vec<_> { - let mut vec = Vec::with_capacity(bucket_size); - vec.extend(b.iter().map(|(n, _)| n.key.clone())); - vec - }, + fmap: |(n, _status): (&Node, NodeStatus)| n.key.clone(), + bucket_size, } } @@ -309,15 +305,11 @@ where iter: None, table: self, buckets_iter: ClosestBucketsIter::new(distance), - fmap: move |b: &KBucket<_, TVal>| -> Vec<_> { - b.iter() - .take(bucket_size) - .map(|(n, status)| EntryView { - node: n.clone(), - status, - }) - .collect() + fmap: |(n, status): (&Node, NodeStatus)| EntryView { + node: n.clone(), + status, }, + bucket_size, } } @@ -360,10 +352,12 @@ struct ClosestIter<'a, TTarget, TKey, TVal, TMap, TOut> { /// distance of the local key to the target. buckets_iter: ClosestBucketsIter, /// The iterator over the entries in the currently traversed bucket. - iter: Option>, + iter: Option>, /// The projection function / mapping applied on each bucket as /// it is encountered, producing the next `iter`ator. fmap: TMap, + /// The maximal number of nodes that a bucket can contain. 
+ bucket_size: usize, } /// An iterator over the bucket indices, in the order determined by the `Distance` of @@ -465,41 +459,80 @@ where TTarget: AsRef, TKey: Clone + AsRef, TVal: Clone, - TMap: Fn(&KBucket) -> Vec, + TMap: Fn((&Node, NodeStatus)) -> TOut, TOut: AsRef, { type Item = TOut; fn next(&mut self) -> Option { loop { - match &mut self.iter { - Some(iter) => match iter.next() { - Some(k) => return Some(k), - None => self.iter = None, - }, - None => { - if let Some(i) = self.buckets_iter.next() { - let bucket = &mut self.table.buckets[i.get()]; - if let Some(applied) = bucket.apply_pending() { - self.table.applied_pending.push_back(applied) - } - let mut v = (self.fmap)(bucket); - v.sort_by(|a, b| { - self.target - .as_ref() - .distance(a.as_ref()) - .cmp(&self.target.as_ref().distance(b.as_ref())) - }); - self.iter = Some(v.into_iter()); - } else { - return None; - } + let (mut buffer, bucket_index) = if let Some(mut iter) = self.iter.take() { + if let Some(next) = iter.next() { + self.iter = Some(iter); + return Some(next); } + + let bucket_index = self.buckets_iter.next()?; + + // Reusing the same buffer so if there were any allocation, it only happen once over + // a `ClosestIter` life. + iter.buffer.clear(); + + (iter.buffer, bucket_index) + } else { + let bucket_index = self.buckets_iter.next()?; + + // Allocation only occurs if `kbucket_size` is greater than `K_VALUE`. + (SmallVec::with_capacity(self.bucket_size), bucket_index) + }; + + let bucket = &mut self.table.buckets[bucket_index.get()]; + if let Some(applied) = bucket.apply_pending() { + self.table.applied_pending.push_back(applied) } + + buffer.extend( + bucket + .iter() + .take(self.bucket_size) + .map(|e| (self.fmap)(e)) + .map(Some), + ); + buffer.sort_by(|a, b| { + let a = a.as_ref().expect("just initialized"); + let b = b.as_ref().expect("just initialized"); + self.target + .as_ref() + .distance(a.as_ref()) + .cmp(&self.target.as_ref().distance(b.as_ref())) + }); + + self.iter = Some(ClosestIterBuffer::new(buffer)); } } } +struct ClosestIterBuffer { + buffer: SmallVec<[Option; K_VALUE.get()]>, + index: usize, +} + +impl ClosestIterBuffer { + fn new(buffer: SmallVec<[Option; K_VALUE.get()]>) -> Self { + Self { buffer, index: 0 } + } +} + +impl Iterator for ClosestIterBuffer { + type Item = TOut; + + fn next(&mut self) -> Option { + let entry = self.buffer.get_mut(self.index)?; + self.index += 1; + entry.take() + } +} + /// A reference to a bucket. pub struct KBucketRef<'a, TKey, TVal> { index: BucketIndex, @@ -529,12 +562,12 @@ where /// Returns true if the bucket has a pending node. pub fn has_pending(&self) -> bool { - self.bucket.pending().map_or(false, |n| !n.is_ready()) + self.bucket.pending().is_some_and(|n| !n.is_ready()) } /// Tests whether the given distance falls into this bucket. pub fn contains(&self, d: &Distance) -> bool { - BucketIndex::new(d).map_or(false, |i| i == self.index) + BucketIndex::new(d).is_some_and(|i| i == self.index) } /// Generates a random distance that falls into this bucket. 
@@ -561,10 +594,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + type TestTable = KBucketsTable; impl Arbitrary for TestTable { @@ -651,7 +685,7 @@ mod tests { fn rand_distance() { fn prop(ix: u8) -> bool { let d = BucketIndex(ix as usize).rand_distance(&mut rand::thread_rng()); - let n = U256::from(<[u8; 32]>::from(d.0)); + let n = d.0; let b = U256::from(2); let e = U256::from(ix); let lower = b.pow(e); diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index 1426017aa7a..cbc6adf4d5c 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -225,8 +225,8 @@ where // The bucket is full with connected nodes. Drop the pending node. return None; } - debug_assert!(self.first_connected_pos.map_or(true, |p| p > 0)); // (*) - // The pending node will be inserted. + debug_assert!(self.first_connected_pos.is_none_or(|p| p > 0)); // (*) + // The pending node will be inserted. let inserted = pending.node.clone(); // A connected pending node goes at the end of the list for // the connected peers, removing the least-recently connected. @@ -311,19 +311,18 @@ where /// /// The status of the node to insert determines the result as follows: /// - /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected - /// or there is already a pending node, insertion fails with `InsertResult::Full`. - /// If the bucket is full but at least one node is disconnected and there is no pending - /// node, the new node is inserted as pending, yielding `InsertResult::Pending`. - /// Otherwise the bucket has free slots and the new node is added to the end of the - /// bucket as the most-recently connected node. + /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected or + /// there is already a pending node, insertion fails with `InsertResult::Full`. If the + /// bucket is full but at least one node is disconnected and there is no pending node, the + /// new node is inserted as pending, yielding `InsertResult::Pending`. Otherwise the bucket + /// has free slots and the new node is added to the end of the bucket as the most-recently + /// connected node. /// /// * `NodeStatus::Disconnected`: If the bucket is full, insertion fails with - /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node - /// is inserted at the position preceding the first connected node, - /// i.e. as the most-recently disconnected node. If there are no connected nodes, - /// the new node is added as the last element of the bucket. - /// + /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node is inserted + /// at the position preceding the first connected node, i.e. as the most-recently + /// disconnected node. If there are no connected nodes, the new node is added as the last + /// element of the bucket. pub(crate) fn insert( &mut self, node: Node, @@ -377,7 +376,7 @@ where // Adjust `first_connected_pos` accordingly. match status { NodeStatus::Connected => { - if self.first_connected_pos.map_or(false, |p| p == pos.0) + if self.first_connected_pos.is_some_and(|p| p == pos.0) && pos.0 == self.nodes.len() { // It was the last connected node. @@ -398,7 +397,7 @@ where /// Returns the status of the node at the given position. 
pub(crate) fn status(&self, pos: Position) -> NodeStatus { - if self.first_connected_pos.map_or(false, |i| pos.0 >= i) { + if self.first_connected_pos.is_some_and(|i| pos.0 >= i) { NodeStatus::Connected } else { NodeStatus::Disconnected @@ -443,10 +442,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + impl Arbitrary for KBucket, ()> { fn arbitrary(g: &mut Gen) -> KBucket, ()> { let timeout = Duration::from_secs(g.gen_range(1..g.size()) as u64); diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs index 808db08d858..bdf8b9b5a18 100644 --- a/protocols/kad/src/kbucket/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -23,7 +23,6 @@ pub(crate) use super::bucket::{AppliedPending, InsertResult, Node, K_VALUE}; pub use super::key::*; - use super::*; /// An immutable by-reference view of a bucket entry. diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index f35849c6b26..ce14a3f779a 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -18,18 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::record; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use libp2p_core::multihash::Multihash; use libp2p_identity::PeerId; -use sha2::digest::generic_array::{typenum::U32, GenericArray}; -use sha2::{Digest, Sha256}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; +use sha2::{ + digest::generic_array::{typenum::U32, GenericArray}, + Digest, Sha256, +}; use uint::*; +use crate::record; + construct_uint! { /// 256-bit unsigned integer. - pub(super) struct U256(4); + pub struct U256(4); } /// A `Key` in the DHT keyspace with preserved preimage. @@ -163,8 +169,8 @@ impl KeyBytes { where U: AsRef, { - let a = U256::from(self.0.as_slice()); - let b = U256::from(other.as_ref().0.as_slice()); + let a = U256::from_big_endian(self.0.as_slice()); + let b = U256::from_big_endian(other.as_ref().0.as_slice()); Distance(a ^ b) } @@ -174,8 +180,8 @@ impl KeyBytes { /// /// `self xor other = distance <==> other = self xor distance` pub fn for_distance(&self, d: Distance) -> KeyBytes { - let key_int = U256::from(self.0.as_slice()) ^ d.0; - KeyBytes(GenericArray::from(<[u8; 32]>::from(key_int))) + let key_int = U256::from_big_endian(self.0.as_slice()) ^ d.0; + KeyBytes(GenericArray::from(key_int.to_big_endian())) } } @@ -187,7 +193,7 @@ impl AsRef for KeyBytes { /// A distance between two keys in the DHT keyspace. #[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)] -pub struct Distance(pub(super) U256); +pub struct Distance(pub U256); impl Distance { /// Returns the integer part of the base 2 logarithm of the [`Distance`]. 
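The switch to `from_big_endian`/`to_big_endian` above makes the byte order of the 256-bit key explicit. A small sketch of the XOR distance over big-endian digests (assumes only the `uint` crate that `construct_uint!` comes from; this is not the crate's `KeyBytes` type):

use uint::construct_uint;

construct_uint! {
    /// 256-bit unsigned integer, mirroring the `U256` defined above.
    pub struct U256(4);
}

fn distance(a: &[u8; 32], b: &[u8; 32]) -> U256 {
    // Interpret both 32-byte digests as big-endian integers and XOR them.
    U256::from_big_endian(a) ^ U256::from_big_endian(b)
}

fn main() {
    let a = [0u8; 32];
    let mut b = [0u8; 32];
    b[31] = 1; // big-endian: the last byte is the least significant one
    assert_eq!(distance(&a, &b), U256::from(1u64));

    // `self xor other = distance  <==>  other = self xor distance`
    let d = distance(&a, &b);
    assert_eq!(U256::from_big_endian(&a) ^ d, U256::from_big_endian(&b));
}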
@@ -200,9 +206,10 @@ impl Distance { #[cfg(test)] mod tests { + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; impl Arbitrary for Key { fn arbitrary(_: &mut Gen) -> Key { diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index 060bfc518e4..91983b9aaf7 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -54,36 +54,34 @@ mod proto { }; } +use std::num::NonZeroUsize; + pub use addresses::Addresses; pub use behaviour::{ AddProviderContext, AddProviderError, AddProviderOk, AddProviderPhase, AddProviderResult, - BootstrapError, BootstrapOk, BootstrapResult, GetClosestPeersError, GetClosestPeersOk, - GetClosestPeersResult, GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError, - GetRecordOk, GetRecordResult, InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, - PutRecordContext, PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, - QueryMut, QueryRef, QueryResult, QueryStats, RoutingUpdate, -}; -pub use behaviour::{ - Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts, + Behaviour, BootstrapError, BootstrapOk, BootstrapResult, BucketInserts, Caching, Config, Event, + GetClosestPeersError, GetClosestPeersOk, GetClosestPeersResult, GetProvidersError, + GetProvidersOk, GetProvidersResult, GetRecordError, GetRecordOk, GetRecordResult, + InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, ProgressStep, PutRecordContext, + PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, QueryMut, QueryRef, + QueryResult, QueryStats, Quorum, RoutingUpdate, StoreInserts, }; pub use kbucket::{ - Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, + Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, U256, }; +use libp2p_swarm::StreamProtocol; pub use protocol::{ConnectionType, KadPeer}; pub use query::QueryId; pub use record::{store, Key as RecordKey, ProviderRecord, Record}; -use libp2p_swarm::StreamProtocol; -use std::num::NonZeroUsize; - /// The `k` parameter of the Kademlia specification. /// /// This parameter determines: /// /// 1) The (fixed) maximum number of nodes in a bucket. -/// 2) The (default) replication factor, which in turn determines: -/// a) The number of closer peers returned in response to a request. -/// b) The number of closest peers to a key to search for in an iterative query. +/// 2) The (default) replication factor, which in turn determines: a) The number of closer peers +/// returned in response to a request. b) The number of closest peers to a key to search for in +/// an iterative query. /// /// The choice of (1) is fixed to this constant. The replication factor is configurable /// but should generally be no greater than `K_VALUE`. All nodes in a Kademlia diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 9d2ef56f5d8..059b6ae6fd1 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -26,21 +26,25 @@ //! to poll the underlying transport for incoming messages, and the `Sink` component //! is used to send messages to remote peers. 
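For reference, the `K_VALUE` doc comment above ties the constant to both the fixed bucket capacity and the default replication factor. A hedged usage sketch (assumes `libp2p-kad`'s public `Config::new` and `set_replication_factor` and `libp2p-swarm`'s `StreamProtocol`, with the protocol name chosen for illustration):

use std::num::NonZeroUsize;

use libp2p_kad::{Config, K_VALUE};
use libp2p_swarm::StreamProtocol;

fn main() {
    // The fixed bucket size of the Kademlia specification.
    assert_eq!(K_VALUE.get(), 20);

    // The replication factor defaults to `K_VALUE` but can be tuned; it should
    // generally be no greater than `K_VALUE`, as the doc comment above notes.
    let mut config = Config::new(StreamProtocol::new("/ipfs/kad/1.0.0"));
    config.set_replication_factor(NonZeroUsize::new(10).expect("non-zero"));
}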
-use crate::proto; -use crate::record::{self, Record}; +use std::{io, marker::PhantomData, time::Duration}; + use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; use futures::prelude::*; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_core::Multiaddr; +use libp2p_core::{ + upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + Multiaddr, +}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::marker::PhantomData; -use std::time::Duration; -use std::{io, iter}; use tracing::debug; use web_time::Instant; +use crate::{ + proto, + record::{self, Record}, +}; + /// The protocol name used for negotiating with multistream-select. pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); /// The default maximum size for a varint length-delimited packet. @@ -152,43 +156,17 @@ impl ProtocolConfig { } } - /// Returns the default configuration. - #[deprecated(note = "Use `ProtocolConfig::new` instead")] - #[allow(clippy::should_implement_trait)] - pub fn default() -> Self { - Default::default() - } - /// Returns the configured protocol name. pub fn protocol_names(&self) -> &[StreamProtocol] { &self.protocol_names } - /// Modifies the protocol names used on the wire. Can be used to create incompatibilities - /// between networks on purpose. - #[deprecated(note = "Use `ProtocolConfig::new` instead")] - pub fn set_protocol_names(&mut self, names: Vec) { - self.protocol_names = names; - } - /// Modifies the maximum allowed size of a single Kademlia packet. pub fn set_max_packet_size(&mut self, size: usize) { self.max_packet_size = size; } } -impl Default for ProtocolConfig { - /// Returns the default configuration. - /// - /// Deprecated: use `ProtocolConfig::new` instead. - fn default() -> Self { - ProtocolConfig { - protocol_names: iter::once(DEFAULT_PROTO_NAME).collect(), - max_packet_size: DEFAULT_MAX_PACKET_SIZE, - } - } -} - impl UpgradeInfo for ProtocolConfig { type Info = StreamProtocol; type InfoIter = std::vec::IntoIter; @@ -667,92 +645,92 @@ mod tests { assert_eq!(peer.multiaddrs, vec![valid_multiaddr]) } - /*// TODO: restore - use self::libp2p_tcp::TcpTransport; - use self::tokio::runtime::current_thread::Runtime; - use futures::{Future, Sink, Stream}; - use libp2p_core::{PeerId, PublicKey, Transport}; - use multihash::{encode, Hash}; - use protocol::{ConnectionType, KadPeer, ProtocolConfig}; - use std::sync::mpsc; - use std::thread; - - #[test] - fn correct_transfer() { - // We open a server and a client, send a message between the two, and check that they were - // successfully received. 
- - test_one(KadMsg::Ping); - test_one(KadMsg::FindNodeReq { - key: PeerId::random(), - }); - test_one(KadMsg::FindNodeRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - }); - test_one(KadMsg::GetProvidersReq { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - }); - test_one(KadMsg::GetProvidersRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - provider_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], - connection_ty: ConnectionType::NotConnected, - }], - }); - test_one(KadMsg::AddProvider { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - provider_peer: KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }, - }); - // TODO: all messages - - fn test_one(msg_server: KadMsg) { - let msg_client = msg_server.clone(); - let (tx, rx) = mpsc::channel(); - - let bg_thread = thread::spawn(move || { - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let (listener, addr) = transport - .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - tx.send(addr).unwrap(); - - let future = listener - .into_future() - .map_err(|(err, _)| err) - .and_then(|(client, _)| client.unwrap().0) - .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v)) - .map(|recv_msg| { - assert_eq!(recv_msg.unwrap(), msg_server); - () - }); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - }); - - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let future = transport - .dial(rx.recv().unwrap()) - .unwrap() - .and_then(|proto| proto.send(msg_client)) - .map(|_| ()); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - bg_thread.join().unwrap(); - } - }*/ + // // TODO: restore + // use self::libp2p_tcp::TcpTransport; + // use self::tokio::runtime::current_thread::Runtime; + // use futures::{Future, Sink, Stream}; + // use libp2p_core::{PeerId, PublicKey, Transport}; + // use multihash::{encode, Hash}; + // use protocol::{ConnectionType, KadPeer, ProtocolConfig}; + // use std::sync::mpsc; + // use std::thread; + // + // #[test] + // fn correct_transfer() { + // We open a server and a client, send a message between the two, and check that they were + // successfully received. 
+ // + // test_one(KadMsg::Ping); + // test_one(KadMsg::FindNodeReq { + // key: PeerId::random(), + // }); + // test_one(KadMsg::FindNodeRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // }); + // test_one(KadMsg::GetProvidersReq { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // }); + // test_one(KadMsg::GetProvidersRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // provider_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], + // connection_ty: ConnectionType::NotConnected, + // }], + // }); + // test_one(KadMsg::AddProvider { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // provider_peer: KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }, + // }); + // TODO: all messages + // + // fn test_one(msg_server: KadMsg) { + // let msg_client = msg_server.clone(); + // let (tx, rx) = mpsc::channel(); + // + // let bg_thread = thread::spawn(move || { + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let (listener, addr) = transport + // .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) + // .unwrap(); + // tx.send(addr).unwrap(); + // + // let future = listener + // .into_future() + // .map_err(|(err, _)| err) + // .and_then(|(client, _)| client.unwrap().0) + // .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v)) + // .map(|recv_msg| { + // assert_eq!(recv_msg.unwrap(), msg_server); + // () + // }); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // }); + // + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let future = transport + // .dial(rx.recv().unwrap()) + // .unwrap() + // .and_then(|proto| proto.send(msg_client)) + // .map(|_| ()); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // bg_thread.join().unwrap(); + // } + // } } diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 1a895d9627c..69257f73b26 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -20,24 +20,27 @@ mod peers; -use libp2p_core::Multiaddr; -use peers::closest::{ - disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig, -}; -use peers::fixed::FixedPeersIter; -use peers::PeersIterState; -use smallvec::SmallVec; +use std::{num::NonZeroUsize, time::Duration}; -use crate::behaviour::PeerInfo; -use crate::handler::HandlerIn; -use crate::kbucket::{Key, KeyBytes}; -use crate::{QueryInfo, ALPHA_VALUE, K_VALUE}; use either::Either; use fnv::FnvHashMap; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use std::{num::NonZeroUsize, time::Duration}; +use peers::{ + closest::{disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig}, + fixed::FixedPeersIter, + PeersIterState, +}; +use smallvec::SmallVec; use web_time::Instant; +use crate::{ + behaviour::PeerInfo, + handler::HandlerIn, + kbucket::{Key, KeyBytes}, + QueryInfo, ALPHA_VALUE, K_VALUE, +}; + /// A `QueryPool` provides an aggregate state machine for 
driving `Query`s to completion. /// /// Internally, a `Query` is in turn driven by an underlying `QueryPeerIter` diff --git a/protocols/kad/src/query/peers.rs b/protocols/kad/src/query/peers.rs index 11b8f974de9..fe8ada51e44 100644 --- a/protocols/kad/src/query/peers.rs +++ b/protocols/kad/src/query/peers.rs @@ -23,13 +23,11 @@ //! Using a peer iterator in a query involves performing the following steps //! repeatedly and in an alternating fashion: //! -//! 1. Calling `next` to observe the next state of the iterator and determine -//! what to do, which is to either issue new requests to peers or continue -//! waiting for responses. +//! 1. Calling `next` to observe the next state of the iterator and determine what to do, which is +//! to either issue new requests to peers or continue waiting for responses. //! -//! 2. When responses are received or requests fail, providing input to the -//! iterator via the `on_success` and `on_failure` callbacks, -//! respectively, followed by repeating step (1). +//! 2. When responses are received or requests fail, providing input to the iterator via the +//! `on_success` and `on_failure` callbacks, respectively, followed by repeating step (1). //! //! When a call to `next` returns [`Finished`], no more peers can be obtained //! from the iterator and the results can be obtained from `into_result`. @@ -40,9 +38,10 @@ pub(crate) mod closest; pub(crate) mod fixed; -use libp2p_identity::PeerId; use std::borrow::Cow; +use libp2p_identity::PeerId; + /// The state of a peer iterator. #[derive(Debug, Clone, PartialEq, Eq)] pub enum PeersIterState<'a> { diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 2505ee2e9b2..2d1f91f050c 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -18,14 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{ + collections::btree_map::{BTreeMap, Entry}, + num::NonZeroUsize, + time::Duration, +}; -use crate::kbucket::{Distance, Key, KeyBytes}; -use crate::{ALPHA_VALUE, K_VALUE}; -use std::collections::btree_map::{BTreeMap, Entry}; -use std::{num::NonZeroUsize, time::Duration}; use web_time::Instant; +use super::*; +use crate::{ + kbucket::{Distance, Key, KeyBytes}, + ALPHA_VALUE, K_VALUE, +}; + pub(crate) mod disjoint; /// A peer iterator for a dynamically changing list of peers, sorted by increasing /// distance to a chosen target. @@ -494,12 +500,14 @@ enum PeerState { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; + use std::iter; + use libp2p_core::multihash::Multihash; use quickcheck::*; use rand::{rngs::StdRng, Rng, SeedableRng}; - use std::iter; + + use super::*; + use crate::SHA_256_MH; fn random_peers(n: usize, g: &mut R) -> Vec { (0..n) diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index cafe87b6ef4..a26a31b5215 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; use std::{ collections::HashMap, iter::{Cycle, Map, Peekable}, ops::{Index, IndexMut, Range}, }; +use super::*; + /// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery /// path per configured parallelism according to the S/Kademlia paper. 
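The two alternating steps described in the `peers` module docs above (observe the iterator's state, then feed responses back in) can be pictured with a toy stand-in for the crate-internal `ClosestPeersIter`; every name below is illustrative only:

enum IterState {
    /// Issue a request to this peer (a plain number stands in for a `PeerId`).
    Waiting(u32),
    /// No more peers to contact; results can be collected.
    Finished,
}

struct ToyIter {
    pending: Vec<u32>,
    responded: usize,
}

impl ToyIter {
    fn next(&mut self) -> IterState {
        match self.pending.pop() {
            Some(peer) => IterState::Waiting(peer),
            None => IterState::Finished,
        }
    }

    fn on_success(&mut self, _peer: u32) {
        self.responded += 1;
    }
}

fn main() {
    let mut iter = ToyIter { pending: vec![1, 2, 3], responded: 0 };
    loop {
        match iter.next() {
            // 1. Observe the next state and issue a request to the returned peer.
            // 2. When the response arrives, feed it back via `on_success` and repeat.
            IterState::Waiting(peer) => iter.on_success(peer),
            IterState::Finished => break,
        }
    }
    assert_eq!(iter.responded, 3);
}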
pub(crate) struct ClosestDisjointPeersIter { @@ -373,7 +374,6 @@ enum ResponseState { /// Iterator combining the result of multiple [`ClosestPeersIter`] into a single /// deduplicated ordered iterator. -// // Note: This operates under the assumption that `I` is ordered. #[derive(Clone, Debug)] struct ResultIter @@ -433,13 +433,13 @@ impl>> Iterator for ResultIter { #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, iter}; - use crate::SHA_256_MH; use libp2p_core::multihash::Multihash; use quickcheck::*; - use std::collections::HashSet; - use std::iter; + + use super::*; + use crate::SHA_256_MH; impl Arbitrary for ResultIter>> { fn arbitrary(g: &mut Gen) -> Self { @@ -552,7 +552,6 @@ mod tests { .flatten() .collect::>() .into_iter() - .map(Key::from) .collect::>(); deduplicated.sort_unstable_by(|a, b| { diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index 2d0b312454d..41cb3559f1b 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; use fnv::FnvHashMap; -use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; + +use super::*; /// A peer iterator for a fixed set of peers. pub(crate) struct FixedPeersIter { diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index cb7c4b866fc..fea17f826a4 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -22,13 +22,16 @@ pub mod store; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use bytes::Bytes; use libp2p_core::{multihash::Multihash, Multiaddr}; use libp2p_identity::PeerId; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; use web_time::Instant; /// The (opaque) key of a record. @@ -101,7 +104,7 @@ impl Record { /// Checks whether the record is expired w.r.t. the given `Instant`. pub fn is_expired(&self, now: Instant) -> bool { - self.expires.map_or(false, |t| now >= t) + self.expires.is_some_and(|t| now >= t) } } @@ -154,16 +157,18 @@ impl ProviderRecord { /// Checks whether the provider record is expired w.r.t. the given `Instant`. pub fn is_expired(&self, now: Instant) -> bool { - self.expires.map_or(false, |t| now >= t) + self.expires.is_some_and(|t| now >= t) } } #[cfg(test)] mod tests { + use std::time::Duration; + + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; - use std::time::Duration; impl Arbitrary for Key { fn arbitrary(g: &mut Gen) -> Key { diff --git a/protocols/kad/src/record/store.rs b/protocols/kad/src/record/store.rs index 5c25bc8b2fa..ee40f568bb3 100644 --- a/protocols/kad/src/record/store.rs +++ b/protocols/kad/src/record/store.rs @@ -20,12 +20,13 @@ mod memory; +use std::borrow::Cow; + pub use memory::{MemoryStore, MemoryStoreConfig}; use thiserror::Error; use super::*; use crate::K_VALUE; -use std::borrow::Cow; /// The result of an operation on a `RecordStore`. pub type Result = std::result::Result; @@ -50,20 +51,16 @@ pub enum Error { /// /// There are two types of records managed by a `RecordStore`: /// -/// 1. Regular (value-)records. These records store an arbitrary value -/// associated with a key which is distributed to the closest nodes -/// to the key in the Kademlia DHT as per the standard Kademlia "push-model". 
-/// These records are subject to re-replication and re-publication as +/// 1. Regular (value-)records. These records store an arbitrary value associated with a key which +/// is distributed to the closest nodes to the key in the Kademlia DHT as per the standard +/// Kademlia "push-model". These records are subject to re-replication and re-publication as /// per the standard Kademlia protocol. /// -/// 2. Provider records. These records associate the ID of a peer with a key -/// who can supposedly provide the associated value. These records are -/// mere "pointers" to the data which may be followed by contacting these -/// providers to obtain the value. These records are specific to the -/// libp2p Kademlia specification and realise a "pull-model" for distributed -/// content. Just like a regular record, a provider record is distributed -/// to the closest nodes to the key. -/// +/// 2. Provider records. These records associate the ID of a peer with a key who can supposedly +/// provide the associated value. These records are mere "pointers" to the data which may be +/// followed by contacting these providers to obtain the value. These records are specific to +/// the libp2p Kademlia specification and realise a "pull-model" for distributed content. Just +/// like a regular record, a provider record is distributed to the closest nodes to the key. pub trait RecordStore { type RecordsIter<'a>: Iterator> where diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 3fb6d2be3e8..28f6a55044f 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -18,12 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{ + collections::{hash_map, hash_set, HashMap, HashSet}, + iter, +}; -use crate::kbucket; use smallvec::SmallVec; -use std::collections::{hash_map, hash_set, HashMap, HashSet}; -use std::iter; + +use super::*; +use crate::kbucket; /// In-memory implementation of a `RecordStore`. 
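The two record kinds described in the `RecordStore` docs above map onto distinct trait methods. A hedged sketch against the in-memory store (assumes the public `libp2p-kad` re-exports shown earlier and `libp2p-identity` with its `rand` feature for the peer id):

use libp2p_identity::PeerId;
use libp2p_kad::store::{MemoryStore, RecordStore};
use libp2p_kad::{ProviderRecord, Record, RecordKey};

fn main() {
    let local_peer = PeerId::random();
    let mut store = MemoryStore::new(local_peer);

    // 1. A regular value record: an arbitrary value stored under a key and
    //    distributed to the nodes closest to that key ("push-model").
    let key = RecordKey::new(b"my-key");
    store
        .put(Record::new(key.clone(), b"my-value".to_vec()))
        .expect("store below capacity");
    assert!(store.get(&key).is_some());

    // 2. A provider record: a pointer saying "this peer can supply the value"
    //    ("pull-model"); the data itself is fetched from the provider.
    store
        .add_provider(ProviderRecord::new(key.clone(), local_peer, Vec::new()))
        .expect("provider stored");
    assert_eq!(store.providers(&key).len(), 1);
}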
pub struct MemoryStore { @@ -208,11 +211,12 @@ impl RecordStore for MemoryStore { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; use quickcheck::*; use rand::Rng; + use super::*; + use crate::SHA_256_MH; + fn random_multihash() -> Multihash<64> { Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap() } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 2c8d11beac7..09e24c6f6ea 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -1,18 +1,14 @@ use libp2p_identify as identify; use libp2p_identity as identity; -use libp2p_kad::store::MemoryStore; -use libp2p_kad::{Behaviour, Config, Event, Mode}; +use libp2p_kad::{store::MemoryStore, Behaviour, Config, Event, Mode}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use tracing_subscriber::EnvFilter; use Event::*; use MyBehaviourEvent::*; #[async_std::test] async fn server_gets_added_to_routing_table_by_client() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -42,9 +38,7 @@ async fn server_gets_added_to_routing_table_by_client() { #[async_std::test] async fn two_servers_add_each_other_to_routing_table() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); @@ -83,9 +77,7 @@ async fn two_servers_add_each_other_to_routing_table() { #[async_std::test] async fn adding_an_external_addresses_activates_server_mode_on_existing_connections() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -104,7 +96,9 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti // Server learns its external address (this could be through AutoNAT or some other mechanism). server.add_external_address(memory_addr); - // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server. + // The server reconfigured its connection to the client to be in server mode, + // pushes that information to client which as a result updates its routing + // table and triggers a mode change to Mode::Server. match libp2p_swarm_test::drive(&mut client, &mut server).await { ( [Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. 
})], @@ -119,9 +113,7 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti #[async_std::test] async fn set_client_to_server_mode() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); client.behaviour_mut().kad.set_mode(Some(Mode::Client)); diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 67b1d669f60..45a479bf4af 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.46.1 + +- Emit `ToSwarm::NewExternalAddrOfPeer` on discovery. + See [PR 5753](https://github.com/libp2p/rust-libp2p/pull/5753) +- Upgrade `hickory-proto`. + See [PR 5727](https://github.com/libp2p/rust-libp2p/pull/5727) + ## 0.46.0 diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 338501aa896..ba86a82d5bb 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = { workspace = true } -version = "0.46.0" +version = "0.46.1" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -13,7 +13,6 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.12.0", optional = true } async-io = { version = "2.3.3", optional = true } -data-encoding = "2.6.0" futures = { workspace = true } if-watch = "3.2.0" libp2p-core = { workspace = true } @@ -24,7 +23,7 @@ smallvec = "1.13.2" socket2 = { version = "0.5.7", features = ["all"] } tokio = { workspace = true, default-features = false, features = ["net", "time"], optional = true} tracing = { workspace = true } -hickory-proto = { version = "0.24.1", default-features = false, features = ["mdns"] } +hickory-proto = { workspace = true, features = ["mdns"] } [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -32,13 +31,10 @@ async-io = ["dep:async-io", "dep:async-std", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } -libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } -libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } -libp2p-yamux = { workspace = true } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [[test]] name = "use-async-std" diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index cecd27bf78b..68e28cf3d63 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -22,25 +22,38 @@ mod iface; mod socket; mod timer; -use self::iface::InterfaceState; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{Stream, StreamExt}; +use std::{ + cmp, + collections::{ + hash_map::{Entry, HashMap}, + VecDeque, + }, + convert::Infallible, + fmt, + future::Future, + io, + net::IpAddr, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll}, + time::Instant, +}; + +use futures::{channel::mpsc, Stream, StreamExt}; use if_watch::IfEvent; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use 
libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{Entry, HashMap}; -use std::future::Future; -use std::sync::{Arc, RwLock}; -use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant}; + +use self::iface::InterfaceState; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; /// An abstraction to allow for compatibility with various async runtimes. pub trait Provider: 'static { @@ -68,11 +81,13 @@ pub trait Abort { /// The type of a [`Behaviour`] using the `async-io` implementation. #[cfg(feature = "async-io")] pub mod async_io { - use super::Provider; - use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; + use std::future::Future; + use async_std::task::JoinHandle; use if_watch::smol::IfWatcher; - use std::future::Future; + + use super::Provider; + use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; #[doc(hidden)] pub enum AsyncIo {} @@ -104,12 +119,14 @@ pub mod async_io { /// The type of a [`Behaviour`] using the `tokio` implementation. #[cfg(feature = "tokio")] pub mod tokio { - use super::Provider; - use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; - use if_watch::tokio::IfWatcher; use std::future::Future; + + use if_watch::tokio::IfWatcher; use tokio::task::JoinHandle; + use super::Provider; + use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; + #[doc(hidden)] pub enum Tokio {} @@ -170,10 +187,14 @@ where /// The current set of listen addresses. /// /// This is shared across all interface tasks using an [`RwLock`]. - /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. + /// The [`Behaviour`] updates this upon new [`FromSwarm`] + /// events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. listen_addresses: Arc>, local_peer_id: PeerId, + + /// Pending behaviour events to be emitted. + pending_events: VecDeque>, } impl
<P> Behaviour<P>
@@ -194,6 +215,7 @@ where closest_expiration: Default::default(), listen_addresses: Default::default(), local_peer_id, + pending_events: Default::default(), }) } @@ -290,93 +312,113 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll>> { - // Poll ifwatch. - while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) { - match event { - Ok(IfEvent::Up(inet)) => { - let addr = inet.addr(); - if addr.is_loopback() { - continue; - } - if addr.is_ipv4() && self.config.enable_ipv6 - || addr.is_ipv6() && !self.config.enable_ipv6 - { - continue; - } - if let Entry::Vacant(e) = self.if_tasks.entry(addr) { - match InterfaceState::::new( - addr, - self.config.clone(), - self.local_peer_id, - self.listen_addresses.clone(), - self.query_response_sender.clone(), - ) { - Ok(iface_state) => { - e.insert(P::spawn(iface_state)); - } - Err(err) => { - tracing::error!("failed to create `InterfaceState`: {}", err) + loop { + // Check for pending events and emit them. + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(event); + } + + // Poll ifwatch. + while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) { + match event { + Ok(IfEvent::Up(inet)) => { + let addr = inet.addr(); + if addr.is_loopback() { + continue; + } + if addr.is_ipv4() && self.config.enable_ipv6 + || addr.is_ipv6() && !self.config.enable_ipv6 + { + continue; + } + if let Entry::Vacant(e) = self.if_tasks.entry(addr) { + match InterfaceState::::new( + addr, + self.config.clone(), + self.local_peer_id, + self.listen_addresses.clone(), + self.query_response_sender.clone(), + ) { + Ok(iface_state) => { + e.insert(P::spawn(iface_state)); + } + Err(err) => { + tracing::error!("failed to create `InterfaceState`: {}", err) + } } } } - } - Ok(IfEvent::Down(inet)) => { - if let Some(handle) = self.if_tasks.remove(&inet.addr()) { - tracing::info!(instance=%inet.addr(), "dropping instance"); + Ok(IfEvent::Down(inet)) => { + if let Some(handle) = self.if_tasks.remove(&inet.addr()) { + tracing::info!(instance=%inet.addr(), "dropping instance"); - handle.abort(); + handle.abort(); + } } + Err(err) => tracing::error!("if watch returned an error: {}", err), } - Err(err) => tracing::error!("if watch returned an error: {}", err), } - } - // Emit discovered event. - let mut discovered = Vec::new(); - - while let Poll::Ready(Some((peer, addr, expiration))) = - self.query_response_receiver.poll_next_unpin(cx) - { - if let Some((_, _, cur_expires)) = self - .discovered_nodes - .iter_mut() - .find(|(p, a, _)| *p == peer && *a == addr) + // Emit discovered event. 
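The rewrite of `poll()` in this hunk routes events through the new `pending_events` queue, drains that queue first on every wake-up, and deliberately reports the aggregate `Discovered` event ahead of the per-address `NewExternalAddrOfPeer` events. A stripped-down sketch of that ordering (toy types, not the mdns behaviour itself):

use std::{collections::VecDeque, task::Poll};

#[derive(Debug, PartialEq)]
enum OutEvent {
    Discovered(&'static str),
    NewExternalAddrOfPeer(&'static str),
}

struct Toy {
    pending_events: VecDeque<OutEvent>,
}

impl Toy {
    fn on_discovery(&mut self, peer: &'static str) {
        // Per-address event goes onto the back of the queue...
        self.pending_events
            .push_back(OutEvent::NewExternalAddrOfPeer(peer));
        // ...then the aggregate event onto the front, so it is reported first.
        self.pending_events.push_front(OutEvent::Discovered(peer));
    }

    fn poll(&mut self) -> Poll<OutEvent> {
        // Drain queued events before doing any other work.
        if let Some(event) = self.pending_events.pop_front() {
            return Poll::Ready(event);
        }
        Poll::Pending
    }
}

fn main() {
    let mut toy = Toy { pending_events: VecDeque::new() };
    toy.on_discovery("peer-a");
    assert_eq!(toy.poll(), Poll::Ready(OutEvent::Discovered("peer-a")));
    assert_eq!(toy.poll(), Poll::Ready(OutEvent::NewExternalAddrOfPeer("peer-a")));
    assert_eq!(toy.poll(), Poll::Pending);
}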
+ let mut discovered = Vec::new(); + + while let Poll::Ready(Some((peer, addr, expiration))) = + self.query_response_receiver.poll_next_unpin(cx) { - *cur_expires = cmp::max(*cur_expires, expiration); - } else { - tracing::info!(%peer, address=%addr, "discovered peer on address"); - self.discovered_nodes.push((peer, addr.clone(), expiration)); - discovered.push((peer, addr)); + if let Some((_, _, cur_expires)) = self + .discovered_nodes + .iter_mut() + .find(|(p, a, _)| *p == peer && *a == addr) + { + *cur_expires = cmp::max(*cur_expires, expiration); + } else { + tracing::info!(%peer, address=%addr, "discovered peer on address"); + self.discovered_nodes.push((peer, addr.clone(), expiration)); + discovered.push((peer, addr.clone())); + + self.pending_events + .push_back(ToSwarm::NewExternalAddrOfPeer { + peer_id: peer, + address: addr, + }); + } } - } - if !discovered.is_empty() { - let event = Event::Discovered(discovered); - return Poll::Ready(ToSwarm::GenerateEvent(event)); - } - // Emit expired event. - let now = Instant::now(); - let mut closest_expiration = None; - let mut expired = Vec::new(); - self.discovered_nodes.retain(|(peer, addr, expiration)| { - if *expiration <= now { - tracing::info!(%peer, address=%addr, "expired peer on address"); - expired.push((*peer, addr.clone())); - return false; + if !discovered.is_empty() { + let event = Event::Discovered(discovered); + // Push to the front of the queue so that the behavior event is reported before + // the individual discovered addresses. + self.pending_events + .push_front(ToSwarm::GenerateEvent(event)); + continue; + } + // Emit expired event. + let now = Instant::now(); + let mut closest_expiration = None; + let mut expired = Vec::new(); + self.discovered_nodes.retain(|(peer, addr, expiration)| { + if *expiration <= now { + tracing::info!(%peer, address=%addr, "expired peer on address"); + expired.push((*peer, addr.clone())); + return false; + } + closest_expiration = + Some(closest_expiration.unwrap_or(*expiration).min(*expiration)); + true + }); + if !expired.is_empty() { + let event = Event::Expired(expired); + self.pending_events.push_back(ToSwarm::GenerateEvent(event)); + continue; + } + if let Some(closest_expiration) = closest_expiration { + let mut timer = P::Timer::at(closest_expiration); + let _ = Pin::new(&mut timer).poll_next(cx); + + self.closest_expiration = Some(timer); } - closest_expiration = Some(closest_expiration.unwrap_or(*expiration).min(*expiration)); - true - }); - if !expired.is_empty() { - let event = Event::Expired(expired); - return Poll::Ready(ToSwarm::GenerateEvent(event)); - } - if let Some(closest_expiration) = closest_expiration { - let mut timer = P::Timer::at(closest_expiration); - let _ = Pin::new(&mut timer).poll_next(cx); - self.closest_expiration = Some(timer); + return Poll::Pending; } - Poll::Pending } } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 9302065cde2..873bb8a307b 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -21,27 +21,32 @@ mod dns; mod query; -use self::dns::{build_query, build_query_response, build_service_discovery_response}; -use self::query::MdnsPacket; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{SinkExt, StreamExt}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_swarm::ListenAddresses; -use socket2::{Domain, Socket, Type}; -use std::future::Future; -use 
std::sync::{Arc, RwLock}; use std::{ collections::VecDeque, + future::Future, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, pin::Pin, + sync::{Arc, RwLock}, task::{Context, Poll}, time::{Duration, Instant}, }; +use futures::{channel::mpsc, SinkExt, StreamExt}; +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::ListenAddresses; +use socket2::{Domain, Socket, Type}; + +use self::{ + dns::{build_query, build_query_response, build_service_discovery_response}, + query::MdnsPacket, +}; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; + /// Initial interval for starting probe const INITIAL_TIMEOUT_INTERVAL: Duration = Duration::from_millis(500); diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 39dbf08c731..35cba44f4af 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -20,12 +20,13 @@ //! (M)DNS encoding and decoding on top of the `dns_parser` library. -use crate::{META_QUERY_SERVICE, SERVICE_NAME}; +use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; +use rand::{distributions::Alphanumeric, thread_rng, Rng}; + +use crate::{META_QUERY_SERVICE, SERVICE_NAME}; /// DNS TXT records can have up to 255 characters as a single string value. /// @@ -293,7 +294,6 @@ fn generate_peer_name() -> Vec { /// Panics if `name` has a zero-length component or a component that is too long. /// This is fine considering that this function is not public and is only called in a controlled /// environment. -/// fn append_qname(out: &mut Vec, name: &[u8]) { debug_assert!(name.is_ascii()); @@ -394,10 +394,11 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { - use super::*; use hickory_proto::op::Message; use libp2p_identity as identity; + use super::*; + #[test] fn build_query_correct() { let query = build_query(); diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 70b84816d0f..a2a2c200b3b 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -18,18 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::dns; -use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use std::{ + fmt, + net::SocketAddr, + str, + time::{Duration, Instant}, +}; + use hickory_proto::{ op::Message, rr::{Name, RData}, }; use libp2p_core::multiaddr::{Multiaddr, Protocol}; +use libp2p_identity::PeerId; use libp2p_swarm::_address_translation; -use libp2p_identity::PeerId; -use std::time::Instant; -use std::{fmt, net::SocketAddr, str, time::Duration}; +use super::dns; +use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -46,7 +51,7 @@ impl MdnsPacket { pub(crate) fn new_from_bytes( buf: &[u8], from: SocketAddr, - ) -> Result, hickory_proto::error::ProtoError> { + ) -> Result, hickory_proto::ProtoError> { let packet = Message::from_vec(buf)?; if packet.query().is_none() { @@ -69,7 +74,8 @@ impl MdnsPacket { .iter() .any(|q| q.name().to_utf8() == META_QUERY_SERVICE_FQDN) { - // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? 
+ // TODO: what if multiple questions, + // one with SERVICE_NAME and one with META_QUERY_SERVICE? return Ok(Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { from, query_id: packet.header().id(), @@ -155,7 +161,7 @@ impl MdnsResponse { return None; } - let RData::PTR(record_value) = record.data()? else { + let RData::PTR(record_value) = record.data() else { return None; }; @@ -237,7 +243,7 @@ impl MdnsPeer { return None; } - if let Some(RData::TXT(ref txt)) = add_record.data() { + if let RData::TXT(ref txt) = add_record.data() { Some(txt) } else { None @@ -307,8 +313,7 @@ impl fmt::Debug for MdnsPeer { #[cfg(test)] mod tests { - use super::super::dns::build_query_response; - use super::*; + use super::{super::dns::build_query_response, *}; #[test] fn test_create_mdns_peer() { @@ -336,7 +341,7 @@ mod tests { if record.name().to_utf8() != SERVICE_NAME_FQDN { return None; } - let Some(RData::PTR(record_value)) = record.data() else { + let RData::PTR(record_value) = record.data() else { return None; }; Some(record_value) diff --git a/protocols/mdns/src/behaviour/socket.rs b/protocols/mdns/src/behaviour/socket.rs index ebaad17e45f..cf11450fb4b 100644 --- a/protocols/mdns/src/behaviour/socket.rs +++ b/protocols/mdns/src/behaviour/socket.rs @@ -24,7 +24,8 @@ use std::{ task::{Context, Poll}, }; -/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async mode +/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async +/// mode #[allow(unreachable_pub)] // Users should not depend on this. pub trait AsyncSocket: Unpin + Send + 'static { /// Create the async socket from the [`std::net::UdpSocket`] @@ -32,7 +33,8 @@ pub trait AsyncSocket: Unpin + Send + 'static { where Self: Sized; - /// Attempts to receive a single packet on the socket from the remote address to which it is connected. + /// Attempts to receive a single packet on the socket + /// from the remote address to which it is connected. 
fn poll_read( &mut self, _cx: &mut Context, @@ -50,10 +52,11 @@ pub trait AsyncSocket: Unpin + Send + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; use async_io::Async; use futures::FutureExt; + use super::*; + /// AsyncIo UdpSocket pub(crate) type AsyncUdpSocket = Async; impl AsyncSocket for AsyncUdpSocket { @@ -92,9 +95,10 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; use ::tokio::{io::ReadBuf, net::UdpSocket as TkUdpSocket}; + use super::*; + /// Tokio ASync Socket` pub(crate) type TokioUdpSocket = TkUdpSocket; impl AsyncSocket for TokioUdpSocket { diff --git a/protocols/mdns/src/behaviour/timer.rs b/protocols/mdns/src/behaviour/timer.rs index 5e284654676..5fdb1beffae 100644 --- a/protocols/mdns/src/behaviour/timer.rs +++ b/protocols/mdns/src/behaviour/timer.rs @@ -42,14 +42,16 @@ pub trait Builder: Send + Unpin + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; - use async_io::Timer as AsioTimer; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_io::Timer as AsioTimer; + use futures::Stream; + + use super::*; + /// Async Timer pub(crate) type AsyncTimer = Timer; impl Builder for AsyncTimer { @@ -83,14 +85,16 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; - use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; + use futures::Stream; + + use super::*; + /// Tokio wrapper pub(crate) type TokioTimer = Timer; impl Builder for TokioTimer { diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index 4823d740272..a0086a0e2d5 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -31,21 +31,20 @@ //! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled features, which //! implements the `NetworkBehaviour` trait. This struct will automatically discover other //! libp2p nodes on the local network. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use std::net::{Ipv4Addr, Ipv6Addr}; -use std::time::Duration; +use std::{ + net::{Ipv4Addr, Ipv6Addr}, + time::Duration, +}; mod behaviour; -pub use crate::behaviour::{Behaviour, Event}; - #[cfg(feature = "async-io")] pub use crate::behaviour::async_io; - #[cfg(feature = "tokio")] pub use crate::behaviour::tokio; +pub use crate::behaviour::{Behaviour, Event}; /// The DNS service name for all libp2p peers used to query for addresses. 
const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 549f70978af..9ee2b7659ea 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -18,28 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; -use libp2p_mdns::Event; -use libp2p_mdns::{async_io::Behaviour, Config}; +use libp2p_mdns::{async_io::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[async_std::test] async fn test_discovery_async_std_ipv4() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); run_discovery_test(Config::default()).await } #[async_std::test] async fn test_discovery_async_std_ipv6() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let config = Config { enable_ipv6: true, @@ -50,9 +45,7 @@ async fn test_discovery_async_std_ipv6() { #[async_std::test] async fn test_expired_async_std() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let config = Config { ttl: Duration::from_secs(1), @@ -85,9 +78,7 @@ async fn test_expired_async_std() { #[async_std::test] async fn test_no_expiration_on_close_async_std() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let config = Config { ttl: Duration::from_secs(120), query_interval: Duration::from_secs(10), diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index cf0d9f4bed4..a48f84217a3 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -17,27 +17,23 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn test_discovery_tokio_ipv4() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); run_discovery_test(Config::default()).await } #[tokio::test] async fn test_discovery_tokio_ipv6() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let config = Config { enable_ipv6: true, @@ -48,9 +44,7 @@ async fn test_discovery_tokio_ipv6() { #[tokio::test] async fn test_expired_tokio() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let config = Config { ttl: Duration::from_secs(1), diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index cd499a8c949..0b994447525 100644 --- a/protocols/perf/Cargo.toml +++ 
b/protocols/perf/Cargo.toml @@ -19,9 +19,7 @@ futures-timer = "3.0" web-time = { workspace = true } libp2p = { workspace = true, features = ["tokio", "tcp", "quic", "tls", "yamux", "dns"] } libp2p-core = { workspace = true } -libp2p-dns = { workspace = true, features = ["tokio"] } libp2p-identity = { workspace = true, features = ["rand"] } -libp2p-quic = { workspace = true, features = ["tokio"] } libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } libp2p-tls = { workspace = true } @@ -31,10 +29,10 @@ serde_json = "1.0" thiserror = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } [dev-dependencies] -rand = "0.8" libp2p-swarm-test = { path = "../../swarm-test" } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/perf/Dockerfile b/protocols/perf/Dockerfile index f68ea6ef211..bb3124df02f 100644 --- a/protocols/perf/Dockerfile +++ b/protocols/perf/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.81.0 as builder +FROM rust:1.83.0 as builder # Run with access to the target cache to speed up builds WORKDIR /workspace diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 9a4cfb8bcac..506455f081a 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -23,12 +23,13 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p::identity::PeerId; -use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; -use libp2p::SwarmBuilder; -use libp2p_perf::{client, server}; -use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; +use libp2p::{ + core::{multiaddr::Protocol, upgrade, Multiaddr}, + identity::PeerId, + swarm::{NetworkBehaviour, Swarm, SwarmEvent}, + SwarmBuilder, +}; +use libp2p_perf::{client, server, Final, Intermediate, Run, RunParams, RunUpdate}; use serde::{Deserialize, Serialize}; use tracing_subscriber::EnvFilter; use web_time::{Duration, Instant}; diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs index 9f984a5bba1..7699bc85c17 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -21,11 +21,13 @@ mod behaviour; mod handler; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::{ + convert::Infallible, + sync::atomic::{AtomicUsize, Ordering}, +}; pub use behaviour::{Behaviour, Event}; use libp2p_swarm::StreamUpgradeError; -use std::convert::Infallible; static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index 1b181557acc..86c85d61da9 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -32,10 +32,8 @@ use libp2p_swarm::{ NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use crate::RunParams; -use crate::{client::handler::Handler, RunUpdate}; - use super::{RunError, RunId}; +use crate::{client::handler::Handler, RunParams, RunUpdate}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 85e864949f8..043790822b5 100644 --- a/protocols/perf/src/client/handler.rs +++ 
b/protocols/perf/src/client/handler.rs @@ -36,8 +36,10 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use crate::client::{RunError, RunId}; -use crate::{RunParams, RunUpdate}; +use crate::{ + client::{RunError, RunId}, + RunParams, RunUpdate, +}; #[derive(Debug)] pub struct Command { @@ -56,7 +58,7 @@ pub struct Handler { queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, >, @@ -90,7 +92,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) } @@ -104,12 +106,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { // TODO: remove when Rust 1.82 is MSRV @@ -159,9 +156,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } diff --git a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs index f995bbe2d3b..d07c90fa951 100644 --- a/protocols/perf/src/protocol.rs +++ b/protocols/perf/src/protocol.rs @@ -18,14 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_timer::Delay; use std::time::Duration; -use web_time::Instant; use futures::{ future::{select, Either}, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, }; +use futures_timer::Delay; +use web_time::Instant; use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate}; diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index 5408029e85d..22466bfe56a 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -31,8 +31,7 @@ use libp2p_swarm::{ ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use crate::server::handler::Handler; -use crate::Run; +use crate::{server::handler::Handler, Run}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index c1363ae2380..6ecb19dbc18 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -18,7 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use futures::FutureExt; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; @@ -29,7 +32,6 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use std::convert::Infallible; use tracing::error; use crate::Run; @@ -68,7 +70,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = Infallible; type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(ReadyUpgrade::new(crate::PROTOCOL_NAME), ()) } @@ -80,12 +82,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -127,9 +124,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { loop { match self.inbound.poll_unpin(cx) { Poll::Ready(Ok(Ok(stats))) => { diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index 017d475befd..c265b0e2e61 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -24,13 +24,10 @@ use libp2p_perf::{ }; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn perf() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut server = Swarm::new_ephemeral(|_| server::Behaviour::new()); let server_peer_id = *server.local_peer_id(); diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 0fad9678aec..83f3b6460c9 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -11,7 +11,6 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -either = "1.11.0" futures = { workspace = true } futures-timer = "3.0.3" web-time = { workspace = true } @@ -25,7 +24,6 @@ tracing = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } tokio = {workspace = true, features = ["rt", "macros"]} # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 961716e934a..510ff0553de 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -18,27 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{protocol, PROTOCOL_NAME}; -use futures::future::{BoxFuture, Either}; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; -use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, -}; -use std::collections::VecDeque; -use std::convert::Infallible; use std::{ + collections::VecDeque, + convert::Infallible, error::Error, fmt, io, task::{Context, Poll}, time::Duration, }; +use futures::{ + future::{BoxFuture, Either}, + prelude::*, +}; +use futures_timer::Delay; +use libp2p_core::upgrade::ReadyUpgrade; +use libp2p_swarm::{ + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, +}; + +use crate::{protocol, PROTOCOL_NAME}; + /// The configuration for outbound pings. #[derive(Debug, Clone)] pub struct Config { @@ -57,8 +59,7 @@ impl Config { /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. - /// * Every ping sent must yield a response within 20 seconds in order to - /// be successful. + /// * Every ping sent must yield a response within 20 seconds in order to be successful. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), @@ -178,7 +179,7 @@ impl Handler { fn on_dial_upgrade_error( &mut self, DialUpgradeError { error, .. }: DialUpgradeError< - ::OutboundOpenInfo, + (), ::OutboundProtocol, >, ) { @@ -228,7 +229,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol, ()> { + fn listen_protocol(&self) -> SubstreamProtocol> { SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) } @@ -339,12 +340,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 82f240cab6b..d48bcbc98ab 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -27,9 +27,11 @@ //! # Usage //! //! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. -//! It will respond to inbound ping requests and periodically send outbound ping requests on every established connection. +//! It will respond to inbound ping requests and periodically send outbound ping requests on every +//! established connection. //! -//! It is up to the user to implement a health-check / connection management policy based on the ping protocol. +//! It is up to the user to implement a health-check / connection management policy based on the +//! ping protocol. //! //! For example: //! @@ -39,8 +41,10 @@ //! //! Users should inspect emitted [`Event`]s and call APIs on [`Swarm`]: //! -//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific connection -//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all connections to a peer +//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific +//! connection +//! 
- [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all +//! connections to a peer //! //! [`Swarm`]: libp2p_swarm::Swarm //! [`Transport`]: libp2p_core::Transport @@ -50,22 +54,22 @@ mod handler; mod protocol; +use std::{ + collections::VecDeque, + task::{Context, Poll}, + time::Duration, +}; + use handler::Handler; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +pub use handler::{Config, Failure}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::time::Duration; -use std::{ - collections::VecDeque, - task::{Context, Poll}, -}; pub use self::protocol::PROTOCOL_NAME; -pub use handler::{Config, Failure}; /// A [`NetworkBehaviour`] that responds to inbound pings and /// periodically sends outbound pings on every established connection. diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 101c219aac4..5e84f55e090 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{io, time::Duration}; + use futures::prelude::*; use libp2p_swarm::StreamProtocol; use rand::{distributions, prelude::*}; -use std::{io, time::Duration}; use web_time::Instant; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0"); @@ -40,10 +41,10 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// Successful pings report the round-trip time. /// /// > **Note**: The round-trip time of a ping may be subject to delays induced -/// > by the underlying transport, e.g. in the case of TCP there is -/// > Nagle's algorithm, delayed acks and similar configuration options -/// > which can affect latencies especially on otherwise low-volume -/// > connections. +/// > by the underlying transport, e.g. in the case of TCP there is +/// > Nagle's algorithm, delayed acks and similar configuration options +/// > which can affect latencies especially on otherwise low-volume +/// > connections. const PING_SIZE: usize = 32; /// Sends a ping and waits for the pong. @@ -81,7 +82,6 @@ where #[cfg(test)] mod tests { - use super::*; use futures::StreamExt; use libp2p_core::{ multiaddr::multiaddr, @@ -89,6 +89,8 @@ mod tests { Endpoint, }; + use super::*; + #[tokio::test] async fn ping_pong() { let mem_addr = multiaddr![Memory(thread_rng().gen::())]; diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 0752b1fced9..210f9435e4a 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -20,12 +20,12 @@ //! Integration tests for the `Ping` network behaviour. 
+use std::{num::NonZeroU8, time::Duration}; + use libp2p_ping as ping; -use libp2p_swarm::dummy; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{dummy, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use quickcheck::*; -use std::{num::NonZeroU8, time::Duration}; #[tokio::test] async fn ping_pong() { diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 6c2c7b90304..e7e447e7d16 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -36,8 +36,7 @@ libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } libp2p-swarm-test = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } - +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index e854ed2a1ff..968642b3f1f 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -22,27 +22,31 @@ pub(crate) mod handler; pub(crate) mod rate_limiter; -use crate::behaviour::handler::Handler; -use crate::multiaddr_ext::MultiaddrExt; -use crate::proto; -use crate::protocol::{inbound_hop, outbound_stop}; +use std::{ + collections::{hash_map, HashMap, HashSet, VecDeque}, + num::NonZeroU32, + ops::Add, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ + behaviour::{ConnectionClosed, FromSwarm}, dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{hash_map, HashMap, HashSet, VecDeque}; -use std::num::NonZeroU32; -use std::ops::Add; -use std::task::{Context, Poll}; -use std::time::Duration; use web_time::Instant; +use crate::{ + behaviour::handler::Handler, + multiaddr_ext::MultiaddrExt, + proto, + protocol::{inbound_hop, outbound_stop}, +}; + /// Configuration for the relay [`Behaviour`]. /// /// # Panics @@ -120,12 +124,14 @@ impl std::fmt::Debug for Config { impl Default for Config { fn default() -> Self { let reservation_rate_limiters = vec![ - // For each peer ID one reservation every 2 minutes with up to 30 reservations per hour. + // For each peer ID one reservation every 2 minutes with up + // to 30 reservations per hour. rate_limiter::new_per_peer(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(30).expect("30 > 0"), interval: Duration::from_secs(60 * 2), }), - // For each IP address one reservation every minute with up to 60 reservations per hour. + // For each IP address one reservation every minute with up + // to 60 reservations per hour. rate_limiter::new_per_ip(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(60).expect("60 > 0"), interval: Duration::from_secs(60), @@ -386,7 +392,8 @@ impl NetworkBehaviour for Behaviour { ); let action = if - // Deny if it is a new reservation and exceeds `max_reservations_per_peer`. + // Deny if it is a new reservation and exceeds + // `max_reservations_per_peer`. 
(!renewed && self .reservations diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 92e45720f3f..d714bf04fc9 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -18,32 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::CircuitId; -use crate::copy_future::CopyFuture; -use crate::protocol::{inbound_hop, outbound_stop}; -use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use std::{ + collections::{HashMap, VecDeque}, + fmt, io, + task::{Context, Poll}, + time::Duration, +}; + use bytes::Bytes; use either::Either; -use futures::future::{BoxFuture, FutureExt, TryFutureExt}; -use futures::io::AsyncWriteExt; -use futures::stream::{FuturesUnordered, StreamExt}; +use futures::{ + future::{BoxFuture, FutureExt, TryFutureExt}, + io::AsyncWriteExt, + stream::{FuturesUnordered, StreamExt}, +}; use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::{ConnectedPoint, Multiaddr}; +use libp2p_core::{upgrade::ReadyUpgrade, ConnectedPoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; use libp2p_swarm::{ + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::collections::{HashMap, VecDeque}; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; use web_time::Instant; +use crate::{ + behaviour::CircuitId, + copy_future::CopyFuture, + proto, + protocol::{inbound_hop, outbound_stop}, + HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME, +}; + const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); @@ -337,7 +343,7 @@ pub struct Handler { queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, >, @@ -441,7 +447,7 @@ impl Handler { fn on_dial_upgrade_error( &mut self, DialUpgradeError { error, .. }: DialUpgradeError< - ::OutboundOpenInfo, + (), ::OutboundProtocol, >, ) { @@ -488,7 +494,7 @@ impl ConnectionHandler for Handler { type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()) } @@ -592,9 +598,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); @@ -870,12 +874,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs index 45b701c1b50..4b97c3d5090 100644 --- a/protocols/relay/src/behaviour/rate_limiter.rs +++ b/protocols/relay/src/behaviour/rate_limiter.rs @@ -18,18 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, VecDeque}, + hash::Hash, + net::IpAddr, + num::NonZeroU32, + time::Duration, +}; + use libp2p_core::multiaddr::{Multiaddr, Protocol}; use libp2p_identity::PeerId; -use std::collections::{HashMap, VecDeque}; -use std::hash::Hash; -use std::net::IpAddr; -use std::num::NonZeroU32; -use std::time::Duration; use web_time::Instant; /// Allows rate limiting access to some resource based on the [`PeerId`] and /// [`Multiaddr`] of a remote peer. -// // See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use // [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system // number of a peers IP address. @@ -170,9 +172,10 @@ impl GenericRateLimiter { #[cfg(test)] mod tests { - use super::*; use quickcheck::{QuickCheck, TestResult}; + use super::*; + #[test] fn first() { let id = 1; diff --git a/protocols/relay/src/copy_future.rs b/protocols/relay/src/copy_future.rs index c0039c29534..ae7ef22d648 100644 --- a/protocols/relay/src/copy_future.rs +++ b/protocols/relay/src/copy_future.rs @@ -24,16 +24,19 @@ //! //! Inspired by [`futures::io::Copy`]. -use futures::future::Future; -use futures::future::FutureExt; -use futures::io::{AsyncBufRead, BufReader}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{Future, FutureExt}, + io::{AsyncBufRead, AsyncRead, AsyncWrite, BufReader}, + ready, +}; use futures_timer::Delay; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; pub(crate) struct CopyFuture { src: BufReader, @@ -161,12 +164,13 @@ fn forward_data( #[cfg(test)] mod tests { - use super::*; - use futures::executor::block_on; - use futures::io::BufWriter; - use quickcheck::QuickCheck; use std::io::ErrorKind; + use futures::{executor::block_on, io::BufWriter}; + use quickcheck::QuickCheck; + + use super::*; + #[test] fn quickcheck() { struct Connection { @@ -356,13 +360,14 @@ mod tests { } } - // The source has two reads available, handing them out on `AsyncRead::poll_read` one by one. + // The source has two reads available, handing them out + // on `AsyncRead::poll_read` one by one. let mut source = BufReader::new(NeverEndingSource { read: vec![1, 2] }); // The destination is wrapped by a `BufWriter` with a capacity of `3`, i.e. one larger than // the available reads of the source. Without an explicit `AsyncWrite::poll_flush` the two - // reads would thus never make it to the destination, but instead be stuck in the buffer of - // the `BufWrite`. + // reads would thus never make it to the destination, + // but instead be stuck in the buffer of the `BufWrite`. 
let mut destination = BufWriter::with_capacity( 3, RecordingDestination { diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index eca3578d599..dba07015765 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -32,10 +32,10 @@ mod protocol; mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); - pub(crate) use self::message_v2::pb::mod_HopMessage::Type as HopMessageType; pub use self::message_v2::pb::mod_StopMessage::Type as StopMessageType; pub(crate) use self::message_v2::pb::{ - HopMessage, Limit, Peer, Reservation, Status, StopMessage, + mod_HopMessage::Type as HopMessageType, HopMessage, Limit, Peer, Reservation, Status, + StopMessage, }; } diff --git a/protocols/relay/src/multiaddr_ext.rs b/protocols/relay/src/multiaddr_ext.rs index 6991a8b9ded..7c06eb7eab0 100644 --- a/protocols/relay/src/multiaddr_ext.rs +++ b/protocols/relay/src/multiaddr_ext.rs @@ -1,5 +1,4 @@ -use libp2p_core::multiaddr::Protocol; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; pub(crate) trait MultiaddrExt { fn is_relayed(&self) -> bool; diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index fc9d28e66ed..7ac9b716700 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -23,33 +23,39 @@ pub(crate) mod handler; pub(crate) mod transport; -use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop}; +use std::{ + collections::{hash_map, HashMap, VecDeque}, + convert::Infallible, + io::{Error, ErrorKind, IoSlice}, + pin::Pin, + task::{Context, Poll}, +}; + use bytes::Bytes; use either::Either; -use futures::channel::mpsc::Receiver; -use futures::future::{BoxFuture, FutureExt}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; -use futures::stream::StreamExt; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use futures::{ + channel::mpsc::Receiver, + future::{BoxFuture, FutureExt}, + io::{AsyncRead, AsyncWrite}, + ready, + stream::StreamExt, +}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; -use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{hash_map, HashMap, VecDeque}; -use std::convert::Infallible; -use std::io::{Error, ErrorKind, IoSlice}; -use std::pin::Pin; -use std::task::{Context, Poll}; use transport::Transport; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::handler::Handler, + protocol::{self, inbound_stop}, +}; + /// The events produced by the client `Behaviour`. #[derive(Debug)] pub enum Event { @@ -89,7 +95,8 @@ pub struct Behaviour { /// Stores the address of a pending or confirmed reservation. /// - /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it. + /// This is indexed by the [`ConnectionId`] to a relay server and the address is the + /// `/p2p-circuit` address we reserved on it. reservation_addresses: HashMap, /// Queue of actions to return when polled. 
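Editor's note: the `priv_client::Behaviour` hunk above tracks reservations by the `/p2p-circuit` address obtained from a relay. For readers unfamiliar with that address format, here is a minimal sketch of how such an address is composed from a relay's transport address and peer ID; the helper name is illustrative, and the pattern mirrors the relay integration tests further below in this patch.

```rust
use libp2p_core::multiaddr::{Multiaddr, Protocol};
use libp2p_identity::PeerId;

// Illustrative helper: append `/p2p/<relay-peer-id>/p2p-circuit` to a relay's
// transport address, yielding the relayed address a client listens or dials on.
fn relayed_addr(relay_addr: Multiaddr, relay_peer_id: PeerId) -> Multiaddr {
    relay_addr
        .with(Protocol::P2p(relay_peer_id))
        .with(Protocol::P2pCircuit)
}
```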
diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 77b7f94ae60..d2e4db56b4c 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,29 +18,35 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::client::Connection; -use crate::priv_client::transport; -use crate::priv_client::transport::ToListenerMsg; -use crate::protocol::{self, inbound_stop, outbound_hop}; -use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; -use futures::channel::mpsc::Sender; -use futures::channel::{mpsc, oneshot}; -use futures::future::FutureExt; +use std::{ + collections::VecDeque, + convert::Infallible, + fmt, io, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::{mpsc, mpsc::Sender, oneshot}, + future::FutureExt, +}; use futures_timer::Delay; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, upgrade::ReadyUpgrade, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ + handler::{ConnectionEvent, FullyNegotiatedInbound}, ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::collections::VecDeque; -use std::convert::Infallible; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; + +use crate::{ + client::Connection, + priv_client, + priv_client::{transport, transport::ToListenerMsg}, + proto, + protocol::{self, inbound_stop, outbound_hop}, + HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME, +}; /// The maximum number of circuits being denied concurrently. /// @@ -101,7 +107,7 @@ pub struct Handler { queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, >, @@ -235,7 +241,7 @@ impl ConnectionHandler for Handler { type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()) } @@ -261,9 +267,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { loop { // Reservations match self.inflight_reserve_requests.poll_unpin(cx) { @@ -420,12 +424,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index ec1e8ca5fb8..ed9faa946db 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -19,25 +19,35 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::Connection; -use crate::protocol::outbound_hop; -use crate::protocol::outbound_hop::{ConnectError, ReserveError}; -use crate::RequestId; -use futures::channel::mpsc; -use futures::channel::oneshot; -use futures::future::{ready, BoxFuture, FutureExt, Ready}; -use futures::sink::SinkExt; -use futures::stream::SelectAll; -use futures::stream::{Stream, StreamExt}; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::{DialOpts, ListenerId, TransportError, TransportEvent}; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll, Waker}, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{ready, BoxFuture, FutureExt, Ready}, + sink::SinkExt, + stream::{SelectAll, Stream, StreamExt}, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; use libp2p_identity::PeerId; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll, Waker}; use thiserror::Error; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::Connection, + protocol::{ + outbound_hop, + outbound_hop::{ConnectError, ReserveError}, + }, + RequestId, +}; + /// A [`Transport`] enabling client relay capabilities. /// /// Note: The transport only handles listening and dialing on relayed [`Multiaddr`], and depends on @@ -49,7 +59,8 @@ use thiserror::Error; /// 1. Establish relayed connections by dialing `/p2p-circuit` addresses. /// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, transport::{DialOpts, PortUse}, connection::Endpoint}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, +/// # transport::{DialOpts, PortUse}, connection::Endpoint}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; @@ -307,8 +318,9 @@ pub(crate) struct Listener { queued_events: VecDeque<::Item>, /// Channel for messages from the behaviour [`Handler`][super::handler::Handler]. from_behaviour: mpsc::Receiver, - /// The listener can be closed either manually with [`Transport::remove_listener`](libp2p_core::Transport) or if - /// the sender side of the `from_behaviour` channel is dropped. + /// The listener can be closed either manually with + /// [`Transport::remove_listener`](libp2p_core::Transport) or if the sender side of the + /// `from_behaviour` channel is dropped. is_closed: bool, waker: Option, } @@ -344,7 +356,8 @@ impl Stream for Listener { } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed and + // all remaining events have been reported. self.waker = None; return Poll::Ready(None); } diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs index b94151259cd..b1adeedaaf5 100644 --- a/protocols/relay/src/protocol.rs +++ b/protocols/relay/src/protocol.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use libp2p_swarm::StreamProtocol; use std::time::Duration; +use libp2p_swarm::StreamProtocol; + +use crate::proto; + pub(crate) mod inbound_hop; pub(crate) mod inbound_stop; pub(crate) mod outbound_hop; diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 401c6258176..01280d70897 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -19,21 +19,18 @@ // DEALINGS IN THE SOFTWARE. use std::time::Duration; -use web_time::SystemTime; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use either::Either; use futures::prelude::*; -use thiserror::Error; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::proto; -use crate::proto::message_v2::pb::mod_HopMessage::Type; -use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, proto::message_v2::pb::mod_HopMessage::Type, protocol::MAX_MESSAGE_SIZE}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index b698a5ff769..8994c2cff73 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -18,16 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::protocol::{self, MAX_MESSAGE_SIZE}; +use std::io; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::{ + proto, + protocol::{self, MAX_MESSAGE_SIZE}, +}; + pub(crate) async fn handle_open_circuit(io: Stream) -> Result { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index b349f8848be..216c6d115bf 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,22 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use futures_timer::Delay; -use thiserror::Error; -use web_time::SystemTime; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; -use crate::{proto, HOP_PROTOCOL_NAME}; +use crate::{ + proto, + protocol::{Limit, MAX_MESSAGE_SIZE}, + HOP_PROTOCOL_NAME, +}; #[derive(Debug, Error)] pub enum ConnectError { diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 525ebc10821..272aa24eef6 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,19 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; -use thiserror::Error; - use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; -use crate::protocol::MAX_MESSAGE_SIZE; -use crate::{proto, STOP_PROTOCOL_NAME}; +use crate::{proto, protocol::MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 2b28d5a50cd..da6f549c091 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -18,33 +18,32 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::executor::LocalPool; -use futures::future::FutureExt; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::stream::StreamExt; -use futures::task::Spawn; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::choice::OrTransport; -use libp2p_core::transport::{Boxed, MemoryTransport, Transport}; -use libp2p_core::upgrade; +use std::{error::Error, time::Duration}; + +use futures::{ + executor::LocalPool, + future::FutureExt, + io::{AsyncRead, AsyncWrite}, + stream::StreamExt, + task::Spawn, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + muxing::StreamMuxerBox, + transport::{choice::OrTransport, Boxed, MemoryTransport, Transport}, + upgrade, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_ping as ping; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::dial_opts::DialOpts; -use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::{dial_opts::DialOpts, Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::error::Error; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[test] fn reservation() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -85,9 +84,7 @@ fn reservation() { #[test] fn new_reservation_to_same_relay_replaces_old() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -184,9 +181,7 @@ fn new_reservation_to_same_relay_replaces_old() { #[test] fn connect() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -270,9 +265,7 @@ async fn connection_established_to( #[test] fn handle_dial_failure() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -291,9 +284,7 @@ fn handle_dial_failure() { #[test] fn propagate_reservation_error_to_listener() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + 
libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -340,9 +331,7 @@ fn propagate_reservation_error_to_listener() { #[test] fn propagate_connect_error_to_unknown_peer_to_dialer() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -396,9 +385,7 @@ fn propagate_connect_error_to_unknown_peer_to_dialer() { #[test] fn reuse_connection() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -414,10 +401,7 @@ fn reuse_connection() { .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); - // To reuse the connection, we need to ensure it is not shut down due to being idle. - let mut client = build_client_with_config( - Config::with_async_std_executor().with_idle_connection_timeout(Duration::from_secs(1)), - ); + let mut client = build_client(); let client_peer_id = *client.local_peer_id(); client.dial(relay_addr).unwrap(); diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 1ed9e5bc3b0..ca01538a76d 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.15.1 + +- Update to `libp2p-request-response` `v0.28.0`. + ## 0.15.0 diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 5fa40c3785b..104dc6ad1d4 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = { workspace = true } description = "Rendezvous protocol for libp2p" -version = "0.15.0" +version = "0.15.1" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -29,15 +29,10 @@ tracing = { workspace = true } [dev-dependencies] libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } -libp2p-noise = { workspace = true } -libp2p-ping = { workspace = true } -libp2p-identify = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } -libp2p-tcp = { workspace = true, features = ["tokio"] } -libp2p-yamux = { workspace = true } rand = "0.8" tokio = { workspace = true, features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index a794252ff0b..019b23c092b 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -18,24 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::codec::Message::*; -use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use futures::future::BoxFuture; -use futures::future::FutureExt; -use futures::stream::FuturesUnordered; -use futures::stream::StreamExt; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr, PeerRecord}; +use std::{ + collections::HashMap, + iter, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{BoxFuture, FutureExt}, + stream::{FuturesUnordered, StreamExt}, +}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr, PeerRecord}; use libp2p_identity::{Keypair, PeerId, SigningError}; use libp2p_request_response::{OutboundRequestId, ProtocolSupport}; use libp2p_swarm::{ ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::HashMap; -use std::iter; -use std::task::{Context, Poll}; -use std::time::Duration; + +use crate::codec::{ + Cookie, ErrorCode, Message, Message::*, Namespace, NewRegistration, Registration, Ttl, +}; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -47,12 +51,14 @@ pub struct Behaviour { /// Hold addresses of all peers that we have discovered so far. /// - /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. + /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by + /// returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. discovered_peers: HashMap<(PeerId, Namespace), Vec>, registered_namespaces: HashMap<(PeerId, Namespace), Ttl>, - /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak. + /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` + /// otherwise we have a memory leak. expiring_registrations: FuturesUnordered>, external_addresses: ExternalAddresses, @@ -81,8 +87,9 @@ impl Behaviour { /// Register our external addresses in the given namespace with the given rendezvous peer. /// - /// External addresses are either manually added via [`libp2p_swarm::Swarm::add_external_address`] or reported - /// by other [`NetworkBehaviour`]s via [`ToSwarm::ExternalAddrConfirmed`]. + /// External addresses are either manually added via + /// [`libp2p_swarm::Swarm::add_external_address`] or reported by other [`NetworkBehaviour`]s + /// via [`ToSwarm::ExternalAddrConfirmed`]. pub fn register( &mut self, namespace: Namespace, diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index cad3688e00b..60f9f14f332 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -18,16 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::DEFAULT_TTL; +use std::{fmt, io}; + use async_trait::async_trait; -use asynchronous_codec::{BytesMut, Decoder, Encoder}; -use asynchronous_codec::{FramedRead, FramedWrite}; +use asynchronous_codec::{BytesMut, Decoder, Encoder, FramedRead, FramedWrite}; use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::{peer_record, signed_envelope, PeerRecord, SignedEnvelope}; use libp2p_swarm::StreamProtocol; use quick_protobuf_codec::Codec as ProtobufCodec; use rand::RngCore; -use std::{fmt, io}; + +use crate::DEFAULT_TTL; pub type Ttl = u64; pub(crate) type Limit = u64; @@ -54,7 +55,9 @@ pub struct Namespace(String); impl Namespace { /// Creates a new [`Namespace`] from a static string. /// - /// This will panic if the namespace is too long. We accepting panicking in this case because we are enforcing a `static lifetime which means this value can only be a constant in the program and hence we hope the developer checked that it is of an acceptable length. + /// This will panic if the namespace is too long. We accepting panicking in this case because we + /// are enforcing a `static lifetime which means this value can only be a constant in the + /// program and hence we hope the developer checked that it is of an acceptable length. pub fn from_static(value: &'static str) -> Self { if value.len() > crate::MAX_NAMESPACE { panic!("Namespace '{value}' is too long!") @@ -109,7 +112,8 @@ pub struct Cookie { impl Cookie { /// Construct a new [`Cookie`] for a given namespace. /// - /// This cookie will only be valid for subsequent DISCOVER requests targeting the same namespace. + /// This cookie will only be valid for subsequent DISCOVER requests targeting the same + /// namespace. pub fn for_namespace(namespace: Namespace) -> Self { Self { id: rand::thread_rng().next_u64(), diff --git a/protocols/rendezvous/src/lib.rs b/protocols/rendezvous/src/lib.rs index 7c607085f20..221178728af 100644 --- a/protocols/rendezvous/src/lib.rs +++ b/protocols/rendezvous/src/lib.rs @@ -22,9 +22,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl}; use libp2p_swarm::StreamProtocol; +pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl}; + mod codec; /// If unspecified, rendezvous nodes should assume a TTL of 2h. diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 45a525d9573..1be7220cfcb 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -18,25 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use crate::{MAX_TTL, MIN_TTL}; +use std::{ + collections::{HashMap, HashSet}, + iter, + task::{ready, Context, Poll}, + time::Duration, +}; + use bimap::BiMap; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::{FutureExt, StreamExt}; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::ProtocolSupport; -use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, +}; + +use crate::{ + codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}, + MAX_TTL, MIN_TTL, }; -use std::collections::{HashMap, HashSet}; -use std::iter; -use std::task::{ready, Context, Poll}; -use std::time::Duration; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -181,6 +183,7 @@ impl NetworkBehaviour for Behaviour { libp2p_request_response::Message::Request { request, channel, .. }, + .. }) => { if let Some((event, response)) = handle_request(peer_id, request, &mut self.registrations) @@ -200,6 +203,7 @@ impl NetworkBehaviour for Behaviour { peer, request_id, error, + .. }) => { tracing::warn!( %peer, @@ -215,6 +219,7 @@ impl NetworkBehaviour for Behaviour { | ToSwarm::GenerateEvent(libp2p_request_response::Event::Message { peer: _, message: libp2p_request_response::Message::Response { .. }, + .. }) | ToSwarm::GenerateEvent(libp2p_request_response::Event::OutboundFailure { .. @@ -534,10 +539,9 @@ pub struct CookieNamespaceMismatch; #[cfg(test)] mod tests { - use web_time::SystemTime; - use libp2p_core::PeerRecord; use libp2p_identity as identity; + use web_time::SystemTime; use super::*; @@ -792,7 +796,8 @@ mod tests { .unwrap_err(); } - /// Polls [`Registrations`] for at most `seconds` and panics if doesn't return an event within that time. + /// Polls [`Registrations`] for at most `seconds` and panics if doesn't + /// return an event within that time. async fn next_event_in_at_most(&mut self, seconds: u64) -> ExpiredRegistration { tokio::time::timeout(Duration::from_secs(seconds), self.next_event()) .await diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index d9200780ece..98aa9dab62d 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -18,23 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::stream::FuturesUnordered; -use futures::StreamExt; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::Multiaddr; +use std::time::Duration; + +use futures::{stream::FuturesUnordered, StreamExt}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity as identity; use libp2p_rendezvous as rendezvous; use libp2p_rendezvous::client::RegisterError; use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn given_successful_registration_then_successful_discovery() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -87,9 +83,7 @@ async fn given_successful_registration_then_successful_discovery() { #[tokio::test] async fn should_return_error_when_no_external_addresses() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let server = new_server(rendezvous::server::Config::default()).await; let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); @@ -104,9 +98,7 @@ async fn should_return_error_when_no_external_addresses() { #[tokio::test] async fn given_successful_registration_then_refresh_ttl() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -172,9 +164,7 @@ async fn given_successful_registration_then_refresh_ttl() { #[tokio::test] async fn given_successful_registration_then_refresh_external_addrs() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -225,9 +215,7 @@ async fn given_successful_registration_then_refresh_external_addrs() { #[tokio::test] async fn given_invalid_ttl_then_unsuccessful_registration() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -254,9 +242,7 @@ async fn given_invalid_ttl_then_unsuccessful_registration() { #[tokio::test] async fn discover_allows_for_dial_by_peer_id() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -311,9 +297,7 @@ async fn discover_allows_for_dial_by_peer_id() { #[tokio::test] async fn 
eve_cannot_register() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let mut robert = new_server(rendezvous::server::Config::default()).await; let mut eve = new_impersonating_client().await; @@ -339,9 +323,7 @@ async fn eve_cannot_register() { // test if charlie can operate as client and server simultaneously #[tokio::test] async fn can_combine_client_and_server() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -377,9 +359,7 @@ async fn can_combine_client_and_server() { #[tokio::test] async fn registration_on_clients_expire() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default().with_min_ttl(1)) @@ -471,9 +451,11 @@ async fn new_combined_node() -> Swarm { } async fn new_impersonating_client() -> Swarm { - // In reality, if Eve were to try and fake someones identity, she would obviously only know the public key. - // Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else). - // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection. + // In reality, if Eve were to try and fake someones identity, she would obviously only know the + // public key. Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we + // actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else). + // As such, the best we can do is hand eve a completely different keypair from what she is using + // to authenticate her connection. let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); eve.listen().with_memory_addr_external().await; diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 9ed658fc90f..34fc27b7432 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.28.0 + +- Add connection id to the events emitted by a request-response `Behaviour`. + See [PR 5719](https://github.com/libp2p/rust-libp2p/pull/5719). + +- Allow configurable request and response sizes for `json` and `cbor` codec. + See [PR 5792](https://github.com/libp2p/rust-libp2p/pull/5792). + ## 0.27.1 - Deprecate `void` crate. 
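Editor's note: the changelog entry above mentions configurable request and response sizes for the `json` and `cbor` codecs. A minimal sketch of how the new limits might be applied follows; the setters are the ones added in the CBOR codec hunk further below, while the `cbor::Codec` import path and the use of `Behaviour::with_codec` are assumptions for illustration and are not shown in this excerpt.

```rust
use libp2p_request_response::{cbor, Config, ProtocolSupport};
use libp2p_swarm::StreamProtocol;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct MyRequest(Vec<u8>);
#[derive(Debug, Serialize, Deserialize)]
struct MyResponse(Vec<u8>);

fn behaviour_with_larger_limits() -> cbor::Behaviour<MyRequest, MyResponse> {
    // Raise the per-message limits via the builder-style setters introduced
    // in the codec hunk below (defaults are 1 MiB requests / 10 MiB responses).
    // Assumption: the CBOR `Codec` type is reachable from user code.
    let codec = cbor::Codec::<MyRequest, MyResponse>::default()
        .set_request_size_maximum(2 * 1024 * 1024) // 2 MiB requests
        .set_response_size_maximum(20 * 1024 * 1024); // 20 MiB responses

    // Assumption: the generic constructor taking an explicit codec is used to
    // install the customised codec alongside the protocol list and config.
    cbor::Behaviour::with_codec(
        codec,
        [(
            StreamProtocol::new("/my-cbor-protocol"),
            ProtocolSupport::Full,
        )],
        Config::default(),
    )
}
```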
diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index b2e6fd0b0ac..cb78f536ae4 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.27.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,6 @@ categories = ["network-programming", "asynchronous"] async-trait = "0.1" cbor4ii = { version = "0.3.2", features = ["serde1", "use_std"], optional = true } futures = { workspace = true } -web-time = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } @@ -23,7 +22,6 @@ serde = { version = "1.0", optional = true} serde_json = { version = "1.0.117", optional = true } smallvec = "1.13.2" tracing = { workspace = true } -futures-timer = "3.0.3" futures-bounded = { workspace = true } [features] @@ -33,14 +31,11 @@ cbor = ["dep:serde", "dep:cbor4ii", "libp2p-swarm/macros"] [dev-dependencies] anyhow = "1.0.86" async-std = { version = "1.6.2", features = ["attributes"] } -libp2p-noise = { workspace = true } -libp2p-tcp = { workspace = true, features = ["async-io"] } -libp2p-yamux = { workspace = true } rand = "0.8" libp2p-swarm-test = { path = "../../swarm-test" } futures_ringbuf = "0.4.0" serde = { version = "1.0", features = ["derive"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs index a27d069e758..eac1944bb09 100644 --- a/protocols/request-response/src/cbor.rs +++ b/protocols/request-response/src/cbor.rs @@ -37,32 +37,37 @@ /// } /// /// let behaviour = cbor::Behaviour::::new( -/// [(StreamProtocol::new("/my-cbor-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-cbor-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + use async_trait::async_trait; use cbor4ii::core::error::DecodeError; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; - - /// Max request size in bytes - const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; - /// Max response size in bytes - const RESPONSE_SIZE_MAXIMUM: u64 = 10 * 1024 * 1024; pub struct Codec { + /// Max request size in bytes. + request_size_maximum: u64, + /// Max response size in bytes. 
+ response_size_maximum: u64, phantom: PhantomData<(Req, Resp)>, } impl Default for Codec { fn default() -> Self { Codec { + request_size_maximum: 1024 * 1024, + response_size_maximum: 10 * 1024 * 1024, phantom: PhantomData, } } @@ -70,7 +75,25 @@ mod codec { impl Clone for Codec { fn clone(&self) -> Self { - Self::default() + Self { + request_size_maximum: self.request_size_maximum, + response_size_maximum: self.response_size_maximum, + phantom: PhantomData, + } + } + } + + impl Codec { + /// Sets the limit for request size in bytes. + pub fn set_request_size_maximum(mut self, request_size_maximum: u64) -> Self { + self.request_size_maximum = request_size_maximum; + self + } + + /// Sets the limit for response size in bytes. + pub fn set_response_size_maximum(mut self, response_size_maximum: u64) -> Self { + self.response_size_maximum = response_size_maximum; + self } } @@ -90,7 +113,9 @@ mod codec { { let mut vec = Vec::new(); - io.take(REQUEST_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + io.take(self.request_size_maximum) + .read_to_end(&mut vec) + .await?; cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) } @@ -101,7 +126,9 @@ mod codec { { let mut vec = Vec::new(); - io.take(RESPONSE_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + io.take(self.response_size_maximum) + .read_to_end(&mut vec) + .await?; cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) } @@ -168,13 +195,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::cbor::codec::Codec; - use crate::Codec as _; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::{cbor::codec::Codec, Codec as _}; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/codec.rs b/protocols/request-response/src/codec.rs index d26b729acae..d396a75ad7b 100644 --- a/protocols/request-response/src/codec.rs +++ b/protocols/request-response/src/codec.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::io; + use async_trait::async_trait; use futures::prelude::*; -use std::io; /// A `Codec` defines the request and response types /// for a request-response [`Behaviour`](crate::Behaviour) protocol or diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index dbd7a0708ce..d70ddc5c0ae 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -20,23 +20,6 @@ pub(crate) mod protocol; -pub use protocol::ProtocolSupport; - -use crate::codec::Codec; -use crate::handler::protocol::Protocol; -use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; - -use futures::channel::mpsc; -use futures::{channel::oneshot, prelude::*}; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, -}; -use libp2p_swarm::{ - handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError}, - SubstreamProtocol, -}; -use smallvec::SmallVec; use std::{ collections::VecDeque, fmt, io, @@ -48,6 +31,25 @@ use std::{ time::Duration, }; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, +}; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError, + }, + SubstreamProtocol, +}; +pub use protocol::ProtocolSupport; +use smallvec::SmallVec; + +use crate::{ + codec::Codec, handler::protocol::Protocol, InboundRequestId, OutboundRequestId, + EMPTY_QUEUE_SHRINK_THRESHOLD, +}; + /// A connection handler for a request response [`Behaviour`](super::Behaviour) protocol. pub struct Handler where @@ -125,10 +127,7 @@ where FullyNegotiatedInbound { protocol: (mut stream, protocol), info: (), - }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, + }: FullyNegotiatedInbound<::InboundProtocol>, ) { let mut codec = self.codec.clone(); let request_id = self.next_inbound_request_id(); @@ -176,10 +175,7 @@ where FullyNegotiatedOutbound { protocol: (mut stream, protocol), info: (), - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, + }: FullyNegotiatedOutbound<::OutboundProtocol>, ) { let message = self .requested_outbound @@ -217,7 +213,7 @@ where fn on_dial_upgrade_error( &mut self, DialUpgradeError { error, info: () }: DialUpgradeError< - ::OutboundOpenInfo, + (), ::OutboundProtocol, >, ) { @@ -254,7 +250,7 @@ where fn on_listen_upgrade_error( &mut self, ListenUpgradeError { error, .. 
}: ListenUpgradeError< - ::InboundOpenInfo, + (), ::InboundProtocol, >, ) { @@ -381,7 +377,7 @@ where type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new( Protocol { protocols: self.inbound_protocols.clone(), @@ -471,12 +467,7 @@ where fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { diff --git a/protocols/request-response/src/json.rs b/protocols/request-response/src/json.rs index 85e78e7ddda..f151b16bf5f 100644 --- a/protocols/request-response/src/json.rs +++ b/protocols/request-response/src/json.rs @@ -18,7 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -/// A request-response behaviour using [`serde_json`] for serializing and deserializing the messages. +/// A request-response behaviour using [`serde_json`] for serializing and deserializing the +/// messages. /// /// # Example /// @@ -36,31 +37,36 @@ /// } /// /// let behaviour = json::Behaviour::::new( -/// [(StreamProtocol::new("/my-json-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-json-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{io, marker::PhantomData}; + use async_trait::async_trait; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{io, marker::PhantomData}; - - /// Max request size in bytes - const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; - /// Max response size in bytes - const RESPONSE_SIZE_MAXIMUM: u64 = 10 * 1024 * 1024; pub struct Codec { + /// Max request size in bytes + request_size_maximum: u64, + /// Max response size in bytes + response_size_maximum: u64, phantom: PhantomData<(Req, Resp)>, } impl Default for Codec { fn default() -> Self { Codec { + request_size_maximum: 1024 * 1024, + response_size_maximum: 10 * 1024 * 1024, phantom: PhantomData, } } @@ -68,7 +74,25 @@ mod codec { impl Clone for Codec { fn clone(&self) -> Self { - Self::default() + Self { + request_size_maximum: self.request_size_maximum, + response_size_maximum: self.response_size_maximum, + phantom: self.phantom, + } + } + } + + impl Codec { + /// Sets the limit for request size in bytes. + pub fn set_request_size_maximum(mut self, request_size_maximum: u64) -> Self { + self.request_size_maximum = request_size_maximum; + self + } + + /// Sets the limit for response size in bytes. + pub fn set_response_size_maximum(mut self, response_size_maximum: u64) -> Self { + self.response_size_maximum = response_size_maximum; + self } } @@ -88,7 +112,9 @@ mod codec { { let mut vec = Vec::new(); - io.take(REQUEST_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + io.take(self.request_size_maximum) + .read_to_end(&mut vec) + .await?; Ok(serde_json::from_slice(vec.as_slice())?) } @@ -99,7 +125,9 @@ mod codec { { let mut vec = Vec::new(); - io.take(RESPONSE_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + io.take(self.response_size_maximum) + .read_to_end(&mut vec) + .await?; Ok(serde_json::from_slice(vec.as_slice())?) 
} @@ -140,12 +168,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::Codec; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::Codec; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index e627f5668ff..39a773d99b4 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -73,12 +73,18 @@ mod handler; #[cfg(feature = "json")] pub mod json; -pub use codec::Codec; -pub use handler::ProtocolSupport; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + fmt, io, + sync::{atomic::AtomicU64, Arc}, + task::{Context, Poll}, + time::Duration, +}; -use crate::handler::OutboundMessage; +pub use codec::Codec; use futures::channel::oneshot; use handler::Handler; +pub use handler::ProtocolSupport; use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ @@ -88,13 +94,8 @@ use libp2p_swarm::{ PeerAddresses, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - fmt, io, - sync::{atomic::AtomicU64, Arc}, - task::{Context, Poll}, - time::Duration, -}; + +use crate::handler::OutboundMessage; /// An inbound request or response. #[derive(Debug)] @@ -130,6 +131,8 @@ pub enum Event { Message { /// The peer who sent the message. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The incoming message. message: Message, }, @@ -137,6 +140,8 @@ pub enum Event { OutboundFailure { /// The peer to whom the request was sent. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The (local) ID of the failed request. request_id: OutboundRequestId, /// The error that occurred. @@ -146,6 +151,8 @@ pub enum Event { InboundFailure { /// The peer from whom the request was received. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The ID of the failed inbound request. request_id: InboundRequestId, /// The error that occurred. @@ -158,6 +165,8 @@ pub enum Event { ResponseSent { /// The peer to whom the response was sent. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The ID of the inbound request whose response was sent. request_id: InboundRequestId, }, @@ -353,8 +362,8 @@ where /// Pending events to return from `poll`. pending_events: VecDeque, OutboundMessage>>, - /// The currently connected peers, their pending outbound and inbound responses and their known, - /// reachable addresses, if any. + /// The currently connected peers, their pending outbound and inbound responses and their + /// known, reachable addresses, if any. connected: HashMap>, /// Externally managed addresses via `add_address` and `remove_address`. addresses: PeerAddresses, @@ -367,7 +376,8 @@ impl Behaviour where TCodec: Codec + Default + Clone + Send + 'static, { - /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to construct the codec. + /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to + /// construct the codec. 
pub fn new(protocols: I, cfg: Config) -> Self where I: IntoIterator, @@ -567,10 +577,10 @@ where fn remove_pending_outbound_response( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, request: OutboundRequestId, ) -> bool { - self.get_connection_mut(peer, connection) + self.get_connection_mut(peer, connection_id) .map(|c| c.pending_outbound_responses.remove(&request)) .unwrap_or(false) } @@ -583,10 +593,10 @@ where fn remove_pending_inbound_response( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, request: InboundRequestId, ) -> bool { - self.get_connection_mut(peer, connection) + self.get_connection_mut(peer, connection_id) .map(|c| c.pending_inbound_responses.remove(&request)) .unwrap_or(false) } @@ -596,11 +606,11 @@ where fn get_connection_mut( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, ) -> Option<&mut Connection> { self.connected .get_mut(peer) - .and_then(|connections| connections.iter_mut().find(|c| c.id == connection)) + .and_then(|connections| connections.iter_mut().find(|c| c.id == connection_id)) } fn on_address_change( @@ -657,6 +667,7 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer: peer_id, + connection_id, request_id, error: InboundFailure::ConnectionClosed, })); @@ -666,13 +677,21 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer: peer_id, + connection_id, request_id, error: OutboundFailure::ConnectionClosed, })); } } - fn on_dial_failure(&mut self, DialFailure { peer_id, .. }: DialFailure) { + fn on_dial_failure( + &mut self, + DialFailure { + peer_id, + connection_id, + .. + }: DialFailure, + ) { if let Some(peer) = peer_id { // If there are pending outgoing requests when a dial failure occurs, // it is implied that we are not connected to the peer, since pending @@ -685,6 +704,7 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id: request.request_id, error: OutboundFailure::DialFailure, })); @@ -693,7 +713,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are + /// waiting to be sent to the newly connected peer. 
fn preload_new_handler( &mut self, handler: &mut Handler, @@ -808,7 +829,7 @@ where fn on_connection_handler_event( &mut self, peer: PeerId, - connection: ConnectionId, + connection_id: ConnectionId, event: THandlerOutEvent, ) { match event { @@ -816,7 +837,8 @@ where request_id, response, } => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before receiving response.", @@ -827,13 +849,17 @@ where response, }; self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + .push_back(ToSwarm::GenerateEvent(Event::Message { + peer, + connection_id, + message, + })); } handler::Event::Request { request_id, request, sender, - } => match self.get_connection_mut(&peer, connection) { + } => match self.get_connection_mut(&peer, connection_id) { Some(connection) => { let inserted = connection.pending_inbound_responses.insert(request_id); debug_assert!(inserted, "Expect id of new request to be unknown."); @@ -845,14 +871,19 @@ where channel, }; self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + .push_back(ToSwarm::GenerateEvent(Event::Message { + peer, + connection_id, + message, + })); } None => { - tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + tracing::debug!("Connection ({connection_id}) closed after `Event::Request` ({request_id}) has been emitted."); } }, handler::Event::ResponseSent(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before response is sent." @@ -861,11 +892,13 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::ResponseSent { peer, + connection_id, request_id, })); } handler::Event::ResponseOmission(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before response is omitted.", @@ -874,12 +907,14 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::ResponseOmission, })); } handler::Event::OutboundTimeout(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before request times out." 
@@ -888,12 +923,14 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::Timeout, })); } handler::Event::OutboundUnsupportedProtocols(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before failing to connect.", @@ -902,28 +939,33 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::UnsupportedProtocols, })); } handler::Event::OutboundStreamFailed { request_id, error } => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!(removed, "Expect request_id to be pending upon failure"); self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::Io(error), })) } handler::Event::InboundTimeout(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); if removed { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::Timeout, })); @@ -935,12 +977,14 @@ where } } handler::Event::InboundStreamFailed { request_id, error } => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); if removed { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::Io(error), })); diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index 19f323e169f..281701f5cc3 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -1,3 +1,5 @@ +use std::{io, iter, pin::pin, time::Duration}; + use anyhow::{bail, Result}; use async_std::task::sleep; use async_trait::async_trait; @@ -10,16 +12,10 @@ use libp2p_swarm_test::SwarmExt; use request_response::{ Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel, }; -use std::pin::pin; -use std::time::Duration; -use std::{io, iter}; -use tracing_subscriber::EnvFilter; #[async_std::test] async fn report_outbound_failure_on_read_response() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -73,10 +69,7 @@ async fn report_outbound_failure_on_read_response() { #[async_std::test] async fn report_outbound_failure_on_write_request() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - + libp2p_test_utils::with_default_env_filter(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -116,9 +109,7 @@ async fn report_outbound_failure_on_write_request() { #[async_std::test] async fn report_outbound_timeout_on_read_response() { - let _ = 
tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); // `swarm1` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); @@ -163,9 +154,7 @@ async fn report_outbound_timeout_on_read_response() { #[async_std::test] async fn report_outbound_failure_on_max_streams() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); // `swarm2` will be able to handle only 1 stream per time. let swarm2_config = request_response::Config::default() @@ -215,9 +204,7 @@ async fn report_outbound_failure_on_max_streams() { #[async_std::test] async fn report_inbound_failure_on_read_request() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -252,9 +239,7 @@ async fn report_inbound_failure_on_read_request() { #[async_std::test] async fn report_inbound_failure_on_write_response() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -318,9 +303,7 @@ async fn report_inbound_failure_on_write_response() { #[async_std::test] async fn report_inbound_timeout_on_write_response() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); // `swarm2` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); @@ -567,6 +550,7 @@ async fn wait_request( request, channel, }, + .. }) => { return Ok((peer, request_id, request, channel)); } @@ -601,6 +585,7 @@ async fn wait_inbound_failure( peer, request_id, error, + .. }) => { return Ok((peer, request_id, error)); } @@ -619,6 +604,7 @@ async fn wait_outbound_failure( peer, request_id, error, + .. }) => { return Ok((peer, request_id, error)); } diff --git a/protocols/request-response/tests/peer_address.rs b/protocols/request-response/tests/peer_address.rs index 0ed7ffe5551..714091fc682 100644 --- a/protocols/request-response/tests/peer_address.rs +++ b/protocols/request-response/tests/peer_address.rs @@ -1,18 +1,16 @@ +use std::iter; + use libp2p_core::ConnectedPoint; use libp2p_request_response as request_response; use libp2p_request_response::ProtocolSupport; use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use serde::{Deserialize, Serialize}; -use std::iter; -use tracing_subscriber::EnvFilter; #[async_std::test] #[cfg(feature = "cbor")] async fn dial_succeeds_after_adding_peers_address() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let protocols = iter::once((StreamProtocol::new("/ping/1"), ProtocolSupport::Full)); let config = request_response::Config::default(); diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 827afae249c..12458a0e5e7 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -20,6 +20,8 @@ //! Integration tests for the `Behaviour`. 
+use std::{io, iter}; + use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_request_response as request_response; @@ -28,15 +30,11 @@ use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::{io, iter}; -use tracing_subscriber::EnvFilter; #[async_std::test] #[cfg(feature = "cbor")] async fn is_response_outbound() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let ping = Ping("ping".to_string().into_bytes()); let offline_peer = PeerId::random(); @@ -64,6 +62,7 @@ async fn is_response_outbound() { peer, request_id: req_id, error: _error, + .. } => { assert_eq!(&offline_peer, &peer); assert_eq!(req_id, request_id1); @@ -115,6 +114,7 @@ async fn ping_protocol() { request_response::Message::Request { request, channel, .. }, + .. }) => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); @@ -156,6 +156,7 @@ async fn ping_protocol() { request_id, response, }, + .. } => { count += 1; assert_eq!(&response, &expected_pong); @@ -204,7 +205,8 @@ async fn emits_inbound_connection_closed_failure() { event = swarm1.select_next_some() => match event { SwarmEvent::Behaviour(request_response::Event::Message { peer, - message: request_response::Message::Request { request, channel, .. } + message: request_response::Message::Request { request, channel, .. }, + .. }) => { assert_eq!(&request, &ping); assert_eq!(&peer, &peer2_id); @@ -269,7 +271,8 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { event = swarm1.select_next_some() => { if let SwarmEvent::Behaviour(request_response::Event::Message { peer, - message: request_response::Message::Request { request, channel, .. } + message: request_response::Message::Request { request, channel, .. }, + .. }) = event { assert_eq!(&request, &ping); assert_eq!(&peer, &peer2_id); diff --git a/protocols/stream/Cargo.toml b/protocols/stream/Cargo.toml index d9c9276cb12..adb7a797794 100644 --- a/protocols/stream/Cargo.toml +++ b/protocols/stream/Cargo.toml @@ -20,7 +20,7 @@ rand = "0.8" [dev-dependencies] libp2p-swarm-test = { workspace = true } tokio = { workspace = true, features = ["full"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [lints] workspace = true diff --git a/protocols/stream/src/control.rs b/protocols/stream/src/control.rs index 036d285b2a3..2149c6bca48 100644 --- a/protocols/stream/src/control.rs +++ b/protocols/stream/src/control.rs @@ -6,9 +6,6 @@ use std::{ task::{Context, Poll}, }; -use crate::AlreadyRegistered; -use crate::{handler::NewStream, shared::Shared}; - use futures::{ channel::{mpsc, oneshot}, SinkExt as _, StreamExt as _, @@ -16,6 +13,8 @@ use futures::{ use libp2p_identity::PeerId; use libp2p_swarm::{Stream, StreamProtocol}; +use crate::{handler::NewStream, shared::Shared, AlreadyRegistered}; + /// A (remote) control for opening new streams and registration of inbound protocols. /// /// A [`Control`] can be cloned and thus allows for concurrent access. @@ -31,13 +30,15 @@ impl Control { /// Attempt to open a new stream for the given protocol and peer. /// - /// In case we are currently not connected to the peer, we will attempt to make a new connection. + /// In case we are currently not connected to the peer, + /// we will attempt to make a new connection. 
/// /// ## Backpressure /// /// [`Control`]s support backpressure similarly to bounded channels: /// Each [`Control`] has a guaranteed slot for internal messages. - /// A single control will always open one stream at a time which is enforced by requiring `&mut self`. + /// A single control will always open one stream at a + /// time which is enforced by requiring `&mut self`. /// /// This backpressure mechanism breaks if you clone [`Control`]s excessively. pub async fn open_stream( diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs index b7ec516d3b1..5fc903f5980 100644 --- a/protocols/stream/src/handler.rs +++ b/protocols/stream/src/handler.rs @@ -52,9 +52,7 @@ impl ConnectionHandler for Handler { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol( - &self, - ) -> swarm::SubstreamProtocol { + fn listen_protocol(&self) -> swarm::SubstreamProtocol { swarm::SubstreamProtocol::new( Upgrade { supported_protocols: Shared::lock(&self.shared).supported_inbound_protocols(), @@ -66,13 +64,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - swarm::ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - >, - > { + ) -> Poll> { if self.pending_upgrade.is_some() { return Poll::Pending; } @@ -104,12 +96,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -162,7 +149,8 @@ impl ConnectionHandler for Handler { } } -/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to negotiate a new outbound stream. +/// Message from a [`Control`](crate::Control) to +/// a [`ConnectionHandler`] to negotiate a new outbound stream. #[derive(Debug)] pub(crate) struct NewStream { pub(crate) protocol: StreamProtocol, diff --git a/protocols/stream/src/shared.rs b/protocols/stream/src/shared.rs index 48aa6613d83..62d7b3cfe68 100644 --- a/protocols/stream/src/shared.rs +++ b/protocols/stream/src/shared.rs @@ -12,9 +12,11 @@ use rand::seq::IteratorRandom as _; use crate::{handler::NewStream, AlreadyRegistered, IncomingStreams}; pub(crate) struct Shared { - /// Tracks the supported inbound protocols created via [`Control::accept`](crate::Control::accept). + /// Tracks the supported inbound protocols created via + /// [`Control::accept`](crate::Control::accept). /// - /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the [`mpsc::Receiver`] in [`IncomingStreams`]. + /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the + /// [`mpsc::Receiver`] in [`IncomingStreams`]. supported_inbound_protocols: HashMap>, connections: HashMap, @@ -25,7 +27,8 @@ pub(crate) struct Shared { /// Sender for peers we want to dial. /// - /// We manage this through a channel to avoid locks as part of [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). + /// We manage this through a channel to avoid locks as part of + /// [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). 
dial_sender: mpsc::Sender, } diff --git a/protocols/stream/tests/lib.rs b/protocols/stream/tests/lib.rs index cd6caaced5e..425b49adfaf 100644 --- a/protocols/stream/tests/lib.rs +++ b/protocols/stream/tests/lib.rs @@ -5,23 +5,20 @@ use libp2p_identity::PeerId; use libp2p_stream as stream; use libp2p_swarm::{StreamProtocol, Swarm}; use libp2p_swarm_test::SwarmExt as _; +use libp2p_test_utils::EnvFilter; use stream::OpenStreamError; use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; const PROTOCOL: StreamProtocol = StreamProtocol::new("/test"); #[tokio::test] async fn dropping_incoming_streams_deregisters() { - let _ = tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::DEBUG.into()) - .from_env() - .unwrap(), - ) - .with_test_writer() - .try_init(); + libp2p_test_utils::with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env() + .unwrap(), + ); let mut swarm1 = Swarm::new_ephemeral(|_| stream::Behaviour::new()); let mut swarm2 = Swarm::new_ephemeral(|_| stream::Behaviour::new()); diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index ee985042b68..cea8efb1e3f 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -32,7 +32,6 @@ use std::{ time::Duration, }; -use crate::tokio::{is_addr_global, Gateway}; use futures::{channel::oneshot, Future, StreamExt}; use futures_timer::Delay; use igd_next::PortMappingProtocol; @@ -46,6 +45,8 @@ use libp2p_swarm::{ NetworkBehaviour, NewListenAddr, ToSwarm, }; +use crate::tokio::{is_addr_global, Gateway}; + /// The duration in seconds of a port mapping on the gateway. const MAPPING_DURATION: u32 = 3600; @@ -286,8 +287,9 @@ impl NetworkBehaviour for Behaviour { match &mut self.state { GatewayState::Searching(_) => { - // As the gateway is not yet available we add the mapping with `MappingState::Inactive` - // so that when and if it becomes available we map it. + // As the gateway is not yet available we add the mapping with + // `MappingState::Inactive` so that when and if it + // becomes available we map it. self.mappings.insert( Mapping { listener_id, diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs index 8a74d7e8f63..d7a746f78df 100644 --- a/protocols/upnp/src/lib.rs +++ b/protocols/upnp/src/lib.rs @@ -24,7 +24,6 @@ //! implements the [`libp2p_swarm::NetworkBehaviour`] trait. //! This struct will automatically try to map the ports externally to internal //! addresses on the gateway. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs index b2cad6fa5a7..67ef52f9608 100644 --- a/protocols/upnp/src/tokio.rs +++ b/protocols/upnp/src/tokio.rs @@ -20,7 +20,6 @@ use std::{error::Error, net::IpAddr}; -use crate::behaviour::{GatewayEvent, GatewayRequest}; use futures::{ channel::{mpsc, oneshot}, SinkExt, StreamExt, @@ -28,8 +27,9 @@ use futures::{ use igd_next::SearchOptions; pub use crate::behaviour::Behaviour; +use crate::behaviour::{GatewayEvent, GatewayRequest}; -//TODO: remove when `IpAddr::is_global` stabilizes. +// TODO: remove when `IpAddr::is_global` stabilizes. 
pub(crate) fn is_addr_global(addr: IpAddr) -> bool { match addr { IpAddr::V4(ip) => { diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 00000000000..fe1850ee986 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,10 @@ +# Imports +reorder_imports = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" + +# Docs +wrap_comments = true +comment_width = 100 +normalize_comments = true +format_code_in_doc_comments = true diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 91c643a459d..febd2a6455a 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -17,7 +17,6 @@ proc-macro = true heck = "0.5" quote = "1.0" syn = { version = "2.0.66", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } -proc-macro2 = "1.0" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 258c0b976c8..41b909f329f 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -23,12 +23,12 @@ mod syn_ext; -use crate::syn_ext::RequireStrLit; use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; -use syn::punctuated::Punctuated; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token}; +use syn::{parse_macro_input, punctuated::Punctuated, Data, DataStruct, DeriveInput, Meta, Token}; + +use crate::syn_ext::RequireStrLit; /// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See /// the trait documentation for better description. diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index 5700460b3a6..1fd213e12f6 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -2,6 +2,8 @@ - Add `tokio` runtime support and make `tokio` and `async-std` runtimes optional behind features. See [PR 5551]. + - Update default for idle-connection-timeout to 10s on `SwarmExt::new_ephemeral` methods. + See [PR 4967](https://github.com/libp2p/rust-libp2p/pull/4967). [PR 5551]: https://github.com/libp2p/rust-libp2p/pull/5551 diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index 7ac7c900deb..4a0d5ee8c71 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -20,7 +20,6 @@ libp2p-swarm = { workspace = true } libp2p-tcp = { workspace = true } libp2p-yamux = { workspace = true } futures = { workspace = true } -rand = "0.8.5" tracing = { workspace = true } futures-timer = "3.0.3" diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index bcab6e5b700..0bc417dd8b1 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -18,27 +18,32 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{fmt::Debug, future::IntoFuture, time::Duration}; + use async_trait::async_trait; -use futures::future::{BoxFuture, Either}; -use futures::{FutureExt, StreamExt}; +use futures::{ + future::{BoxFuture, Either}, + FutureExt, StreamExt, +}; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; -use std::fmt::Debug; -use std::future::IntoFuture; -use std::time::Duration; +use libp2p_swarm::{ + dial_opts::{DialOpts, PeerCondition}, + NetworkBehaviour, Swarm, SwarmEvent, +}; -/// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. +/// An extension trait for [`Swarm`] that makes it +/// easier to set up a network of [`Swarm`]s for tests. #[async_trait] pub trait SwarmExt { type NB: NetworkBehaviour; /// Create a new [`Swarm`] with an ephemeral identity and the `async-std` runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a + /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the + /// multiplexer. However, these details should not be relied + /// upon by the test and may change at any time. #[cfg(feature = "async-std")] fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where @@ -46,19 +51,22 @@ pub trait SwarmExt { /// Create a new [`Swarm`] with an ephemeral identity and the `tokio` runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a + /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the + /// multiplexer. However, these details should not be relied + /// upon by the test and may change at any time. #[cfg(feature = "tokio")] fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized; - /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// Establishes a connection to the given [`Swarm`], polling both of them until the connection + /// is established. /// /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. /// By default, this iterator will not yield any addresses. - /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. + /// To add listen addresses as external addresses, use + /// [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, @@ -66,10 +74,12 @@ pub trait SwarmExt { /// Dial the provided address and wait until a connection has been established. /// - /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always possible. 
- /// This function only abstracts away the "dial and wait for `ConnectionEstablished` event" part. + /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always + /// possible. This function only abstracts away the "dial and wait for + /// `ConnectionEstablished` event" part. /// - /// Because we don't have access to the other [`Swarm`], we can't guarantee that it makes progress. + /// Because we don't have access to the other [`Swarm`], + /// we can't guarantee that it makes progress. async fn dial_and_wait(&mut self, addr: Multiaddr) -> PeerId; /// Wait for specified condition to return `Some`. @@ -78,7 +88,8 @@ pub trait SwarmExt { P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; - /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. + /// Listens for incoming connections, polling the [`Swarm`] until the + /// transport is ready to accept connections. /// /// The first address is for the memory transport, the second one for the TCP transport. fn listen(&mut self) -> ListenFuture<&mut Self>; @@ -102,17 +113,19 @@ pub trait SwarmExt { /// /// ## Number of events /// -/// The number of events is configured via const generics based on the array size of the return type. -/// This allows the compiler to infer how many events you are expecting based on how you use this function. -/// For example, if you expect the first [`Swarm`] to emit 2 events, you should assign the first variable of the returned tuple value to an array of size 2. -/// This works especially well if you directly pattern-match on the return value. +/// The number of events is configured via const generics based on the array size of the return +/// type. This allows the compiler to infer how many events you are expecting based on how you use +/// this function. For example, if you expect the first [`Swarm`] to emit 2 events, you should +/// assign the first variable of the returned tuple value to an array of size 2. This works +/// especially well if you directly pattern-match on the return value. /// /// ## Type of event /// /// This function utilizes the [`TryIntoOutput`] trait. /// Similar as to the number of expected events, the type of event is inferred based on your usage. /// If you match against a [`SwarmEvent`], the first [`SwarmEvent`] will be returned. -/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. +/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not +/// [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. /// /// You can implement the [`TryIntoOutput`] for any other type to further customize this behaviour. /// @@ -120,13 +133,16 @@ pub trait SwarmExt { /// /// This function is similar to joining two futures with two crucial differences: /// 1. As described above, it allows you to obtain more than a single event. -/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they already has emitted all expected events**. +/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they have already emitted +/// all expected events**. /// /// Especially (2) is crucial for our usage of this function. /// If a [`Swarm`] is not polled, nothing within it makes progress. /// -/// This can "starve" the other swarm which for example may wait for another message to be sent on a connection.
+/// This can "starve" the other swarm which for example may wait for another message to be sent on a +/// connection. /// -/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be polled, even after it emitted its events. +/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be +/// polled, even after it emitted its events. pub async fn drive< TBehaviour1, const NUM_EVENTS_SWARM_1: usize, @@ -230,8 +246,7 @@ where transport, behaviour_fn(identity), peer_id, - libp2p_swarm::Config::with_async_std_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + libp2p_swarm::Config::with_async_std_executor(), ) } @@ -258,8 +273,7 @@ where transport, behaviour_fn(identity), peer_id, - libp2p_swarm::Config::with_tokio_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + libp2p_swarm::Config::with_tokio_executor(), ) } @@ -385,20 +399,24 @@ pub struct ListenFuture { } impl ListenFuture { - /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// Adds the memory address we are starting to listen on as an external address using + /// [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because within a process, memory addresses are "globally" + /// reachable. However, some tests depend on which addresses are external and need this to + /// be configurable so it is not a good default. pub fn with_memory_addr_external(mut self) -> Self { self.add_memory_external = true; self } - /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// Adds the TCP address we are starting to listen on as an external address using + /// [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for + /// other [`Swarm`]s. However, some tests depend on which addresses are external and need + /// this to be configurable so it is not a good default. pub fn with_tcp_addr_external(mut self) -> Self { self.add_tcp_external = true; diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 0109a33747c..250d347430b 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -6,6 +6,19 @@ - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + +- Update default for idle-connection-timeout to 10s. + See [PR 4967](https://github.com/libp2p/rust-libp2p/pull/4967). + +- Deprecate `ConnectionHandler::{InboundOpenInfo, OutboundOpenInfo}` associated types. + Previously, users could tag pending substreams with custom data and retrieve the data + after the substream has been negotiated.
+ But substreams themselves are completely interchangeable, so users should instead track + additional data inside `ConnectionHandler` after negotiation. + See [PR 5242](https://github.com/libp2p/rust-libp2p/pull/5242). + ## 0.45.1 - Update `libp2p-swarm-derive` to version `0.35.0`, see [PR 5545] diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 4c3b8821ed6..b7e0fd73b5e 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -15,7 +15,7 @@ either = "1.11.0" fnv = "1.0" futures = { workspace = true } futures-timer = "3.0.3" -getrandom = { version = "0.2.15", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature +getrandom = { workspace = true, features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature web-time = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } @@ -55,7 +55,7 @@ criterion = { version = "0.5", features = ["async_tokio"] } once_cell = "1.19.0" trybuild = "1.0.95" tokio = { workspace = true, features = ["time", "rt", "macros", "rt-multi-thread"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [[test]] name = "swarm_derive" diff --git a/swarm/benches/connection_handler.rs b/swarm/benches/connection_handler.rs index 09340421f83..3ae75288208 100644 --- a/swarm/benches/connection_handler.rs +++ b/swarm/benches/connection_handler.rs @@ -1,3 +1,5 @@ +use std::{convert::Infallible, sync::atomic::AtomicUsize}; + use async_std::stream::StreamExt; use criterion::{criterion_group, criterion_main, Criterion}; use libp2p_core::{ @@ -5,7 +7,6 @@ use libp2p_core::{ }; use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionHandler, NetworkBehaviour, StreamProtocol}; -use std::{convert::Infallible, sync::atomic::AtomicUsize}; use web_time::Duration; macro_rules! gen_behaviour { @@ -82,7 +83,7 @@ benchmarks!
{ SpinningBehaviour20::bench().name(m).poll_count(500).protocols_per_behaviour(100), ]; } -//fn main() {} +// fn main() {} trait BigBehaviour: Sized { fn behaviours(&mut self) -> &mut [SpinningBehaviour]; @@ -280,9 +281,7 @@ impl ConnectionHandler for SpinningHandler { type OutboundOpenInfo = (); - fn listen_protocol( - &self, - ) -> libp2p_swarm::SubstreamProtocol { + fn listen_protocol(&self) -> libp2p_swarm::SubstreamProtocol { libp2p_swarm::SubstreamProtocol::new(Upgrade(self.protocols), ()) } @@ -290,11 +289,7 @@ impl ConnectionHandler for SpinningHandler { &mut self, cx: &mut std::task::Context<'_>, ) -> std::task::Poll< - libp2p_swarm::ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - >, + libp2p_swarm::ConnectionHandlerEvent, > { if self.iter_count == usize::MAX { return std::task::Poll::Pending; @@ -321,8 +316,6 @@ impl ConnectionHandler for SpinningHandler { _event: libp2p_swarm::handler::ConnectionEvent< Self::InboundProtocol, Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, >, ) { } diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 35aed12fba5..8c8c5998f67 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -24,23 +24,22 @@ mod listen_addresses; mod peer_addresses; pub mod toggle; -pub use external_addresses::ExternalAddresses; -pub use listen_addresses::ListenAddresses; -pub use peer_addresses::PeerAddresses; +use std::task::{Context, Poll}; -use crate::connection::ConnectionId; -use crate::dial_opts::DialOpts; -use crate::listen_opts::ListenOpts; -use crate::{ - ConnectionDenied, ConnectionError, ConnectionHandler, DialError, ListenError, THandler, - THandlerInEvent, THandlerOutEvent, -}; +pub use external_addresses::ExternalAddresses; use libp2p_core::{ transport::{ListenerId, PortUse}, ConnectedPoint, Endpoint, Multiaddr, }; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; +pub use listen_addresses::ListenAddresses; +pub use peer_addresses::PeerAddresses; + +use crate::{ + connection::ConnectionId, dial_opts::DialOpts, listen_opts::ListenOpts, ConnectionDenied, + ConnectionError, ConnectionHandler, DialError, ListenError, THandler, THandlerInEvent, + THandlerOutEvent, +}; /// A [`NetworkBehaviour`] defines the behaviour of the local node on the network. /// @@ -101,25 +100,25 @@ use std::{task::Context, task::Poll}; /// #[behaviour(to_swarm = "Event")] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, /// } /// /// enum Event { -/// Identify(identify::Event), -/// Ping(ping::Event), +/// Identify(identify::Event), +/// Ping(ping::Event), /// } /// /// impl From for Event { -/// fn from(event: identify::Event) -> Self { -/// Self::Identify(event) -/// } +/// fn from(event: identify::Event) -> Self { +/// Self::Identify(event) +/// } /// } /// /// impl From for Event { -/// fn from(event: ping::Event) -> Self { -/// Self::Ping(event) -/// } +/// fn from(event: ping::Event) -> Self { +/// Self::Ping(event) +/// } /// } /// ``` pub trait NetworkBehaviour: 'static { @@ -131,8 +130,8 @@ pub trait NetworkBehaviour: 'static { /// Callback that is invoked for every new inbound connection. /// - /// At this point in the connection lifecycle, only the remote's and our local address are known. - /// We have also already allocated a [`ConnectionId`]. 
+ /// At this point in the connection lifecycle, only the remote's and our local address are + /// known. We have also already allocated a [`ConnectionId`]. /// /// Any error returned from this function will immediately abort the dial attempt. fn handle_pending_inbound_connection( @@ -148,9 +147,10 @@ pub trait NetworkBehaviour: 'static { /// /// This is invoked once another peer has successfully dialed us. /// - /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. - /// Returning an error will immediately close the connection. + /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] + /// succeeded in the dial. In order to actually use this connection, this function must + /// return a [`ConnectionHandler`]. Returning an error will immediately close the + /// connection. /// /// Note when any composed behaviour returns an error the connection will be closed and a /// [`FromSwarm::ListenFailure`] event will be emitted. @@ -168,10 +168,14 @@ pub trait NetworkBehaviour: 'static { /// /// - The [`PeerId`], if known. Remember that we can dial without a [`PeerId`]. /// - All addresses passed to [`DialOpts`] are passed in here too. - /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set to [`Endpoint::Dialer`] except if we are attempting a hole-punch. - /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if successful. + /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set + /// to [`Endpoint::Dialer`] except if we are attempting a hole-punch. + /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if + /// successful. /// - /// Note that the addresses returned from this function are only used for dialing if [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) is set. + /// Note that the addresses returned from this function are only used for dialing if + /// [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) + /// is set. /// /// Any error returned from this function will immediately abort the dial attempt. fn handle_pending_outbound_connection( @@ -187,9 +191,10 @@ pub trait NetworkBehaviour: 'static { /// Callback that is invoked for every established outbound connection. /// /// This is invoked once we have successfully dialed a peer. - /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. - /// Returning an error will immediately close the connection. + /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] + /// succeeded in the dial. In order to actually use this connection, this function must + /// return a [`ConnectionHandler`]. Returning an error will immediately close the + /// connection. /// /// Note when any composed behaviour returns an error the connection will be closed and a /// [`FromSwarm::DialFailure`] event will be emitted. 
@@ -240,8 +245,9 @@ pub enum ToSwarm { /// On failure, [`NetworkBehaviour::on_swarm_event`] with `DialFailure` is invoked. /// /// [`DialOpts`] provides access to the [`ConnectionId`] via [`DialOpts::connection_id`]. - /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate events with it. - /// This allows a [`NetworkBehaviour`] to identify a connection that resulted out of its own dial request. + /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate + /// events with it. This allows a [`NetworkBehaviour`] to identify a connection that + /// resulted out of its own dial request. Dial { opts: DialOpts }, /// Instructs the [`Swarm`](crate::Swarm) to listen on the provided address. @@ -253,8 +259,8 @@ pub enum ToSwarm { /// Instructs the `Swarm` to send an event to the handler dedicated to a /// connection with a peer. /// - /// If the `Swarm` is connected to the peer, the message is delivered to the [`ConnectionHandler`] - /// instance identified by the peer ID and connection ID. + /// If the `Swarm` is connected to the peer, the message is delivered to the + /// [`ConnectionHandler`] instance identified by the peer ID and connection ID. /// /// If the specified connection no longer exists, the event is silently dropped. /// @@ -278,11 +284,12 @@ pub enum ToSwarm { /// /// The emphasis on a **new** candidate is important. /// Protocols MUST take care to only emit a candidate once per "source". - /// For example, the observed address of a TCP connection does not change throughout its lifetime. - /// Thus, only one candidate should be emitted per connection. + /// For example, the observed address of a TCP connection does not change throughout its + /// lifetime. Thus, only one candidate should be emitted per connection. /// - /// This makes the report frequency of an address a meaningful data-point for consumers of this event. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. + /// This makes the report frequency of an address a meaningful data-point for consumers of this + /// event. This address will be shared with all [`NetworkBehaviour`]s via + /// [`FromSwarm::NewExternalAddrCandidate`]. /// /// This address could come from a variety of sources: /// - A protocol such as identify obtained it from a remote. @@ -290,25 +297,32 @@ pub enum ToSwarm { /// - We made an educated guess based on one of our listen addresses. NewExternalAddrCandidate(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. + /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be + /// externally reachable. /// - /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally reachable on this address. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if + /// we are indeed externally reachable on this address. This address will be shared with + /// all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. ExternalAddrConfirmed(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under the provided address. 
+ /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under + /// the provided address. /// /// This expires an address that was earlier confirmed via [`ToSwarm::ExternalAddrConfirmed`]. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// This address will be shared with all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrExpired`]. ExternalAddrExpired(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. + /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given + /// peer. /// - /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. - /// In most cases, stopping to "use" a connection is enough to have it closed. - /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll + /// [`ConnectionHandler::poll_close`] to completion. In most cases, no longer "using" a + /// connection is enough to have it closed. The keep-alive algorithm will close a + /// connection automatically once all [`ConnectionHandler`]s are idle. /// - /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Use this command if you want to close a connection _despite_ it still being in use by one + /// or more handlers. CloseConnection { /// The peer to disconnect. peer_id: PeerId, @@ -316,7 +330,8 @@ pub enum ToSwarm { connection: CloseConnection, }, - /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that to other [`NetworkBehaviour`]s. + /// Reports the external address of a remote peer to the [`Swarm`](crate::Swarm) and through that + /// to other [`NetworkBehaviour`]s. NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } @@ -440,8 +455,8 @@ pub enum FromSwarm<'a> { /// Informs the behaviour that an error /// happened on an incoming connection during its initial handshake. /// - /// This can include, for example, an error during the handshake of the encryption layer, or the - /// connection unexpectedly closed. + /// This can include, for example, an error during the handshake of the encryption layer, or + /// the connection unexpectedly closed. ListenFailure(ListenFailure<'a>), /// Informs the behaviour that a new listener was created. NewListener(NewListener), @@ -455,11 +470,13 @@ pub enum FromSwarm<'a> { ListenerError(ListenerError<'a>), /// Informs the behaviour that a listener closed. ListenerClosed(ListenerClosed<'a>), - /// Informs the behaviour that we have discovered a new candidate for an external address for us. + /// Informs the behaviour that we have discovered a new candidate for an external address for + /// us. NewExternalAddrCandidate(NewExternalAddrCandidate<'a>), /// Informs the behaviour that an external address of the local node was confirmed. ExternalAddrConfirmed(ExternalAddrConfirmed<'a>), - /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed. + /// Informs the behaviour that an external address of the local node expired, i.e. is no longer + /// confirmed. ExternalAddrExpired(ExternalAddrExpired<'a>), /// Informs the behaviour that we have discovered a new external address for a remote peer. 
NewExternalAddrOfPeer(NewExternalAddrOfPeer<'a>), @@ -559,7 +576,8 @@ pub struct ListenerClosed<'a> { pub reason: Result<(), &'a std::io::Error>, } -/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us. +/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address +/// for us. #[derive(Debug, Clone, Copy)] pub struct NewExternalAddrCandidate<'a> { pub addr: &'a Multiaddr, @@ -577,7 +595,8 @@ pub struct ExternalAddrExpired<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer was detected. +/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer +/// was detected. #[derive(Clone, Copy, Debug)] pub struct NewExternalAddrOfPeer<'a> { pub peer_id: PeerId, diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index 7a51303e74d..b9a86e1b9d8 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::{self, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; +use std::task::{Context, Poll}; + use either::Either; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; + +use crate::{ + behaviour::{self, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent, +}; /// Implementation of [`NetworkBehaviour`] that can be either of two implementations. impl NetworkBehaviour for Either diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 579f46fe486..ba2dd3eb890 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -1,6 +1,7 @@ -use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; use libp2p_core::Multiaddr; +use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; + /// The maximum number of local external addresses. When reached any /// further externally reported addresses are ignored. The behaviour always /// tracks all its listen addresses. @@ -78,17 +79,20 @@ impl ExternalAddresses { } fn push_front(&mut self, addr: &Multiaddr) { - self.addresses.insert(0, addr.clone()); // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so this isn't very expensive. + // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so + // this isn't very expensive. 
+ self.addresses.insert(0, addr.clone()); } } #[cfg(test)] mod tests { - use super::*; use libp2p_core::multiaddr::Protocol; use once_cell::sync::Lazy; use rand::Rng; + use super::*; + #[test] fn new_external_addr_returns_correct_changed_value() { let mut addresses = ExternalAddresses::default(); diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs index 6076f5e7923..0c685d798c7 100644 --- a/swarm/src/behaviour/listen_addresses.rs +++ b/swarm/src/behaviour/listen_addresses.rs @@ -1,7 +1,9 @@ -use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; -use libp2p_core::Multiaddr; use std::collections::HashSet; +use libp2p_core::Multiaddr; + +use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; + /// Utility struct for tracking the addresses a [`Swarm`](crate::Swarm) is listening on. #[derive(Debug, Default, Clone)] pub struct ListenAddresses { @@ -32,10 +34,11 @@ impl ListenAddresses { #[cfg(test)] mod tests { - use super::*; use libp2p_core::{multiaddr::Protocol, transport::ListenerId}; use once_cell::sync::Lazy; + use super::*; + #[test] fn new_listen_addr_returns_correct_changed_value() { let mut addresses = ListenAddresses::default(); diff --git a/swarm/src/behaviour/peer_addresses.rs b/swarm/src/behaviour/peer_addresses.rs index 1eeead56ca1..5aeae7741d5 100644 --- a/swarm/src/behaviour/peer_addresses.rs +++ b/swarm/src/behaviour/peer_addresses.rs @@ -1,12 +1,10 @@ -use crate::behaviour::FromSwarm; -use crate::{DialError, DialFailure, NewExternalAddrOfPeer}; +use std::num::NonZeroUsize; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; - use lru::LruCache; -use std::num::NonZeroUsize; +use crate::{behaviour::FromSwarm, DialError, DialFailure, NewExternalAddrOfPeer}; /// Struct for tracking peers' external addresses of the [`Swarm`](crate::Swarm). #[derive(Debug)] @@ -46,7 +44,6 @@ impl PeerAddresses { /// Appends address to the existing set if peer addresses already exist. /// Creates a new cache entry for peer_id if no addresses are present. /// Returns true if the newly added address was not previously in the cache. - /// pub fn add(&mut self, peer: PeerId, address: Multiaddr) -> bool { match prepare_addr(&peer, &address) { Ok(address) => { @@ -98,17 +95,17 @@ impl Default for PeerAddresses { #[cfg(test)] mod tests { - use super::*; use std::io; - use crate::ConnectionId; use libp2p_core::{ multiaddr::Protocol, transport::{memory::MemoryTransportError, TransportError}, }; - use once_cell::sync::Lazy; + use super::*; + use crate::ConnectionId; + #[test] fn new_peer_addr_returns_correct_changed_value() { let mut cache = PeerAddresses::default(); diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 3dde364bf19..a706187a40c 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -18,22 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::FromSwarm; -use crate::connection::ConnectionId; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; -use crate::{ - ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, -}; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use libp2p_core::transport::PortUse; -use libp2p_core::{upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; + +use crate::{ + behaviour::FromSwarm, + connection::ConnectionId, + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::SendWrapper, + ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, +}; /// Implementation of `NetworkBehaviour` that can be either in the disabled or enabled state. /// @@ -198,6 +200,7 @@ impl ToggleConnectionHandler where TInner: ConnectionHandler, { + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. fn on_fully_negotiated_inbound( &mut self, FullyNegotiatedInbound { @@ -229,7 +232,7 @@ where panic!("Unexpected Either::Right in enabled `on_fully_negotiated_inbound`.") } } - + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. fn on_listen_upgrade_error( &mut self, ListenUpgradeError { info, error: err }: ListenUpgradeError< @@ -265,6 +268,7 @@ where } } +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. 
impl ConnectionHandler for ToggleConnectionHandler where TInner: ConnectionHandler, diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 78c007fd71d..8e913aa80e5 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -23,42 +23,47 @@ mod error; pub(crate) mod pool; mod supported_protocols; +use std::{ + collections::{HashMap, HashSet}, + fmt, + fmt::{Display, Formatter}, + future::Future, + io, mem, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll, Waker}, + time::Duration, +}; + pub use error::ConnectionError; pub(crate) use error::{ PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, }; -use libp2p_core::transport::PortUse; +use futures::{future::BoxFuture, stream, stream::FuturesUnordered, FutureExt, StreamExt}; +use futures_timer::Delay; +use libp2p_core::{ + connection::ConnectedPoint, + multiaddr::Multiaddr, + muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}, + transport::PortUse, + upgrade, + upgrade::{NegotiationError, ProtocolError}, + Endpoint, +}; +use libp2p_identity::PeerId; pub use supported_protocols::SupportedProtocols; +use web_time::Instant; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, FullyNegotiatedInbound, - FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsChange, UpgradeInfoSend, -}; -use crate::stream::ActiveStreamCounter; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, + ProtocolsChange, UpgradeInfoSend, + }, + stream::ActiveStreamCounter, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend}, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::StreamExt; -use futures::{stream, FutureExt}; -use futures_timer::Delay; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{NegotiationError, ProtocolError}; -use libp2p_core::Endpoint; -use libp2p_identity::PeerId; -use std::collections::{HashMap, HashSet}; -use std::fmt::{Display, Formatter}; -use std::future::Future; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::task::Waker; -use std::time::Duration; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; -use web_time::Instant; static NEXT_CONNECTION_ID: AtomicUsize = AtomicUsize::new(1); @@ -72,7 +77,8 @@ impl ConnectionId { /// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and not reused. /// This constructor does not, hence the _unchecked_. /// - /// It is primarily meant for allowing manual tests of [`NetworkBehaviour`](crate::NetworkBehaviour)s. + /// It is primarily meant for allowing manual tests of + /// [`NetworkBehaviour`](crate::NetworkBehaviour)s. pub fn new_unchecked(id: usize) -> Self { Self(id) } @@ -117,6 +123,7 @@ where /// The underlying handler. handler: THandler, /// Futures that upgrade incoming substreams. + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. 
negotiating_in: FuturesUnordered< StreamUpgrade< THandler::InboundOpenInfo, @@ -125,6 +132,7 @@ where >, >, /// Futures that upgrade outgoing substreams. + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. negotiating_out: FuturesUnordered< StreamUpgrade< THandler::OutboundOpenInfo, @@ -147,8 +155,9 @@ where max_negotiating_inbound_streams: usize, /// Contains all upgrades that are waiting for a new outbound substream. /// - /// The upgrade timeout is already ticking here so this may fail in case the remote is not quick - /// enough in providing us with a new stream. + /// The upgrade timeout is already ticking here so this may fail in case the remote is not + /// quick enough in providing us with a new stream. + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. requested_substreams: FuturesUnordered< SubstreamRequested, >, @@ -162,6 +171,7 @@ where stream_counter: ActiveStreamCounter, } +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. impl fmt::Debug for Connection where THandler: ConnectionHandler + fmt::Debug, @@ -223,7 +233,8 @@ where self.handler.on_behaviour_event(event); } - /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete. + /// Begins an orderly shutdown of the connection, returning a stream of final events and a + /// `Future` that resolves when connection shutdown is complete. pub(crate) fn close( self, ) -> ( @@ -320,7 +331,8 @@ where } } - // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams. + // In case the [`ConnectionHandler`] can not make any more progress, poll the + // negotiating outbound streams. match negotiating_out.poll_next_unpin(cx) { Poll::Pending | Poll::Ready(None) => {} Poll::Ready(Some((info, Ok(protocol)))) => { @@ -368,7 +380,8 @@ where } // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams or have any active streams shutdown is always postponed. + // As long as we're still negotiating substreams or have + // any active streams shutdown is always postponed. if negotiating_in.is_empty() && negotiating_out.is_empty() && requested_substreams.is_empty() @@ -419,7 +432,9 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, + // handler can potentially make progress again. + continue; } } } @@ -436,7 +451,9 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, + // handler can potentially make progress again. + continue; } } } @@ -451,10 +468,12 @@ where for change in changes { handler.on_connection_event(ConnectionEvent::LocalProtocolsChange(change)); } - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, handler can potentially make progress again. + continue; } - return Poll::Pending; // Nothing can make progress, return `Pending`. + // Nothing can make progress, return `Pending`. + return Poll::Pending; } } @@ -482,7 +501,8 @@ fn compute_new_shutdown( ) -> Option { match (current_shutdown, handler_keep_alive) { (_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap), - (Shutdown::Later(_), false) => None, // Do nothing, i.e. let the shutdown timer continue to tick. + // Do nothing, i.e. 
let the shutdown timer continue to tick. + (Shutdown::Later(_), false) => None, (_, false) => { let now = Instant::now(); let safe_keep_alive = checked_add_fraction(now, idle_timeout); @@ -493,10 +513,12 @@ fn compute_new_shutdown( } } -/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds. +/// Repeatedly halves and adds the [`Duration`] +/// to the [`Instant`] until [`Instant::checked_add`] succeeds. /// -/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent. -/// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. +/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can +/// represent. The [`Duration`] computed by this function may not be the longest possible that +/// we can add to `now` but it will work. fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); @@ -767,24 +789,25 @@ impl> std::hash::Hash for AsStrHashEq { #[cfg(test)] mod tests { + use std::{ + convert::Infallible, + sync::{Arc, Weak}, + time::Instant, + }; + + use futures::{future, AsyncRead, AsyncWrite}; + use libp2p_core::{ + upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + StreamMuxer, + }; + use quickcheck::*; + use super::*; use crate::dummy; - use futures::future; - use futures::AsyncRead; - use futures::AsyncWrite; - use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; - use libp2p_core::StreamMuxer; - use quickcheck::*; - use std::convert::Infallible; - use std::sync::{Arc, Weak}; - use std::time::Instant; - use tracing_subscriber::EnvFilter; #[test] fn max_negotiating_inbound_streams() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(max_negotiating_inbound_streams: u8) { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); @@ -906,7 +929,8 @@ mod tests { ); assert!(connection.handler.remote_removed.is_empty()); - // Third, stop listening on a protocol it never advertised (we can't control what handlers do so this needs to be handled gracefully). + // Third, stop listening on a protocol it never advertised (we can't control what handlers + // do so this needs to be handled gracefully). 
connection.handler.remote_removes_support_for(&["/baz"]); let _ = connection.poll_noop_waker(); @@ -951,9 +975,7 @@ mod tests { #[test] fn checked_add_fraction_can_add_u64_max() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let start = Instant::now(); let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); @@ -963,9 +985,7 @@ mod tests { #[test] fn compute_new_shutdown_does_not_panic() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); #[derive(Debug)] struct ArbitraryShutdown(Shutdown); @@ -1173,20 +1193,13 @@ mod tests { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol( - &self, - ) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()).with_timeout(self.upgrade_timeout) } fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { // TODO: remove when Rust 1.82 is MSRV @@ -1226,13 +1239,7 @@ mod tests { fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - >, - > { + ) -> Poll> { if self.outbound_requested { self.outbound_requested = false; return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { @@ -1253,9 +1260,7 @@ mod tests { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol( - &self, - ) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new( ManyProtocolsUpgrade { protocols: Vec::from_iter(self.active_protocols.clone()), @@ -1266,12 +1271,7 @@ mod tests { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::LocalProtocolsChange(ProtocolsChange::Added(added)) => { @@ -1303,13 +1303,7 @@ mod tests { fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - >, - > { + ) -> Poll> { if let Some(event) = self.events.pop() { return Poll::Ready(event); } diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 33aa81c19a9..39e5a88fca6 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -18,11 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::TransportError; -use crate::Multiaddr; -use crate::{ConnectedPoint, PeerId}; use std::{fmt, io}; +use crate::{transport::TransportError, ConnectedPoint, Multiaddr, PeerId}; + /// Errors that can occur in the context of an established `Connection`. #[derive(Debug)] pub enum ConnectionError { diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index b2accf745ef..f21a7307105 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -18,41 +18,41 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::connection::{Connection, ConnectionId, PendingPoint}; -use crate::{ - connection::{ - Connected, ConnectionError, IncomingInfo, PendingConnectionError, - PendingInboundConnectionError, PendingOutboundConnectionError, - }, - transport::TransportError, - ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId, +use std::{ + collections::HashMap, + convert::Infallible, + fmt, + num::{NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll, Waker}, }; + use concurrent_dial::ConcurrentDial; use fnv::FnvHashMap; -use futures::prelude::*; -use futures::stream::SelectAll; use futures::{ channel::{mpsc, oneshot}, future::{poll_fn, BoxFuture, Either}, + prelude::*, ready, - stream::FuturesUnordered, + stream::{FuturesUnordered, SelectAll}, }; -use libp2p_core::connection::Endpoint; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::PortUse; -use std::convert::Infallible; -use std::task::Waker; -use std::{ - collections::HashMap, - fmt, - num::{NonZeroU8, NonZeroUsize}, - pin::Pin, - task::Context, - task::Poll, +use libp2p_core::{ + connection::Endpoint, + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::PortUse, }; use tracing::Instrument; use web_time::{Duration, Instant}; +use crate::{ + connection::{ + Connected, Connection, ConnectionError, ConnectionId, IncomingInfo, PendingConnectionError, + PendingInboundConnectionError, PendingOutboundConnectionError, PendingPoint, + }, + transport::TransportError, + ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId, +}; + mod concurrent_dial; mod task; @@ -115,7 +115,8 @@ where /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, - /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is back-pressured. + /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is + /// back-pressured. per_connection_event_buffer_size: usize, /// The executor to use for running connection tasks. Can either be a global executor @@ -207,7 +208,7 @@ struct PendingConnection { impl PendingConnection { fn is_for_same_remote_as(&self, other: PeerId) -> bool { - self.peer_id.map_or(false, |peer| peer == other) + self.peer_id == Some(other) } /// Aborts the connection attempt, closing the connection. @@ -247,13 +248,11 @@ pub(crate) enum PoolEvent { /// /// A connection may close if /// - /// * it encounters an error, which includes the connection being - /// closed by the remote. In this case `error` is `Some`. - /// * it was actively closed by [`EstablishedConnection::start_close`], - /// i.e. a successful, orderly close. - /// * it was actively closed by [`Pool::disconnect`], i.e. - /// dropped without an orderly close. - /// + /// * it encounters an error, which includes the connection being closed by the remote. In + /// this case `error` is `Some`. + /// * it was actively closed by [`EstablishedConnection::start_close`], i.e. a successful, + /// orderly close. + /// * it was actively closed by [`Pool::disconnect`], i.e. dropped without an orderly close. ConnectionClosed { id: ConnectionId, /// Information about the connection that errored. @@ -554,6 +553,7 @@ where /// Polls the connection pool for events. #[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. 
pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, @@ -990,7 +990,7 @@ impl PoolConfig { task_command_buffer_size: 32, per_connection_event_buffer_size: 7, dial_concurrency_factor: NonZeroU8::new(8).expect("8 > 0"), - idle_connection_timeout: Duration::ZERO, + idle_connection_timeout: Duration::from_secs(10), substream_upgrade_protocol_override: None, max_negotiating_inbound_streams: 128, } diff --git a/swarm/src/connection/pool/concurrent_dial.rs b/swarm/src/connection/pool/concurrent_dial.rs index 57e4b078098..99f0b385884 100644 --- a/swarm/src/connection/pool/concurrent_dial.rs +++ b/swarm/src/connection/pool/concurrent_dial.rs @@ -18,7 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{transport::TransportError, Multiaddr}; +use std::{ + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Future}, ready, @@ -26,11 +31,8 @@ use futures::{ }; use libp2p_core::muxing::StreamMuxerBox; use libp2p_identity::PeerId; -use std::{ - num::NonZeroU8, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::{transport::TransportError, Multiaddr}; type Dial = BoxFuture< 'static, diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 3b808a30fd1..3a82e5c11d1 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -21,6 +21,15 @@ //! Async functions driving pending and established connections in the form of a task. +use std::{convert::Infallible, pin::Pin}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{poll_fn, Either, Future}, + SinkExt, StreamExt, +}; +use libp2p_core::muxing::StreamMuxerBox; + use super::concurrent_dial::ConcurrentDial; use crate::{ connection::{ @@ -30,14 +39,6 @@ use crate::{ transport::TransportError, ConnectionHandler, Multiaddr, PeerId, }; -use futures::{ - channel::{mpsc, oneshot}, - future::{poll_fn, Either, Future}, - SinkExt, StreamExt, -}; -use libp2p_core::muxing::StreamMuxerBox; -use std::convert::Infallible; -use std::pin::Pin; /// Commands that can be sent to a task driving an established connection. #[derive(Debug)] diff --git a/swarm/src/connection/supported_protocols.rs b/swarm/src/connection/supported_protocols.rs index 124ec93d669..c167bf88649 100644 --- a/swarm/src/connection/supported_protocols.rs +++ b/swarm/src/connection/supported_protocols.rs @@ -1,7 +1,7 @@ -use crate::handler::ProtocolsChange; -use crate::StreamProtocol; use std::collections::HashSet; +use crate::{handler::ProtocolsChange, StreamProtocol}; + #[derive(Default, Clone, Debug)] pub struct SupportedProtocols { protocols: HashSet, diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index 4f5b621327c..f569a38df1c 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -19,14 +19,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::ConnectionId; -use libp2p_core::connection::Endpoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; use std::num::NonZeroU8; +use libp2p_core::{connection::Endpoint, multiaddr::Protocol, transport::PortUse, Multiaddr}; +use libp2p_identity::PeerId; + +use crate::ConnectionId; + macro_rules! fn_override_role { () => { /// Override role of local node on connection. I.e. 
execute the dial _as a @@ -130,7 +129,8 @@ impl DialOpts { /// Get the [`ConnectionId`] of this dial attempt. /// /// All future events of this dial will be associated with this ID. - /// See [`DialFailure`](crate::DialFailure) and [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). + /// See [`DialFailure`](crate::DialFailure) and + /// [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). pub fn connection_id(&self) -> ConnectionId { self.connection_id } @@ -324,8 +324,8 @@ impl WithoutPeerIdWithAddress { /// # use libp2p_identity::PeerId; /// # /// DialOpts::peer_id(PeerId::random()) -/// .condition(PeerCondition::Disconnected) -/// .build(); +/// .condition(PeerCondition::Disconnected) +/// .build(); /// ``` #[derive(Debug, Copy, Clone, Default)] pub enum PeerCondition { @@ -338,7 +338,7 @@ pub enum PeerCondition { NotDialing, /// A combination of [`Disconnected`](PeerCondition::Disconnected) and /// [`NotDialing`](PeerCondition::NotDialing). A new dialing attempt is - /// iniated _only if_ the peer is both considered disconnected and there + /// initiated _only if_ the peer is both considered disconnected and there /// is currently no ongoing dialing attempt. #[default] DisconnectedAndNotDialing, diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index b87ef32c8f7..f8137bdbeee 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -1,19 +1,18 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, +use std::{ + convert::Infallible, + task::{Context, Poll}, }; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; + use crate::{ + behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, }; -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::Endpoint; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use std::convert::Infallible; -use std::task::{Context, Poll}; /// Implementation of [`NetworkBehaviour`] that doesn't do anything. pub struct Behaviour; @@ -61,7 +60,8 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, _event: FromSwarm) {} } -/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. +/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep +/// the connection alive. 
#[derive(Clone)] pub struct ConnectionHandler; @@ -71,9 +71,9 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); - type OutboundOpenInfo = Infallible; + type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) } @@ -86,20 +86,13 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { Poll::Pending } fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { // TODO: remove when Rust 1.82 is MSRV diff --git a/swarm/src/executor.rs b/swarm/src/executor.rs index a2abbbde6ef..db5ed6b2da4 100644 --- a/swarm/src/executor.rs +++ b/swarm/src/executor.rs @@ -1,14 +1,15 @@ //! Provides executors for spawning background tasks. -use futures::executor::ThreadPool; use std::{future::Future, pin::Pin}; +use futures::executor::ThreadPool; + /// Implemented on objects that can run a `Future` in the background. /// /// > **Note**: While it may be tempting to implement this trait on types such as -/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is -/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically -/// > be used as fallback by libp2p. The `Executor` trait should therefore only be -/// > about running `Future`s on a separate task. +/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is +/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically +/// > be used as fallback by libp2p. The `Executor` trait should therefore only be +/// > about running `Future`s on a separate task. pub trait Executor { /// Run the given future in the background until it ends. #[track_caller] diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 9e31592d68d..d9293fa41de 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -34,9 +34,9 @@ //! used protocol(s) determined by the associated types of the handlers. //! //! > **Note**: A [`ConnectionHandler`] handles one or more protocols in the context of a single -//! > connection with a remote. In order to handle a protocol that requires knowledge of -//! > the network as a whole, see the -//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. +//! > connection with a remote. In order to handle a protocol that requires knowledge of +//! > the network as a whole, see the +//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. 
pub mod either; mod map_in; @@ -46,8 +46,15 @@ mod one_shot; mod pending; mod select; -use crate::connection::AsStrHashEq; -pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use core::slice; +use std::{ + collections::{HashMap, HashSet}, + error, fmt, io, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::Multiaddr; pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; @@ -55,11 +62,8 @@ pub use pending::PendingConnectionHandler; pub use select::ConnectionHandlerSelect; use smallvec::SmallVec; -use crate::StreamProtocol; -use core::slice; -use libp2p_core::Multiaddr; -use std::collections::{HashMap, HashSet}; -use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; +pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use crate::{connection::AsStrHashEq, StreamProtocol}; /// A handler for a set of protocols used on a connection with a remote. /// @@ -71,17 +75,17 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// Communication with a remote over a set of protocols is initiated in one of two ways: /// /// 1. Dialing by initiating a new outbound substream. In order to do so, -/// [`ConnectionHandler::poll()`] must return an [`ConnectionHandlerEvent::OutboundSubstreamRequest`], -/// providing an instance of [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the -/// protocol(s). Upon success, [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionHandler::poll()`] must return an +/// [`ConnectionHandlerEvent::OutboundSubstreamRequest`], providing an instance of +/// [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the protocol(s). Upon +/// success, [`ConnectionHandler::on_connection_event`] is called with /// [`ConnectionEvent::FullyNegotiatedOutbound`] translating the final output of the upgrade. /// -/// 2. Listening by accepting a new inbound substream. When a new inbound substream -/// is created on a connection, [`ConnectionHandler::listen_protocol`] is called -/// to obtain an instance of [`libp2p_core::upgrade::InboundUpgrade`] that is used to -/// negotiate the protocol(s). Upon success, -/// [`ConnectionHandler::on_connection_event`] is called with [`ConnectionEvent::FullyNegotiatedInbound`] -/// translating the final output of the upgrade. +/// 2. Listening by accepting a new inbound substream. When a new inbound substream is created on +/// a connection, [`ConnectionHandler::listen_protocol`] is called to obtain an instance of +/// [`libp2p_core::upgrade::InboundUpgrade`] that is used to negotiate the protocol(s). Upon +/// success, [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionEvent::FullyNegotiatedInbound`] translating the final output of the upgrade. /// /// /// # Connection Keep-Alive @@ -94,27 +98,34 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// Implementors of this trait should keep in mind that the connection can be closed at any time. /// When a connection is closed gracefully, the substreams used by the handler may still /// continue reading data until the remote closes its side of the connection. +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. 
pub trait ConnectionHandler: Send + 'static { - /// A type representing the message(s) a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) + /// A type representing the message(s) a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] + /// via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) type FromBehaviour: fmt::Debug + Send + 'static; - /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. + /// A type representing message(s) a [`ConnectionHandler`] can send to a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via + /// [`ConnectionHandlerEvent::NotifyBehaviour`]. type ToBehaviour: fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. type InboundProtocol: InboundUpgradeSend; /// The outbound upgrade for the protocol(s) used by the handler. type OutboundProtocol: OutboundUpgradeSend; /// The type of additional information returned from `listen_protocol`. + #[deprecated = "Track data in ConnectionHandler instead."] type InboundOpenInfo: Send + 'static; /// The type of additional information passed to an `OutboundSubstreamRequest`. + #[deprecated = "Track data in ConnectionHandler instead."] type OutboundOpenInfo: Send + 'static; /// The [`InboundUpgrade`](libp2p_core::upgrade::InboundUpgrade) to apply on inbound /// substreams to negotiate the desired protocols. /// /// > **Note**: The returned `InboundUpgrade` should always accept all the generally - /// > supported protocols, even if in a specific context a particular one is - /// > not supported, (eg. when only allowing one substream at a time for a protocol). - /// > This allows a remote to put the list of supported protocols in a cache. + /// > supported protocols, even if in a specific context a particular one is + /// > not supported, (eg. when only allowing one substream at a time for a protocol). + /// > This allows a remote to put the list of supported protocols in a cache. fn listen_protocol(&self) -> SubstreamProtocol; /// Returns whether the connection should be kept alive. @@ -127,15 +138,21 @@ pub trait ConnectionHandler: Send + 'static { /// - We are negotiating inbound or outbound streams. /// - There are active [`Stream`](crate::Stream)s on the connection. /// - /// The combination of the above means that _most_ protocols will not need to override this method. - /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle. + /// The combination of the above means that _most_ protocols will not need to override this + /// method. This method is only invoked when all of the above are `false`, i.e. when the + /// connection is entirely idle. /// /// ## Exceptions /// - /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method. - /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite an active streams. 
+ /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) + /// need to keep a connection alive beyond these circumstances and can thus override this + /// method. + /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** + /// want to keep a connection alive despite active streams. /// - /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a particular stream from the keep-alive algorithm. + /// In that case, protocol authors can use + /// [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt a + /// particular stream out of the keep-alive algorithm. fn connection_keep_alive(&self) -> bool { false } @@ -160,7 +177,8 @@ pub trait ConnectionHandler: Send + 'static { /// To signal completion, [`Poll::Ready(None)`] should be returned. /// /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. - /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. + /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to + /// [`ConnectionHandler::poll_close`]. fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(None) } @@ -209,7 +227,7 @@ pub trait ConnectionHandler: Send + 'static { /// Enumeration with the list of the possible stream events /// to pass to [`on_connection_event`](ConnectionHandler::on_connection_event). #[non_exhaustive] -pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> { +pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI = (), OOI = ()> { /// Informs the handler about the output of a successful upgrade on a new inbound substream. FullyNegotiatedInbound(FullyNegotiatedInbound), /// Informs the handler about the output of a successful upgrade on a new outbound stream. @@ -303,28 +321,31 @@ impl /// [`ConnectionHandler`] implementation to stop a malicious remote node to open and keep alive /// an excessive amount of inbound substreams. #[derive(Debug)] -pub struct FullyNegotiatedInbound { +pub struct FullyNegotiatedInbound { pub protocol: IP::Output, pub info: IOI, } -/// [`ConnectionEvent`] variant that informs the handler about successful upgrade on a new outbound stream. +/// [`ConnectionEvent`] variant that informs the handler about successful upgrade on a new outbound +/// stream. /// /// The `protocol` field is the information that was previously passed to /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. #[derive(Debug)] -pub struct FullyNegotiatedOutbound { +pub struct FullyNegotiatedOutbound { pub protocol: OP::Output, pub info: OOI, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the address of the remote. +/// [`ConnectionEvent`] variant that informs the handler about a change in the address of the +/// remote. #[derive(Debug)] pub struct AddressChange<'a> { pub new_address: &'a Multiaddr, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported on the connection. +/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported +/// on the connection. #[derive(Debug, Clone)] pub enum ProtocolsChange<'a> { Added(ProtocolsAdded<'a>), @@ -373,9 +394,11 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from `existing_protocols`. 
Removes the protocols from `existing_protocols`. + /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from + /// `existing_protocols`. Removes the protocols from `existing_protocols`. /// - /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in `existing_protocols`. + /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in + /// `existing_protocols`. pub(crate) fn remove( existing_protocols: &mut HashSet, to_remove: HashSet, @@ -397,7 +420,8 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to `new_protocols`. + /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to + /// `new_protocols`. pub(crate) fn from_full_sets>( existing_protocols: &mut HashMap, bool>, new_protocols: impl IntoIterator, @@ -429,7 +453,8 @@ impl<'a> ProtocolsChange<'a> { let num_new_protocols = buffer.len(); // Drain all protocols that we haven't visited. - // For existing protocols that are not in `new_protocols`, the boolean will be false, meaning we need to remove it. + // For existing protocols that are not in `new_protocols`, the boolean will be false, + // meaning we need to remove it. existing_protocols.retain(|p, &mut is_supported| { if !is_supported { buffer.extend(StreamProtocol::try_from_owned(p.0.as_ref().to_owned()).ok()); @@ -502,7 +527,7 @@ pub struct ListenUpgradeError { /// The inbound substream protocol(s) are defined by [`ConnectionHandler::listen_protocol`] /// and the outbound substream protocol(s) by [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. #[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct SubstreamProtocol { +pub struct SubstreamProtocol { upgrade: TUpgrade, info: TInfo, timeout: Duration, diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index a5aab9b5fee..a349726f454 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use std::task::{Context, Poll}; + +use crate::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, + InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, + }, + upgrade::SendWrapper, +}; impl FullyNegotiatedInbound, SendWrapper>, Either> @@ -73,6 +77,7 @@ where /// Implementation of a [`ConnectionHandler`] that represents either of two [`ConnectionHandler`] /// implementations. +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. impl ConnectionHandler for Either where L: ConnectionHandler, diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index 9316ef4d2ce..7a5166cab75 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -18,10 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + fmt::Debug, + marker::PhantomData, + task::{Context, Poll}, +}; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; -use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; /// Wrapper around a protocol handler that turns the input event into something else. #[derive(Debug)] @@ -42,6 +47,7 @@ impl MapInEvent ConnectionHandler for MapInEvent where diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index f877bfa6f64..9bb0e2bc554 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -18,12 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + fmt::Debug, + task::{Context, Poll}, +}; + +use futures::ready; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; -use futures::ready; -use std::fmt::Debug; -use std::task::{Context, Poll}; /// Wrapper around a protocol handler that turns the output event into something else. #[derive(Debug)] @@ -39,6 +43,7 @@ impl MapOutEvent { } } +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. impl ConnectionHandler for MapOutEvent where TConnectionHandler: ConnectionHandler, diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 5efcde5c2bb..4f4d64f65fe 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -21,14 +21,6 @@ //! A [`ConnectionHandler`] implementation that combines multiple other [`ConnectionHandler`]s //! indexed by some key. -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}; -use crate::Stream; -use futures::{future::BoxFuture, prelude::*, ready}; -use rand::Rng; use std::{ cmp, collections::{HashMap, HashSet}, @@ -40,6 +32,19 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, prelude::*, ready}; +use rand::Rng; + +use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}, + Stream, +}; + /// A [`ConnectionHandler`] for multiple [`ConnectionHandler`]s of the same type. #[derive(Clone)] pub struct MultiHandler { @@ -81,6 +86,7 @@ where Ok(m) } + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. fn on_listen_upgrade_error( &mut self, ListenUpgradeError { @@ -102,6 +108,7 @@ where } } +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. impl ConnectionHandler for MultiHandler where K: Clone + Debug + Hash + Eq + Send + 'static, @@ -248,7 +255,8 @@ where return Poll::Pending; } - // Not always polling handlers in the same order should give anyone the chance to make progress. + // Not always polling handlers in the same order + // should give anyone the chance to make progress. 
let pos = rand::thread_rng().gen_range(0..self.handlers.len()); for (k, h) in self.handlers.iter_mut().skip(pos) { diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index 7c84f4bb11a..c623008dd90 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -18,14 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, +use std::{ + error, + fmt::Debug, + task::{Context, Poll}, + time::Duration, }; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; -use crate::StreamUpgradeError; + use smallvec::SmallVec; -use std::{error, fmt::Debug, task::Context, task::Poll, time::Duration}; + +use crate::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, + }, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend}, + StreamUpgradeError, +}; /// A [`ConnectionHandler`] that opens a new substream for each request. // TODO: Debug @@ -71,7 +80,7 @@ where /// Returns a reference to the listen protocol configuration. /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound - /// > substreams, not the ones already being negotiated. + /// > substreams, not the ones already being negotiated. pub fn listen_protocol_ref(&self) -> &SubstreamProtocol { &self.listen_protocol } @@ -79,7 +88,7 @@ where /// Returns a mutable reference to the listen protocol configuration. /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound - /// > substreams, not the ones already being negotiated. + /// > substreams, not the ones already being negotiated. pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol { &mut self.listen_protocol } @@ -120,7 +129,7 @@ where type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { self.listen_protocol.clone() } @@ -131,9 +140,7 @@ where fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if !self.events_out.is_empty() { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( self.events_out.remove(0), @@ -160,12 +167,7 @@ where fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -212,12 +214,12 @@ impl Default for OneShotHandlerConfig { #[cfg(test)] mod tests { - use super::*; + use std::convert::Infallible; - use futures::executor::block_on; - use futures::future::poll_fn; + use futures::{executor::block_on, future::poll_fn}; use libp2p_core::upgrade::DeniedUpgrade; - use std::convert::Infallible; + + use super::*; #[test] fn do_not_keep_idle_connection_alive() { diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index 656a38849d5..8223c544593 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -19,13 +19,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +use libp2p_core::upgrade::PendingUpgrade; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, }; -use libp2p_core::upgrade::PendingUpgrade; -use std::convert::Infallible; -use std::task::{Context, Poll}; /// Implementation of [`ConnectionHandler`] that returns a pending upgrade. #[derive(Clone, Debug)] @@ -47,7 +51,7 @@ impl ConnectionHandler for PendingConnectionHandler { type OutboundOpenInfo = Infallible; type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(PendingUpgrade::new(self.protocol_name.clone()), ()) } @@ -60,20 +64,13 @@ impl ConnectionHandler for PendingConnectionHandler { fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { Poll::Pending } fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match event { // TODO: remove when Rust 1.82 is MSRV diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index e049252d448..f4c926f1a0e 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -18,16 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, ListenUpgradeError, - OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, +use std::{ + cmp, + task::{Context, Poll}, }; -use crate::upgrade::SendWrapper; + use either::Either; use futures::{future, ready}; use libp2p_core::upgrade::SelectUpgrade; -use std::{cmp, task::Context, task::Poll}; + +use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, + ListenUpgradeError, OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, + }, + upgrade::SendWrapper, +}; /// Implementation of [`ConnectionHandler`] that combines two protocols into one. #[derive(Debug, Clone)] @@ -145,6 +152,7 @@ where TProto1: ConnectionHandler, TProto2: ConnectionHandler, { + #[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. fn on_listen_upgrade_error( &mut self, ListenUpgradeError { @@ -174,6 +182,7 @@ where } } +#[expect(deprecated)] // TODO: Remove when {In, Out}boundOpenInfo is fully removed. impl ConnectionHandler for ConnectionHandlerSelect where TProto1: ConnectionHandler, diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 12280e99f07..f9c4c71c76f 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -31,12 +31,11 @@ //! Creating a `Swarm` requires three things: //! //! 1. A network identity of the local node in form of a [`PeerId`]. -//! 2. An implementation of the [`Transport`] trait. This is the type that -//! will be used in order to reach nodes on the network based on their -//! address. See the `transport` module for more information. -//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state -//! machine that defines how the swarm should behave once it is connected -//! to a node. +//! 2. 
An implementation of the [`Transport`] trait. This is the type that will be used in order to +//! reach nodes on the network based on their address. See the `transport` module for more +//! information. +//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state machine that defines +//! how the swarm should behave once it is connected to a node. //! //! # Network Behaviour //! @@ -51,7 +50,6 @@ //! The [`ConnectionHandler`] trait defines how each active connection to a //! remote should behave: how to handle incoming substreams, which protocols //! are supported, when to open a new outbound substream, etc. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -73,69 +71,55 @@ mod translation; /// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro. #[doc(hidden)] pub mod derive_prelude { - pub use crate::behaviour::AddressChange; - pub use crate::behaviour::ConnectionClosed; - pub use crate::behaviour::ConnectionEstablished; - pub use crate::behaviour::DialFailure; - pub use crate::behaviour::ExpiredListenAddr; - pub use crate::behaviour::ExternalAddrConfirmed; - pub use crate::behaviour::ExternalAddrExpired; - pub use crate::behaviour::FromSwarm; - pub use crate::behaviour::ListenFailure; - pub use crate::behaviour::ListenerClosed; - pub use crate::behaviour::ListenerError; - pub use crate::behaviour::NewExternalAddrCandidate; - pub use crate::behaviour::NewExternalAddrOfPeer; - pub use crate::behaviour::NewListenAddr; - pub use crate::behaviour::NewListener; - pub use crate::connection::ConnectionId; - pub use crate::ConnectionDenied; - pub use crate::ConnectionHandler; - pub use crate::ConnectionHandlerSelect; - pub use crate::DialError; - pub use crate::NetworkBehaviour; - pub use crate::THandler; - pub use crate::THandlerInEvent; - pub use crate::THandlerOutEvent; - pub use crate::ToSwarm; pub use either::Either; pub use futures::prelude as futures; - pub use libp2p_core::transport::{ListenerId, PortUse}; - pub use libp2p_core::ConnectedPoint; - pub use libp2p_core::Endpoint; - pub use libp2p_core::Multiaddr; + pub use libp2p_core::{ + transport::{ListenerId, PortUse}, + ConnectedPoint, Endpoint, Multiaddr, + }; pub use libp2p_identity::PeerId; + + pub use crate::{ + behaviour::{ + AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, + ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm, ListenFailure, ListenerClosed, + ListenerError, NewExternalAddrCandidate, NewExternalAddrOfPeer, NewListenAddr, + NewListener, + }, + connection::ConnectionId, + ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, DialError, NetworkBehaviour, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + }; } +use std::{ + collections::{HashMap, HashSet, VecDeque}, + error, fmt, io, + num::{NonZeroU32, NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + pub use behaviour::{ AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredListenAddr, ExternalAddrExpired, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, NewExternalAddrOfPeer, NewListenAddr, NotifyHandler, PeerAddresses, ToSwarm, }; -pub use connection::pool::ConnectionCounters; -pub use connection::{ConnectionError, ConnectionId, SupportedProtocols}; +pub use connection::{pool::ConnectionCounters, ConnectionError, ConnectionId, SupportedProtocols}; +use connection::{ + pool::{EstablishedConnection, Pool, 
PoolConfig, PoolEvent}, + IncomingInfo, PendingConnectionError, PendingInboundConnectionError, + PendingOutboundConnectionError, +}; +use dial_opts::{DialOpts, PeerCondition}; pub use executor::Executor; +use futures::{prelude::*, stream::FusedStream}; pub use handler::{ ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, OneShotHandler, OneShotHandlerConfig, StreamUpgradeError, SubstreamProtocol, }; -#[cfg(feature = "macros")] -pub use libp2p_swarm_derive::NetworkBehaviour; -pub use listen_opts::ListenOpts; -pub use stream::Stream; -pub use stream_protocol::{InvalidProtocol, StreamProtocol}; - -use crate::behaviour::ExternalAddrConfirmed; -use crate::handler::UpgradeInfoSend; -use connection::pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}; -use connection::IncomingInfo; -use connection::{ - PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, -}; -use dial_opts::{DialOpts, PeerCondition}; -use futures::{prelude::*, stream::FusedStream}; - use libp2p_core::{ connection::ConnectedPoint, muxing::StreamMuxerBox, @@ -143,20 +127,18 @@ use libp2p_core::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; - +#[cfg(feature = "macros")] +pub use libp2p_swarm_derive::NetworkBehaviour; +pub use listen_opts::ListenOpts; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; -use std::time::Duration; -use std::{ - error, fmt, io, - pin::Pin, - task::{Context, Poll}, -}; +pub use stream::Stream; +pub use stream_protocol::{InvalidProtocol, StreamProtocol}; use tracing::Instrument; #[doc(hidden)] pub use translation::_address_translation; +use crate::{behaviour::ExternalAddrConfirmed, handler::UpgradeInfoSend}; + /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. type TBehaviourOutEvent = ::ToSwarm; @@ -219,8 +201,8 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. send_back_addr: Multiaddr, @@ -233,8 +215,8 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. send_back_addr: Multiaddr, @@ -308,7 +290,8 @@ pub enum SwarmEvent { } impl SwarmEvent { - /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. + /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` + /// variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { match self { @@ -610,7 +593,8 @@ where /// Add a **confirmed** external address for the local node. /// /// This function should only be called with addresses that are guaranteed to be reachable. - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. 
+ /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrConfirmed`]. pub fn add_external_address(&mut self, a: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { @@ -621,7 +605,8 @@ where /// Remove an external address for the local node. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrExpired`]. pub fn remove_external_address(&mut self, addr: &Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); @@ -630,7 +615,8 @@ where /// Add a new external address of a remote peer. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrOfPeer`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::NewExternalAddrOfPeer`]. pub fn add_peer_address(&mut self, peer_id: PeerId, addr: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { @@ -643,8 +629,9 @@ where /// /// Returns `Ok(())` if there was one or more established connections to the peer. /// - /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. - /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll + /// [`ConnectionHandler::poll_close`] to completion. Use this function if you want to close + /// a connection _despite_ it still being in use by one or more handlers. #[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -660,7 +647,8 @@ where /// Attempt to gracefully close a connection. /// /// Closing a connection is asynchronous but this function will return immediately. - /// A [`SwarmEvent::ConnectionClosed`] event will be emitted once the connection is actually closed. + /// A [`SwarmEvent::ConnectionClosed`] event will be emitted + /// once the connection is actually closed. /// /// # Returns /// @@ -1204,15 +1192,16 @@ where // // (1) is polled before (2) to prioritize local work over work coming from a remote. // - // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. + // (2) is polled before (3) to prioritize existing connections + // over upgrading new incoming connections. loop { if let Some(swarm_event) = this.pending_swarm_events.pop_front() { return Poll::Ready(swarm_event); } match this.pending_handler_event.take() { - // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous - // iteration to the connection handler(s). + // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the + // previous iteration to the connection handler(s). Some((peer_id, handler, event)) => match handler { PendingNotifyHandler::One(conn_id) => { match this.pool.get_established(conn_id) { @@ -1506,7 +1495,21 @@ impl Config { /// How long to keep a connection alive once it is idling. /// - /// Defaults to 0. + /// Defaults to 10s. + /// + /// Typically, you shouldn't _need_ to modify this default as connections will be kept alive + /// whilst they are "in use" (see below). 
Depending on the application's usecase, it may be + /// desirable to keep connections alive despite them not being in use. + /// + /// A connection is considered idle if: + /// - There are no active inbound streams. + /// - There are no active outbounds streams. + /// - There are no pending outbound streams (i.e. all streams requested via + /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`] have completed). + /// - Every [`ConnectionHandler`] returns `false` from + /// [`ConnectionHandler::connection_keep_alive`]. + /// + /// Once all these conditions are true, the idle connection timeout starts ticking. pub fn with_idle_connection_timeout(mut self, timeout: Duration) -> Self { self.pool_config.idle_connection_timeout = timeout; self @@ -1518,7 +1521,8 @@ impl Config { pub enum DialError { /// The peer identity obtained on the connection matches the local peer. LocalPeerId { endpoint: ConnectedPoint }, - /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] and [`DialOpts`]. + /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] + /// and [`DialOpts`]. NoAddresses, /// The provided [`dial_opts::PeerCondition`] evaluated to false and thus /// the dial was aborted. @@ -1688,7 +1692,8 @@ impl error::Error for ListenError { /// A connection was denied. /// -/// To figure out which [`NetworkBehaviour`] denied the connection, use [`ConnectionDenied::downcast`]. +/// To figure out which [`NetworkBehaviour`] denied the connection, use +/// [`ConnectionDenied::downcast`]. #[derive(Debug)] pub struct ConnectionDenied { inner: Box, @@ -1759,18 +1764,21 @@ impl NetworkInfo { #[cfg(test)] mod tests { - use super::*; - use crate::test::{CallTraceBehaviour, MockBehaviour}; - use libp2p_core::multiaddr::multiaddr; - use libp2p_core::transport::memory::MemoryTransportError; - use libp2p_core::transport::{PortUse, TransportEvent}; - use libp2p_core::Endpoint; - use libp2p_core::{multiaddr, transport, upgrade}; + use libp2p_core::{ + multiaddr, + multiaddr::multiaddr, + transport, + transport::{memory::MemoryTransportError, PortUse, TransportEvent}, + upgrade, Endpoint, + }; use libp2p_identity as identity; use libp2p_plaintext as plaintext; use libp2p_yamux as yamux; use quickcheck::*; + use super::*; + use crate::test::{CallTraceBehaviour, MockBehaviour}; + // Test execution state. // Connection => Disconnecting => Connecting. enum State { @@ -1790,12 +1798,7 @@ mod tests { .boxed(); let behaviour = CallTraceBehaviour::new(MockBehaviour::new(dummy::ConnectionHandler)); - Swarm::new( - transport, - behaviour, - local_public_key.into(), - config.with_idle_connection_timeout(Duration::from_secs(5)), - ) + Swarm::new(transport, behaviour, local_public_key.into(), config) } fn swarms_connected( @@ -1842,8 +1845,9 @@ mod tests { /// Establishes multiple connections between two peers, /// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`]. 
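The expanded `with_idle_connection_timeout` documentation above spells out when a connection counts as idle. A short sketch of raising the timeout, using the same builder names that appear elsewhere in this diff:

```rust
use std::time::Duration;

use libp2p_swarm::Config;

// Keep otherwise-idle connections alive for 30 seconds before closing them.
fn config() -> Config {
    Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(30))
}
```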
/// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_swarm_disconnect() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1905,8 +1909,9 @@ mod tests { /// after which one peer disconnects the other /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_all() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1972,8 +1977,9 @@ mod tests { /// after which one peer closes a single connection /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_one() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -2175,8 +2181,10 @@ mod tests { // Dialing the same address we're listening should result in three events: // // - The incoming connection notification (before we know the incoming peer ID). - // - The connection error for the dialing endpoint (once we've determined that it's our own ID). - // - The connection error for the listening endpoint (once we've determined that it's our own ID). + // - The connection error for the dialing endpoint (once we've determined that it's our own + // ID). + // - The connection error for the listening endpoint (once we've determined that it's our + // own ID). // // The last two can happen in any order. @@ -2190,8 +2198,9 @@ mod tests { }) .await; - swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would otherwise be filtered. - + // This is a hack to actually execute the dial + // to ourselves which would otherwise be filtered. + swarm.listened_addrs.clear(); swarm.dial(local_address.clone()).unwrap(); let mut got_dial_err = false; @@ -2294,9 +2303,7 @@ mod tests { #[tokio::test] async fn aborting_pending_connection_surfaces_error() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let mut dialer = new_test_swarm(Config::with_tokio_executor()); let mut listener = new_test_swarm(Config::with_tokio_executor()); @@ -2342,7 +2349,8 @@ mod tests { let string = format!("{error}"); - // Unfortunately, we have some "empty" errors that lead to multiple colons without text but that is the best we can do. 
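The disconnect tests described above all follow the same shape: trigger a disconnect, then drive the swarm until the matching `ConnectionClosed` events arrive. A condensed sketch of that pattern, generic over the behaviour and with error handling elided:

```rust
use futures::StreamExt;
use libp2p_identity::PeerId;
use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmEvent};

// Close all connections to `peer` and wait until the swarm reports that the
// last one is gone (`num_established == 0`).
async fn disconnect_and_wait<B: NetworkBehaviour>(swarm: &mut Swarm<B>, peer: PeerId) {
    if swarm.disconnect_peer_id(peer).is_err() {
        return; // no established connection to this peer
    }
    loop {
        if let SwarmEvent::ConnectionClosed { peer_id, num_established, .. } =
            swarm.select_next_some().await
        {
            if peer_id == peer && num_established == 0 {
                break;
            }
        }
    }
}
```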
+ // Unfortunately, we have some "empty" errors + // that lead to multiple colons without text but that is the best we can do. assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on the given port.)]", string) } } diff --git a/swarm/src/listen_opts.rs b/swarm/src/listen_opts.rs index 9c4d69a6fa0..1fcb33cd348 100644 --- a/swarm/src/listen_opts.rs +++ b/swarm/src/listen_opts.rs @@ -1,6 +1,7 @@ -use crate::ListenerId; use libp2p_core::Multiaddr; +use crate::ListenerId; + #[derive(Debug)] pub struct ListenOpts { id: ListenerId, diff --git a/swarm/src/stream.rs b/swarm/src/stream.rs index 871352f3c6a..d3936cb557a 100644 --- a/swarm/src/stream.rs +++ b/swarm/src/stream.rs @@ -1,6 +1,3 @@ -use futures::{AsyncRead, AsyncWrite}; -use libp2p_core::muxing::SubstreamBox; -use libp2p_core::Negotiated; use std::{ io::{IoSlice, IoSliceMut}, pin::Pin, @@ -8,6 +5,9 @@ use std::{ task::{Context, Poll}, }; +use futures::{AsyncRead, AsyncWrite}; +use libp2p_core::{muxing::SubstreamBox, Negotiated}; + /// Counter for the number of active streams on a connection. #[derive(Debug, Clone)] pub(crate) struct ActiveStreamCounter(Arc<()>); diff --git a/swarm/src/stream_protocol.rs b/swarm/src/stream_protocol.rs index f746429a3d7..abf8068238e 100644 --- a/swarm/src/stream_protocol.rs +++ b/swarm/src/stream_protocol.rs @@ -1,7 +1,10 @@ +use std::{ + fmt, + hash::{Hash, Hasher}, + sync::Arc, +}; + use either::Either; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::sync::Arc; /// Identifies a protocol for a stream. /// @@ -39,7 +42,9 @@ impl StreamProtocol { } Ok(StreamProtocol { - inner: Either::Right(Arc::from(protocol)), // FIXME: Can we somehow reuse the allocation from the owned string? + // FIXME: Can we somehow reuse the + // allocation from the owned string? + inner: Either::Right(Arc::from(protocol)), }) } } diff --git a/swarm/src/test.rs b/swarm/src/test.rs index a6cb7c4d4eb..59aadf7e3c7 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -18,19 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
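On the `stream_protocol.rs` hunk above: `StreamProtocol` takes `&'static str` literals through a validating constructor and owned strings through a fallible one. A small usage sketch:

```rust
use libp2p_swarm::{InvalidProtocol, StreamProtocol};

// Literals are validated at construction time and must start with `/`.
const PROTOCOL: StreamProtocol = StreamProtocol::new("/my-app/1.0.0");

// Owned strings go through the fallible constructor; as the FIXME above notes,
// the current implementation copies the string into an `Arc`.
fn runtime_protocol(name: String) -> Result<StreamProtocol, InvalidProtocol> {
    StreamProtocol::try_from_owned(name)
}
```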
-use crate::behaviour::{ - ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, ExternalAddrExpired, - FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, NewListenAddr, NewListener, +use std::{ + collections::HashMap, + task::{Context, Poll}, }; + +use libp2p_core::{ + multiaddr::Multiaddr, + transport::{ListenerId, PortUse}, + ConnectedPoint, Endpoint, +}; +use libp2p_identity::PeerId; + use crate::{ + behaviour::{ + ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, + ExternalAddrExpired, FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, + NewListenAddr, NewListener, + }, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Multiaddr, transport::ListenerId, ConnectedPoint, Endpoint}; -use libp2p_identity::PeerId; -use std::collections::HashMap; -use std::task::{Context, Poll}; /// A `MockBehaviour` is a `NetworkBehaviour` that allows for /// the instrumentation of return values, without keeping @@ -42,7 +50,8 @@ where TOutEvent: Send + 'static, { /// The prototype protocols handler that is cloned for every - /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and [`NetworkBehaviour::handle_established_outbound_connection`] + /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and + /// [`NetworkBehaviour::handle_established_outbound_connection`] pub(crate) handler_proto: THandler, /// The addresses to return from [`NetworkBehaviour::handle_established_outbound_connection`]. pub(crate) addresses: HashMap>, @@ -266,8 +275,8 @@ where }) .take(other_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. Ensure that + // the number of previous connections is consistent with this if let Some(&prev) = other_peer_connections.next() { if prev < other_established { assert_eq!( @@ -319,8 +328,8 @@ where }) .take(remaining_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. Ensure that + // the number of previous connections is consistent with this if let Some(&prev) = other_closed_connections.next() { if prev < remaining_established { assert_eq!( diff --git a/swarm/src/upgrade.rs b/swarm/src/upgrade.rs index f6c6648a373..ba40e5606bb 100644 --- a/swarm/src/upgrade.rs +++ b/swarm/src/upgrade.rs @@ -18,11 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::Stream; - use futures::prelude::*; use libp2p_core::upgrade; +use crate::Stream; + /// Implemented automatically on all types that implement [`UpgradeInfo`](upgrade::UpgradeInfo) /// and `Send + 'static`. /// @@ -65,7 +65,8 @@ pub trait OutboundUpgradeSend: UpgradeInfoSend { /// Equivalent to [`OutboundUpgrade::Future`](upgrade::OutboundUpgrade::Future). type Future: Future> + Send + 'static; - /// Equivalent to [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). + /// Equivalent to + /// [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). 
fn upgrade_outbound(self, socket: Stream, info: Self::Info) -> Self::Future; } @@ -126,7 +127,7 @@ where /// [`InboundUpgrade`](upgrade::InboundUpgrade). /// /// > **Note**: This struct is mostly an implementation detail of the library and normally -/// > doesn't need to be used directly. +/// > doesn't need to be used directly. pub struct SendWrapper(pub T); impl upgrade::UpgradeInfo for SendWrapper { diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index 1d1a25eb84b..0d95626b2f0 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -1,16 +1,16 @@ -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::ConnectionEvent; use libp2p_swarm::{ - ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, FromSwarm, - NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + handler::ConnectionEvent, ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, + ConnectionId, FromSwarm, NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p_swarm_test::SwarmExt; -use std::convert::Infallible; -use std::task::{Context, Poll}; #[async_std::test] async fn sends_remaining_events_to_behaviour_on_connection_close() { @@ -103,7 +103,7 @@ impl ConnectionHandler for HandlerWithState { type InboundOpenInfo = (); type OutboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) } @@ -114,9 +114,7 @@ impl ConnectionHandler for HandlerWithState { fn poll( &mut self, _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { Poll::Pending } @@ -137,12 +135,7 @@ impl ConnectionHandler for HandlerWithState { fn on_connection_event( &mut self, - _: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + _: ConnectionEvent, ) { } } diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs index 74b23cf3f7f..01d5784cfa5 100644 --- a/swarm/tests/listener.rs +++ b/swarm/tests/listener.rs @@ -15,7 +15,6 @@ use libp2p_swarm::{ ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; - use libp2p_swarm_test::SwarmExt; #[async_std::test] diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 334d1b9d304..a1c8bc5ff73 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::fmt::Debug; + use futures::StreamExt; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identify as identify; @@ -26,19 +28,18 @@ use libp2p_swarm::{ behaviour::FromSwarm, dummy, ConnectionDenied, NetworkBehaviour, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, }; -use std::fmt::Debug; /// Small utility to check that a type implements `NetworkBehaviour`. 
#[allow(dead_code)] fn require_net_behaviour() {} // TODO: doesn't compile -/*#[test] -fn empty() { - #[allow(dead_code)] - #[derive(NetworkBehaviour)] - struct Foo {} -}*/ +// #[test] +// fn empty() { +// #[allow(dead_code)] +// #[derive(NetworkBehaviour)] +// struct Foo {} +// } #[test] fn one_field() { @@ -537,10 +538,10 @@ fn multiple_behaviour_attributes() { #[test] fn custom_out_event_no_type_parameters() { + use std::task::{Context, Poll}; + use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionId, ToSwarm}; - use std::task::Context; - use std::task::Poll; pub(crate) struct TemplatedBehaviour { _data: T, diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index e4f951f157f..b46b0413403 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.42.1 + +- Upgrade `async-std-resolver` and `hickory-resolver`. + See [PR 5727](https://github.com/libp2p/rust-libp2p/pull/5727) + ## 0.42.0 - Implement refactored `Transport`. diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 707b67fc935..a07e795397b 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = { workspace = true } description = "DNS transport implementation for libp2p" -version = "0.42.0" +version = "0.42.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,13 +11,13 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-std-resolver = { version = "0.24", optional = true } +async-std-resolver = { workspace = true, features = ["system-config"], optional = true } async-trait = "0.1.80" futures = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } parking_lot = "0.12.3" -hickory-resolver = { version = "0.24.1", default-features = false, features = ["system-config"] } +hickory-resolver = { workspace = true, features = ["system-config"] } smallvec = "1.13.2" tracing = { workspace = true } @@ -25,7 +25,7 @@ tracing = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } tokio = { workspace = true, features = ["rt", "time"] } async-std-crate = { package = "async-std", version = "1.6" } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [features] async-std = ["async-std-resolver"] diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 7d92cc8ecfc..581942b8b7e 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -40,26 +40,26 @@ //! On Unix systems, if no custom configuration is given, [trust-dns-resolver] //! will try to parse the `/etc/resolv.conf` file. This approach comes with a //! few caveats to be aware of: -//! 1) This fails (panics even!) if `/etc/resolv.conf` does not exist. This is -//! the case on all versions of Android. -//! 2) DNS configuration is only evaluated during startup. Runtime changes are -//! thus ignored. -//! 3) DNS resolution is obviously done in process and consequently not using -//! any system APIs (like libc's `gethostbyname`). Again this is -//! problematic on platforms like Android, where there's a lot of -//! complexity hidden behind the system APIs. +//! 1) This fails (panics even!) if `/etc/resolv.conf` does not exist. This is the case on all +//! versions of Android. +//! 2) DNS configuration is only evaluated during startup. 
Runtime changes are thus ignored. +//! 3) DNS resolution is obviously done in process and consequently not using any system APIs +//! (like libc's `gethostbyname`). Again this is problematic on platforms like Android, where +//! there's a lot of complexity hidden behind the system APIs. //! //! If the implementation requires different characteristics, one should //! consider providing their own implementation of [`Transport`] or use //! platform specific APIs to extract the host's DNS configuration (if possible) //! and provide a custom [`ResolverConfig`]. //! -//![trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https +//! [trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "async-std")] pub mod async_std { + use std::{io, sync::Arc}; + use async_std_resolver::AsyncStdResolver; use futures::FutureExt; use hickory_resolver::{ @@ -67,7 +67,6 @@ pub mod async_std { system_conf, }; use parking_lot::Mutex; - use std::{io, sync::Arc}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `async-std` for all async I/O. @@ -116,13 +115,14 @@ pub mod async_std { #[cfg(feature = "tokio")] pub mod tokio { - use hickory_resolver::{system_conf, TokioAsyncResolver}; - use parking_lot::Mutex; use std::sync::Arc; + use hickory_resolver::{system_conf, TokioResolver}; + use parking_lot::Mutex; + /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `tokio` for all async I/O. - pub type Transport = crate::Transport; + pub type Transport = crate::Transport; impl Transport { /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. @@ -140,24 +140,15 @@ pub mod tokio { ) -> Transport { Transport { inner: Arc::new(Mutex::new(inner)), - resolver: TokioAsyncResolver::tokio(cfg, opts), + resolver: TokioResolver::tokio(cfg, opts), } } } } -use async_trait::async_trait; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; -use parking_lot::Mutex; -use smallvec::SmallVec; -use std::io; -use std::net::{Ipv4Addr, Ipv6Addr}; use std::{ - error, fmt, iter, + error, fmt, io, iter, + net::{Ipv4Addr, Ipv6Addr}, ops::DerefMut, pin::Pin, str, @@ -165,12 +156,23 @@ use std::{ task::{Context, Poll}, }; -pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; -pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; -use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; -use hickory_resolver::lookup_ip::LookupIp; -use hickory_resolver::name_server::ConnectionProvider; -use hickory_resolver::AsyncResolver; +use async_trait::async_trait; +use futures::{future::BoxFuture, prelude::*}; +pub use hickory_resolver::{ + config::{ResolverConfig, ResolverOpts}, + ResolveError, ResolveErrorKind, +}; +use hickory_resolver::{ + lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}, + lookup_ip::LookupIp, + name_server::ConnectionProvider, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; +use parking_lot::Mutex; +use smallvec::SmallVec; /// The prefix for `dnsaddr` protocol TXT record lookups. 
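On the DNS transport above: the wrapper resolves `/dns4`, `/dns6` and `/dnsaddr` components before handing the address to the inner transport. A hedged sketch of wiring it up with TCP under tokio, assuming the `system` constructor that reads the OS resolver configuration:

```rust
// Type names follow the aliases in this diff (`libp2p_dns::tokio::Transport`);
// `system` is assumed to load the OS resolver configuration (e.g. /etc/resolv.conf).
fn dns_over_tcp(
    tcp: libp2p_tcp::tokio::Transport,
) -> std::io::Result<libp2p_dns::tokio::Transport<libp2p_tcp::tokio::Transport>> {
    libp2p_dns::tokio::Transport::system(tcp)
}
```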
const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -191,7 +193,8 @@ const MAX_DNS_LOOKUPS: usize = 32; const MAX_TXT_RECORDS: usize = 16; /// A [`Transport`] for performing DNS lookups when dialing `Multiaddr`esses. -/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or [`async_std::Transport`] instead. +/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or +/// [`async_std::Transport`] instead. #[derive(Debug)] pub struct Transport { /// The underlying transport. @@ -590,7 +593,7 @@ pub trait Resolver { } #[async_trait] -impl Resolver for AsyncResolver +impl Resolver for hickory_resolver::Resolver where C: ConnectionProvider, { @@ -613,8 +616,8 @@ where #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { - use super::*; use futures::future::BoxFuture; + use hickory_resolver::proto::{ProtoError, ProtoErrorKind}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{PortUse, TransportError, TransportEvent}, @@ -622,11 +625,11 @@ mod tests { }; use libp2p_identity::PeerId; + use super::*; + #[test] fn basic_resolve() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); #[derive(Clone)] struct CustomTransport; @@ -745,7 +748,8 @@ mod tests { .await { Err(Error::ResolveError(e)) => match e.kind() { - ResolveErrorKind::NoRecordsFound { .. } => {} + ResolveErrorKind::Proto(ProtoError { kind, .. }) + if matches!(kind.as_ref(), ProtoErrorKind::NoRecordsFound { .. }) => {} _ => panic!("Unexpected DNS error: {e:?}"), }, Err(e) => panic!("Unexpected error: {e:?}"), diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index f599ae3533f..cda7132cb28 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.45.1 + +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.45.0 diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 9798ba1836e..a1b712dbdaf 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-noise" edition = "2021" rust-version = { workspace = true } description = "Cryptographic handshake protocol using the noise framework." -version = "0.45.0" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,6 @@ repository = "https://github.com/libp2p/rust-libp2p" [dependencies] asynchronous-codec = { workspace = true } bytes = "1" -curve25519-dalek = "4.1.2" futures = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519"] } @@ -20,7 +19,6 @@ multihash = { workspace = true } once_cell = "1.19.0" quick-protobuf = "0.8" rand = "0.8.3" -sha2 = "0.10.8" static_assertions = "1" thiserror = { workspace = true } tracing = { workspace = true } @@ -36,7 +34,7 @@ snow = { version = "0.9.5", features = ["default-resolver"], default-features = [dev-dependencies] futures_ringbuf = "0.4.0" quickcheck = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index 9cd4cfed52a..84aad79d76b 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -22,11 +22,6 @@ mod framed; pub(crate) mod handshake; -use asynchronous_codec::Framed; -use bytes::Bytes; -use framed::{Codec, MAX_FRAME_LEN}; -use futures::prelude::*; -use futures::ready; use std::{ cmp::min, fmt, io, @@ -34,6 +29,11 @@ use std::{ task::{Context, Poll}, }; +use asynchronous_codec::Framed; +use bytes::Bytes; +use framed::{Codec, MAX_FRAME_LEN}; +use futures::{prelude::*, ready}; + /// A noise session to a remote. /// /// `T` is the type of the underlying I/O resource. diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 17254efb0a9..5aaad6f55e7 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -23,13 +23,14 @@ //! Alongside a [`asynchronous_codec::Framed`] this provides a [Sink](futures::Sink) //! and [Stream](futures::Stream) for length-delimited Noise protocol messages. -use super::handshake::proto; -use crate::{protocol::PublicKey, Error}; +use std::{io, mem::size_of}; + use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, Bytes, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; -use std::io; -use std::mem::size_of; + +use super::handshake::proto; +use crate::{protocol::PublicKey, Error}; /// Max. size of a noise message. const MAX_NOISE_MSG_LEN: usize = 65535; @@ -170,7 +171,8 @@ impl Decoder for Codec { /// Encrypts the given cleartext to `dst`. /// -/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across different session states of the noise protocol. +/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across +/// different session states of the noise protocol. fn encrypt( cleartext: &[u8], dst: &mut BytesMut, @@ -191,8 +193,9 @@ fn encrypt( /// Encrypts the given ciphertext. /// -/// This is a standalone function so we can use it across different session states of the noise protocol. -/// In case `ciphertext` does not contain enough bytes to decrypt the entire frame, `Ok(None)` is returned. +/// This is a standalone function so we can use it across different session states of the noise +/// protocol. In case `ciphertext` does not contain enough bytes to decrypt the entire frame, +/// `Ok(None)` is returned. 
fn decrypt( ciphertext: &mut BytesMut, decrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index 8993a5795b6..d4727b91420 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -23,21 +23,23 @@ pub(super) mod proto { #![allow(unreachable_pub)] include!("../generated/mod.rs"); - pub use self::payload::proto::NoiseExtensions; - pub use self::payload::proto::NoiseHandshakePayload; + pub use self::payload::proto::{NoiseExtensions, NoiseHandshakePayload}; } -use super::framed::Codec; -use crate::io::Output; -use crate::protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}; -use crate::Error; +use std::{collections::HashSet, io, mem}; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_identity as identity; use multihash::Multihash; use quick_protobuf::MessageWrite; -use std::collections::HashSet; -use std::{io, mem}; + +use super::framed::Codec; +use crate::{ + io::Output, + protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}, + Error, +}; ////////////////////////////////////////////////////////////////////////////// // Internal @@ -106,7 +108,7 @@ where .id_remote_pubkey .ok_or_else(|| Error::AuthenticationFailed)?; - let is_valid_signature = self.dh_remote_pubkey_sig.as_ref().map_or(false, |s| { + let is_valid_signature = self.dh_remote_pubkey_sig.as_ref().is_some_and(|s| { id_pk.verify(&[STATIC_KEY_DOMAIN.as_bytes(), pubkey.as_ref()].concat(), s) }); @@ -142,12 +144,16 @@ where } } -/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the [`snow::TransportState`]. +/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the +/// [`snow::TransportState`]. /// -/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its [`FramedParts`](asynchronous_codec::FramedParts). -/// However, we need to retain the original [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write buffers. +/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its +/// [`FramedParts`](asynchronous_codec::FramedParts). However, we need to retain the original +/// [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write +/// buffers. /// -/// Those are likely **not** empty because the remote may directly write to the stream again after the noise handshake finishes. +/// Those are likely **not** empty because the remote may directly write to the stream again after +/// the noise handshake finishes. fn map_into_transport( framed: Framed>, ) -> Result<(PublicKey, Framed>), Error> diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index 2557e76e276..e05556744fe 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -21,14 +21,14 @@ //! [Noise protocol framework][noise] support for libp2p. //! //! > **Note**: This crate is still experimental and subject to major breaking changes -//! > both on the API and the wire protocol. +//! > both on the API and the wire protocol. //! //! This crate provides `libp2p_core::InboundUpgrade` and `libp2p_core::OutboundUpgrade` //! implementations for various noise handshake patterns (currently `IK`, `IX`, and `XX`) //! over a particular choice of Diffie–Hellman key agreement (currently only X25519). //! //! > **Note**: Only the `XX` handshake pattern is currently guaranteed to provide -//! 
> interoperability with other libp2p implementations. +//! > interoperability with other libp2p implementations. //! //! All upgrades produce as output a pair, consisting of the remote's static public key //! and a `NoiseOutput` which represents the established cryptographic session with the @@ -39,14 +39,16 @@ //! Example: //! //! ``` -//! use libp2p_core::{Transport, upgrade, transport::MemoryTransport}; -//! use libp2p_noise as noise; +//! use libp2p_core::{transport::MemoryTransport, upgrade, Transport}; //! use libp2p_identity as identity; +//! use libp2p_noise as noise; //! //! # fn main() { //! let id_keys = identity::Keypair::generate_ed25519(); //! let noise = noise::Config::new(&id_keys).unwrap(); -//! let builder = MemoryTransport::default().upgrade(upgrade::Version::V1).authenticate(noise); +//! let builder = MemoryTransport::default() +//! .upgrade(upgrade::Version::V1) +//! .authenticate(noise); //! // let transport = builder.multiplex(...); //! # } //! ``` @@ -58,22 +60,25 @@ mod io; mod protocol; -pub use io::Output; +use std::{collections::HashSet, fmt::Write, pin::Pin}; -use crate::handshake::State; -use crate::io::handshake; -use crate::protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}; use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +pub use io::Output; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use multiaddr::Protocol; use multihash::Multihash; use snow::params::NoiseParams; -use std::collections::HashSet; -use std::fmt::Write; -use std::pin::Pin; + +use crate::{ + handshake::State, + io::handshake, + protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}, +}; /// The configuration for the noise handshake. #[derive(Clone)] diff --git a/transports/noise/src/protocol.rs b/transports/noise/src/protocol.rs index 29d0c81e2e4..ca47ea0dfcd 100644 --- a/transports/noise/src/protocol.rs +++ b/transports/noise/src/protocol.rs @@ -20,7 +20,6 @@ //! Components of a Noise protocol. -use crate::Error; use libp2p_identity as identity; use once_cell::sync::Lazy; use rand::{Rng as _, SeedableRng}; @@ -28,6 +27,8 @@ use snow::params::NoiseParams; use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; use zeroize::Zeroize; +use crate::Error; + /// Prefix of static key signatures for domain separation. pub(crate) const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:"; diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 62b5d41d6b9..cd9702b1c2f 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -18,15 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
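The doc example above stops at the commented-out `multiplex(...)` step. A hedged sketch of completing it with yamux and boxing the result (dependency names taken from the dev-dependencies elsewhere in this diff):

```rust
use libp2p_core::{
    muxing::StreamMuxerBox,
    transport::{Boxed, MemoryTransport},
    upgrade, Transport,
};
use libp2p_identity as identity;
use libp2p_noise as noise;
use libp2p_yamux as yamux;

// Authenticate with Noise XX, then multiplex with yamux and erase the
// concrete transport type.
fn full_transport(id_keys: &identity::Keypair) -> Boxed<(identity::PeerId, StreamMuxerBox)> {
    let noise = noise::Config::new(id_keys).expect("signing the static key should not fail");
    MemoryTransport::default()
        .upgrade(upgrade::Version::V1)
        .authenticate(noise)
        .multiplex(yamux::Config::default())
        .boxed()
}
```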
+use std::io; + use futures::prelude::*; -use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::{ + transport::{MemoryTransport, Transport}, + upgrade, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, +}; use libp2p_identity as identity; use libp2p_noise as noise; use quickcheck::*; -use std::io; -use tracing_subscriber::EnvFilter; #[allow(dead_code)] fn core_upgrade_compat() { @@ -41,9 +43,7 @@ fn core_upgrade_compat() { #[test] fn xx() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(mut messages: Vec) -> bool { messages.truncate(5); let server_id = identity::Keypair::generate_ed25519(); diff --git a/transports/noise/tests/webtransport_certhashes.rs b/transports/noise/tests/webtransport_certhashes.rs index b3c924f8188..7fa28da0ebe 100644 --- a/transports/noise/tests/webtransport_certhashes.rs +++ b/transports/noise/tests/webtransport_certhashes.rs @@ -1,8 +1,9 @@ +use std::collections::HashSet; + use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; use multihash::Multihash; -use std::collections::HashSet; const SHA_256_MH: u64 = 0x12; diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 47a3191baa9..95f8f5af065 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -23,9 +23,8 @@ quick-protobuf-codec = { workspace = true } [dev-dependencies] libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } quickcheck = { workspace = true } -rand = "0.8" futures_ringbuf = "0.4.0" -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index 7480874a85e..2d352562528 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -18,9 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::error; -use std::fmt; -use std::io::Error as IoError; +use std::{error, fmt, io::Error as IoError}; #[derive(Debug)] pub enum Error { diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index ddd5f7f8a9b..38a56b84862 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::error::{DecodeError, Error}; -use crate::proto::Exchange; -use crate::Config; +use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + +use crate::{ + error::{DecodeError, Error}, + proto::Exchange, + Config, +}; pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 4a322d63fab..f841a859a62 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -22,22 +22,23 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::error::Error; - -use bytes::Bytes; -use futures::future::BoxFuture; -use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use libp2p_identity as identity; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; use std::{ io, iter, pin::Pin, task::{Context, Poll}, }; +use bytes::Bytes; +use futures::{future::BoxFuture, prelude::*}; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; +use libp2p_identity as identity; +use libp2p_identity::{PeerId, PublicKey}; + +use crate::error::Error; + mod error; mod handshake; mod proto { diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index f77f23d3ad3..ee8cec46c0b 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -23,13 +23,10 @@ use libp2p_core::upgrade::InboundConnectionUpgrade; use libp2p_identity as identity; use libp2p_plaintext as plaintext; use quickcheck::QuickCheck; -use tracing_subscriber::EnvFilter; #[test] fn variable_msg_length() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn prop(msg: Vec) { let msg_to_send = msg.clone(); diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index 06f932fbe71..8b302089a1d 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, pin::Pin}; + use futures::{ io::{self, AsyncWrite}, ready, @@ -25,7 +27,6 @@ use futures::{ }; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; -use std::{fmt, pin::Pin}; /// A writer that encrypts and forwards to an inner writer #[pin_project] @@ -74,7 +75,8 @@ fn poll_flush_buf( // we made progress, so try again written += n; } else { - // we got Ok but got no progress whatsoever, so bail out so we don't spin writing 0 bytes. + // we got Ok but got no progress whatsoever, + // so bail out so we don't spin writing 0 bytes. ret = Poll::Ready(Err(io::Error::new( io::ErrorKind::WriteZero, "Failed to write buffered data", diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index 083ffff36a3..b27f9777c47 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -19,7 +19,6 @@ // DEALINGS IN THE SOFTWARE. //! Implementation of the [pnet](https://github.com/libp2p/specs/blob/master/pnet/Private-Networks-PSK-V1.md) protocol. -//! 
//| The `pnet` protocol implements *Pre-shared Key Based Private Networks in libp2p*. //! Libp2p nodes configured with a pre-shared key can only communicate with other nodes with //! the same key. @@ -27,15 +26,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod crypt_writer; -use crypt_writer::CryptWriter; -use futures::prelude::*; -use pin_project::pin_project; -use rand::RngCore; -use salsa20::{ - cipher::{KeyIvInit, StreamCipher}, - Salsa20, XSalsa20, -}; -use sha3::{digest::ExtendableOutput, Shake128}; use std::{ error, fmt::{self, Write}, @@ -47,6 +37,16 @@ use std::{ task::{Context, Poll}, }; +use crypt_writer::CryptWriter; +use futures::prelude::*; +use pin_project::pin_project; +use rand::RngCore; +use salsa20::{ + cipher::{KeyIvInit, StreamCipher}, + Salsa20, XSalsa20, +}; +use sha3::{digest::ExtendableOutput, Shake128}; + const KEY_SIZE: usize = 32; const NONCE_SIZE: usize = 24; const WRITE_BUFFER_SIZE: usize = 1024; @@ -319,9 +319,10 @@ impl fmt::Display for PnetError { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + impl Arbitrary for PreSharedKey { fn arbitrary(g: &mut Gen) -> PreSharedKey { let key = core::array::from_fn(|_| u8::arbitrary(g)); diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index 79ffaeab447..ae4fcc4b3fc 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -1,10 +1,9 @@ use std::time::Duration; use futures::{future, AsyncRead, AsyncWrite, StreamExt}; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_core::{ + multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, +}; use libp2p_pnet::{PnetConfig, PreSharedKey}; use libp2p_swarm::{dummy, Config, NetworkBehaviour, Swarm, SwarmEvent}; diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 17d5014b974..1c35b293049 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -10,15 +10,13 @@ license = "MIT" [dependencies] async-std = { version = "1.12.0", optional = true } -bytes = "1.6.0" futures = { workspace = true } futures-timer = "3.0.3" if-watch = "3.2.0" libp2p-core = { workspace = true } libp2p-tls = { workspace = true } libp2p-identity = { workspace = true } -parking_lot = "0.12.3" -quinn = { version = "0.11.2", default-features = false, features = ["rustls", "futures-io"] } +quinn = { version = "0.11.6", default-features = false, features = ["rustls", "futures-io"] } rand = "0.8.5" rustls = { version = "0.23.9", default-features = false } thiserror = { workspace = true } @@ -45,7 +43,7 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } [[test]] name = "stream_compliance" diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 2456ed3e36f..c623632ddc6 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -18,11 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
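Back on the pnet changes above: the protocol encrypts all traffic with a key stream derived from a 32-byte pre-shared key. A hedged sketch of gating a transport behind a PSK, assuming `PreSharedKey::new` over raw bytes and `PnetConfig::handshake` running the pnet handshake on each new socket, as the smoke test does:

```rust
use libp2p_core::{transport::MemoryTransport, Transport};
use libp2p_pnet::{PnetConfig, PreSharedKey};

// Every inbound and outbound socket runs the pnet handshake before any other
// upgrade; peers without the same key cannot complete it.
fn private_transport(psk_bytes: [u8; 32]) -> impl Transport {
    let psk = PreSharedKey::new(psk_bytes);
    MemoryTransport::default()
        .and_then(move |socket, _endpoint| PnetConfig::new(psk).handshake(socket))
}
```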
+use std::{sync::Arc, time::Duration}; + use quinn::{ crypto::rustls::{QuicClientConfig, QuicServerConfig}, MtuDiscoveryConfig, VarInt, }; -use std::{sync::Arc, time::Duration}; /// Config for the transport. #[derive(Clone)] diff --git a/transports/quic/src/connection.rs b/transports/quic/src/connection.rs index 783258a0130..a7375a1ca6d 100644 --- a/transports/quic/src/connection.rs +++ b/transports/quic/src/connection.rs @@ -21,18 +21,18 @@ mod connecting; mod stream; -pub use connecting::Connecting; -pub use stream::Stream; - -use crate::{ConnectionError, Error}; - -use futures::{future::BoxFuture, FutureExt}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use std::{ pin::Pin, task::{Context, Poll}, }; +pub use connecting::Connecting; +use futures::{future::BoxFuture, FutureExt}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +pub use stream::Stream; + +use crate::{ConnectionError, Error}; + /// State for a single opened QUIC connection. pub struct Connection { /// Underlying connection. diff --git a/transports/quic/src/connection/connecting.rs b/transports/quic/src/connection/connecting.rs index f6e397b4d1e..0ce7f9041db 100644 --- a/transports/quic/src/connection/connecting.rs +++ b/transports/quic/src/connection/connecting.rs @@ -20,7 +20,11 @@ //! Future that drives a QUIC connection until is has performed its TLS handshake. -use crate::{Connection, ConnectionError, Error}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use futures::{ future::{select, Either, FutureExt, Select}, @@ -29,11 +33,8 @@ use futures::{ use futures_timer::Delay; use libp2p_identity::PeerId; use quinn::rustls::pki_types::CertificateDer; -use std::{ - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; + +use crate::{Connection, ConnectionError, Error}; /// A QUIC connection currently being negotiated. #[derive(Debug)] diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index a38d123a6a4..6f1961081d2 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -1,15 +1,14 @@ -use crate::{provider::Provider, Error}; - -use futures::future::Either; - -use rand::{distributions, Rng}; - -use std::convert::Infallible; use std::{ + convert::Infallible, net::{SocketAddr, UdpSocket}, time::Duration, }; +use futures::future::Either; +use rand::{distributions, Rng}; + +use crate::{provider::Provider, Error}; + pub(crate) async fn hole_puncher( socket: UdpSocket, remote_addr: SocketAddr, diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index 7ae649b6914..9d97e6c4319 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -31,16 +31,20 @@ //! # #[cfg(feature = "async-std")] //! # fn main() -> std::io::Result<()> { //! # +//! use libp2p_core::{transport::ListenerId, Multiaddr, Transport}; //! use libp2p_quic as quic; -//! use libp2p_core::{Multiaddr, Transport, transport::ListenerId}; //! //! let keypair = libp2p_identity::Keypair::generate_ed25519(); //! let quic_config = quic::Config::new(&keypair); //! //! let mut quic_transport = quic::async_std::Transport::new(quic_config); //! -//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1".parse().expect("address should be valid"); -//! quic_transport.listen_on(ListenerId::next(), addr).expect("listen error."); +//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1" +//! .parse() +//! .expect("address should be valid"); +//! quic_transport +//! .listen_on(ListenerId::next(), addr) +//! .expect("listen error."); //! # //! 
# Ok(()) //! # } @@ -53,7 +57,6 @@ //! Note that QUIC provides transport, security, and multiplexing in a single protocol. Therefore, //! QUIC connections do not need to be upgraded. You will get a compile-time error if you try. //! Instead, you must pass all needed configuration into the constructor. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -67,7 +70,6 @@ use std::net::SocketAddr; pub use config::Config; pub use connection::{Connecting, Connection, Stream}; - #[cfg(feature = "async-std")] pub use provider::async_std; #[cfg(feature = "tokio")] diff --git a/transports/quic/src/provider.rs b/transports/quic/src/provider.rs index 6f1122ee55f..fdf88b460e8 100644 --- a/transports/quic/src/provider.rs +++ b/transports/quic/src/provider.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::future::BoxFuture; -use if_watch::IfEvent; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -27,6 +25,9 @@ use std::{ time::Duration, }; +use futures::future::BoxFuture; +use if_watch::IfEvent; + #[cfg(feature = "async-std")] pub mod async_std; #[cfg(feature = "tokio")] @@ -59,7 +60,8 @@ pub trait Provider: Unpin + Send + Sized + 'static { /// Sleep for specified amount of time. fn sleep(duration: Duration) -> BoxFuture<'static, ()>; - /// Sends data on the socket to the given address. On success, returns the number of bytes written. + /// Sends data on the socket to the given address. On success, + /// returns the number of bytes written. fn send_to<'a>( udp_socket: &'a UdpSocket, buf: &'a [u8], diff --git a/transports/quic/src/provider/async_std.rs b/transports/quic/src/provider/async_std.rs index a110058108c..b5c3ac917dc 100644 --- a/transports/quic/src/provider/async_std.rs +++ b/transports/quic/src/provider/async_std.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::UdpSocket, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`async-std`] runtime. diff --git a/transports/quic/src/provider/tokio.rs b/transports/quic/src/provider/tokio.rs index 9cb148d6ef2..83753faac01 100644 --- a/transports/quic/src/provider/tokio.rs +++ b/transports/quic/src/provider/tokio.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`tokio`] runtime. diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 057d0f978d7..63a65ce99cc 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -18,38 +18,41 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::config::{Config, QuinnConfig}; -use crate::hole_punching::hole_puncher; -use crate::provider::Provider; -use crate::{ConnectError, Connecting, Connection, Error}; - -use futures::channel::oneshot; -use futures::future::{BoxFuture, Either}; -use futures::ready; -use futures::stream::StreamExt; -use futures::{prelude::*, stream::SelectAll}; +use std::{ + collections::{ + hash_map::{DefaultHasher, Entry}, + HashMap, HashSet, + }, + fmt, + hash::{Hash, Hasher}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::{ + channel::oneshot, + future::{BoxFuture, Either}, + prelude::*, + ready, + stream::{SelectAll, StreamExt}, +}; use if_watch::IfEvent; - -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::Endpoint; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, - Transport, + transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, + Endpoint, Transport, }; use libp2p_identity::PeerId; use socket2::{Domain, Socket, Type}; -use std::collections::hash_map::{DefaultHasher, Entry}; -use std::collections::{HashMap, HashSet}; -use std::hash::{Hash, Hasher}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}; -use std::time::Duration; -use std::{fmt, io}; -use std::{ - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, + +use crate::{ + config::{Config, QuinnConfig}, + hole_punching::hole_puncher, + provider::Provider, + ConnectError, Connecting, Connection, Error, }; /// Implementation of the [`Transport`] trait for QUIC. @@ -745,9 +748,10 @@ fn socketaddr_to_multiaddr(socket_addr: &SocketAddr, version: ProtocolVersion) - #[cfg(test)] #[cfg(any(feature = "async-std", feature = "tokio"))] mod tests { - use super::*; use futures::future::poll_fn; + use super::*; + #[test] fn multiaddr_to_udp_conversion() { assert!(multiaddr_to_socketaddr( diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index 6a760f9997c..f0a8bd97d70 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -1,16 +1,31 @@ #![cfg(any(feature = "async-std", feature = "tokio"))] -use futures::channel::{mpsc, oneshot}; -use futures::future::BoxFuture; -use futures::future::{poll_fn, Either}; -use futures::stream::StreamExt; -use futures::{future, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + num::NonZeroU8, + pin::Pin, + sync::{Arc, Mutex}, + task::Poll, + time::Duration, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future, + future::{poll_fn, BoxFuture, Either}, + stream::StreamExt, + AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt, +}; use futures_timer::Delay; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}; -use libp2p_core::transport::{Boxed, DialOpts, OrTransport, PortUse, TransportEvent}; -use libp2p_core::transport::{ListenerId, TransportError}; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr, Transport}; +use libp2p_core::{ + multiaddr::Protocol, + muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}, + transport::{ + Boxed, DialOpts, ListenerId, OrTransport, PortUse, TransportError, TransportEvent, + }, + upgrade, Endpoint, Multiaddr, Transport, +}; use libp2p_identity::PeerId; use libp2p_noise as noise; use libp2p_quic as quic; @@ -18,16 +33,6 @@ use libp2p_tcp as tcp; use libp2p_yamux as yamux; use quic::Provider; use rand::RngCore; -use 
std::future::Future; -use std::io; -use std::num::NonZeroU8; -use std::task::Poll; -use std::time::Duration; -use std::{ - pin::Pin, - sync::{Arc, Mutex}, -}; -use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] #[tokio::test] @@ -44,9 +49,7 @@ async fn async_std_smoke() { #[cfg(feature = "tokio")] #[tokio::test] async fn endpoint_reuse() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (_, mut a_transport) = create_default_transport::(); let (_, mut b_transport) = create_default_transport::(); @@ -71,9 +74,7 @@ async fn endpoint_reuse() { #[cfg(feature = "async-std")] #[async_std::test] async fn ipv4_dial_ipv6() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (a_peer_id, mut a_transport) = create_default_transport::(); let (b_peer_id, mut b_transport) = create_default_transport::(); @@ -93,9 +94,7 @@ async fn ipv4_dial_ipv6() { async fn wrapped_with_delay() { use libp2p_core::transport::DialOpts; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); struct DialDelay(Arc>>); @@ -200,7 +199,8 @@ async fn wrapped_with_delay() { #[cfg(feature = "async-std")] #[async_std::test] -#[ignore] // Transport currently does not validate PeerId. Enable once we make use of PeerId validation in rustls. +#[ignore] // Transport currently does not validate PeerId. + // Enable once we make use of PeerId validation in rustls. async fn wrong_peerid() { use libp2p_identity::PeerId; @@ -264,9 +264,7 @@ async fn tcp_and_quic() { #[cfg(feature = "async-std")] #[test] fn concurrent_connections_and_streams_async_std() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); quickcheck::QuickCheck::new() .min_tests_passed(1) @@ -277,9 +275,7 @@ fn concurrent_connections_and_streams_async_std() { #[cfg(feature = "tokio")] #[test] fn concurrent_connections_and_streams_tokio() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -296,9 +292,7 @@ async fn draft_29_support() { use futures::{future::poll_fn, select}; use libp2p_core::transport::TransportError; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (_, mut a_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); @@ -373,9 +367,7 @@ async fn draft_29_support() { #[cfg(feature = "async-std")] #[async_std::test] async fn backpressure() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let max_stream_data = quic::Config::new(&generate_tls_keypair()).max_stream_data; let (mut stream_a, mut stream_b) = build_streams::().await; @@ -399,9 +391,7 @@ async fn backpressure() { #[cfg(feature = "async-std")] #[async_std::test] async fn read_after_peer_dropped_stream() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (mut stream_a, mut stream_b) = build_streams::().await; let data = 
vec![0; 10]; @@ -421,9 +411,7 @@ async fn read_after_peer_dropped_stream() { #[async_std::test] #[should_panic] async fn write_after_peer_dropped_stream() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (stream_a, mut stream_b) = build_streams::().await; drop(stream_a); futures_timer::Delay::new(Duration::from_millis(100)).await; @@ -477,9 +465,7 @@ async fn test_local_listener_reuse() { } async fn smoke() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (a_peer_id, mut a_transport) = create_default_transport::
<P>(); let (b_peer_id, mut b_transport) = create_default_transport::<P>
(); diff --git a/transports/quic/tests/stream_compliance.rs b/transports/quic/tests/stream_compliance.rs index b0536473215..13c29f2caa0 100644 --- a/transports/quic/tests/stream_compliance.rs +++ b/transports/quic/tests/stream_compliance.rs @@ -1,10 +1,12 @@ -use futures::channel::oneshot; -use futures::StreamExt; -use libp2p_core::transport::{DialOpts, ListenerId, PortUse}; -use libp2p_core::{Endpoint, Transport}; -use libp2p_quic as quic; use std::time::Duration; +use futures::{channel::oneshot, StreamExt}; +use libp2p_core::{ + transport::{DialOpts, ListenerId, PortUse}, + Endpoint, Transport, +}; +use libp2p_quic as quic; + #[async_std::test] async fn close_implies_flush() { let (alice, bob) = connected_peers().await; diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 03e7fac491c..61c31e49639 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -17,7 +17,6 @@ futures-timer = "3.0" if-watch = "3.2.0" libc = "0.2.155" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } socket2 = { version = "0.5.7", features = ["all"] } tokio = { workspace = true, default-features = false, features = ["net"], optional = true } tracing = { workspace = true } @@ -28,9 +27,8 @@ async-io = ["dep:async-io", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.6.5", features = ["attributes"] } -libp2p-identity = { workspace = true, features = ["rand"] } tokio = { workspace = true, features = ["full"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +libp2p-test-utils = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 4c4fa7c6b84..5d3e46bcb09 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -30,11 +30,15 @@ mod provider; -#[cfg(feature = "async-io")] -pub use provider::async_io; - -#[cfg(feature = "tokio")] -pub use provider::tokio; +use std::{ + collections::{HashSet, VecDeque}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll, Waker}, + time::Duration, +}; use futures::{future::Ready, prelude::*, stream::SelectAll}; use futures_timer::Delay; @@ -43,17 +47,12 @@ use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, }; +#[cfg(feature = "async-io")] +pub use provider::async_io; +#[cfg(feature = "tokio")] +pub use provider::tokio; use provider::{Incoming, Provider}; use socket2::{Domain, Socket, Type}; -use std::{ - collections::{HashSet, VecDeque}, - io, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, - pin::Pin, - sync::{Arc, RwLock}, - task::{Context, Poll, Waker}, - time::Duration, -}; /// The configuration for a TCP/IP transport capability for libp2p. #[derive(Clone, Debug)] @@ -131,14 +130,11 @@ impl PortReuse { impl Config { /// Creates a new configuration for a TCP/IP transport: /// - /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. - /// See [`Config::nodelay`]. - /// * Reuse of listening ports is _disabled_. - /// See [`Config::port_reuse`]. - /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. - /// See [`Config::ttl`]. - /// * The size of the listen backlog for new listening sockets is `1024`. - /// See [`Config::listen_backlog`]. + /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. 
See [`Config::nodelay`]. + /// * Reuse of listening ports is _disabled_. See [`Config::port_reuse`]. + /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. See [`Config::ttl`]. + /// * The size of the listen backlog for new listening sockets is `1024`. See + /// [`Config::listen_backlog`]. pub fn new() -> Self { Self { ttl: None, @@ -241,8 +237,8 @@ where /// The configuration of port reuse when dialing. port_reuse: PortReuse, /// All the active listeners. - /// The [`ListenStream`] struct contains a stream that we want to be pinned. Since the `VecDeque` - /// can be resized, the only way is to use a `Pin>`. + /// The [`ListenStream`] struct contains a stream that we want to be pinned. Since the + /// `VecDeque` can be resized, the only way is to use a `Pin>`. listeners: SelectAll>, /// Pending transport events to return from [`libp2p_core::Transport::poll`]. pending_events: @@ -465,7 +461,8 @@ where pause: Option, /// Pending event to reported. pending_event: Option<::Item>, - /// The listener can be manually closed with [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). + /// The listener can be manually closed with + /// [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). is_closed: bool, /// The stream must be awaken after it has been closed to deliver the last event. close_listener_waker: Option, @@ -621,7 +618,8 @@ where } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed + // and all remaining events have been reported. return Poll::Ready(None); } @@ -705,13 +703,13 @@ fn ip_to_multiaddr(ip: IpAddr, port: u16) -> Multiaddr { #[cfg(test)] mod tests { - use super::*; use futures::{ channel::{mpsc, oneshot}, future::poll_fn, }; - use libp2p_core::Endpoint; - use libp2p_core::Transport as _; + use libp2p_core::{Endpoint, Transport as _}; + + use super::*; #[test] fn multiaddr_to_tcp_conversion() { @@ -764,9 +762,7 @@ mod tests { #[test] fn communicating_between_dialer_and_listener() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -845,9 +841,7 @@ mod tests { #[test] fn wildcard_expansion() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -925,9 +919,7 @@ mod tests { #[test] fn port_reuse_dialing() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); async fn listener( addr: Multiaddr, @@ -1044,9 +1036,7 @@ mod tests { #[test] fn port_reuse_listening() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); async fn listen_twice(addr: Multiaddr) { let mut tcp = Transport::::new(Config::new()); @@ -1100,9 +1090,7 @@ mod tests { #[test] fn listen_port_0() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); 
async fn listen(addr: Multiaddr) -> Multiaddr { let mut tcp = Transport::::default().boxed(); @@ -1137,9 +1125,7 @@ mod tests { #[test] fn listen_invalid_addr() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); fn test(addr: Multiaddr) { #[cfg(feature = "async-io")] @@ -1160,9 +1146,7 @@ mod tests { #[test] fn test_remove_listener() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); async fn cycle_listeners() -> bool { let mut tcp = Transport::::default().boxed(); diff --git a/transports/tcp/src/provider.rs b/transports/tcp/src/provider.rs index d94da7a6fc3..7a609d9f031 100644 --- a/transports/tcp/src/provider.rs +++ b/transports/tcp/src/provider.rs @@ -26,13 +26,18 @@ pub mod async_io; #[cfg(feature = "tokio")] pub mod tokio; -use futures::future::BoxFuture; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::Stream; +use std::{ + fmt, io, + net::{SocketAddr, TcpListener, TcpStream}, + task::{Context, Poll}, +}; + +use futures::{ + future::BoxFuture, + io::{AsyncRead, AsyncWrite}, + Stream, +}; use if_watch::{IfEvent, IpNet}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::task::{Context, Poll}; -use std::{fmt, io}; /// An incoming connection returned from [`Provider::poll_accept()`]. pub struct Incoming { diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index fe0abe42d54..4df9d928fbb 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -18,13 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{Incoming, Provider}; +use std::{ + io, net, + task::{Context, Poll}, +}; use async_io::Async; use futures::future::{BoxFuture, FutureExt}; -use std::io; -use std::net; -use std::task::{Context, Poll}; + +use super::{Incoming, Provider}; /// A TCP [`Transport`](libp2p_core::Transport) that works with the `async-std` ecosystem. /// @@ -40,9 +42,14 @@ use std::task::{Context, Poll}; /// # async fn main() { /// let mut transport = tcp::async_io::Transport::new(tcp::Config::default()); /// let id = ListenerId::next(); -/// transport.listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// transport +/// .listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs index ec2d098e3fb..a96c4dba858 100644 --- a/transports/tcp/src/provider/tokio.rs +++ b/transports/tcp/src/provider/tokio.rs @@ -18,16 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{Incoming, Provider}; +use std::{ + io, net, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ future::{BoxFuture, FutureExt}, prelude::*, }; -use std::io; -use std::net; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use super::{Incoming, Provider}; /// A TCP [`Transport`](libp2p_core::Transport) that works with the `tokio` ecosystem. 
/// @@ -42,9 +44,14 @@ use std::task::{Context, Poll}; /// # #[tokio::main] /// # async fn main() { /// let mut transport = tcp::tokio::Transport::new(tcp::Config::default()); -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index fce76e2aa79..7702a4361b1 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -28,7 +28,6 @@ features = ["ring", "std"] # Must enable this to allow for custom verification c [dev-dependencies] -hex = "0.4.3" hex-literal = "0.4.1" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa", "rand"] } diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 65b373bcf9b..3e7eeb22bf3 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -22,12 +22,12 @@ //! //! This module handles generation, signing, and verification of certificates. +use std::sync::Arc; + use libp2p_identity as identity; use libp2p_identity::PeerId; use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm}; -use std::sync::Arc; - /// The libp2p Public Key Extension is a X.509 extension /// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. @@ -283,8 +283,8 @@ impl P2pCertificate<'_> { self.extension.public_key.to_peer_id() } - /// Verify the `signature` of the `message` signed by the private key corresponding to the public key stored - /// in the certificate. + /// Verify the `signature` of the `message` signed by the private key corresponding to the + /// public key stored in the certificate. pub fn verify_signature( &self, signature_scheme: rustls::SignatureScheme, @@ -492,9 +492,10 @@ impl P2pCertificate<'_> { #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn sanity_check() { let keypair = identity::Keypair::generate_ed25519(); diff --git a/transports/tls/src/lib.rs b/transports/tls/src/lib.rs index 3aa66db12b3..57d7d69d4bd 100644 --- a/transports/tls/src/lib.rs +++ b/transports/tls/src/lib.rs @@ -29,14 +29,12 @@ pub mod certificate; mod upgrade; mod verifier; -use certificate::AlwaysResolvesCert; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; use std::sync::Arc; +use certificate::AlwaysResolvesCert; pub use futures_rustls::TlsStream; -pub use upgrade::Config; -pub use upgrade::UpgradeError; +use libp2p_identity::{Keypair, PeerId}; +pub use upgrade::{Config, UpgradeError}; const P2P_ALPN: [u8; 6] = *b"libp2p"; diff --git a/transports/tls/src/upgrade.rs b/transports/tls/src/upgrade.rs index 1c61d265ea6..a6d81ab36c9 100644 --- a/transports/tls/src/upgrade.rs +++ b/transports/tls/src/upgrade.rs @@ -18,20 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::certificate; -use crate::certificate::P2pCertificate; -use futures::future::BoxFuture; -use futures::AsyncWrite; -use futures::{AsyncRead, FutureExt}; +use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, +}; + +use futures::{future::BoxFuture, AsyncRead, AsyncWrite, FutureExt}; use futures_rustls::TlsStream; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use rustls::{pki_types::ServerName, CommonState}; -use std::net::{IpAddr, Ipv4Addr}; -use std::sync::Arc; +use crate::{certificate, certificate::P2pCertificate}; #[derive(thiserror::Error, Debug)] pub enum UpgradeError { @@ -102,8 +104,10 @@ where fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { async move { - // Spec: In order to keep this flexibility for future versions, clients that only support the version of the handshake defined in this document MUST NOT send any value in the Server Name Indication. - // Setting `ServerName` to unspecified will disable the use of the SNI extension. + // Spec: In order to keep this flexibility for future versions, clients that only + // support the version of the handshake defined in this document MUST NOT send any value + // in the Server Name Indication. Setting `ServerName` to unspecified will + // disable the use of the SNI extension. let name = ServerName::IpAddress(rustls::pki_types::IpAddr::from(IpAddr::V4( Ipv4Addr::UNSPECIFIED, ))); diff --git a/transports/tls/src/verifier.rs b/transports/tls/src/verifier.rs index 65636cbe708..82b275bc7be 100644 --- a/transports/tls/src/verifier.rs +++ b/transports/tls/src/verifier.rs @@ -23,7 +23,8 @@ //! This module handles a verification of a client/server certificate chain //! and signatures allegedly by the given certificates. -use crate::certificate; +use std::sync::Arc; + use libp2p_identity::PeerId; use rustls::{ client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, @@ -35,7 +36,8 @@ use rustls::{ CertificateError, DigitallySignedStruct, DistinguishedName, OtherError, SignatureScheme, SupportedCipherSuite, SupportedProtocolVersion, }; -use std::sync::Arc; + +use crate::certificate; /// The protocol versions supported by this verifier. /// @@ -67,8 +69,8 @@ pub(crate) struct Libp2pCertificateVerifier { /// /// - Exactly one certificate must be presented. /// - The certificate must be self-signed. -/// - The certificate must have a valid libp2p extension that includes a -/// signature of its public key. +/// - The certificate must have a valid libp2p extension that includes a signature of its public +/// key. impl Libp2pCertificateVerifier { pub(crate) fn new() -> Self { Self { @@ -153,11 +155,11 @@ impl ServerCertVerifier for Libp2pCertificateVerifier { /// libp2p requires the following of X.509 client certificate chains: /// -/// - Exactly one certificate must be presented. In particular, client -/// authentication is mandatory in libp2p. +/// - Exactly one certificate must be presented. In particular, client authentication is mandatory +/// in libp2p. /// - The certificate must be self-signed. -/// - The certificate must have a valid libp2p extension that includes a -/// signature of its public key. +/// - The certificate must have a valid libp2p extension that includes a signature of its public +/// key. 
impl ClientCertVerifier for Libp2pCertificateVerifier { fn offer_client_auth(&self) -> bool { true diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index d488ae7846a..e335f68a7e4 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ -1,10 +1,6 @@ use futures::{future, StreamExt}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; +use libp2p_core::{multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Transport}; use libp2p_swarm::{dummy, Config, Swarm, SwarmEvent}; -use std::time::Duration; #[tokio::test] async fn can_establish_connection() { @@ -69,6 +65,6 @@ fn make_swarm() -> Swarm { transport, dummy::Behaviour, identity.public().to_peer_id(), - Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(60)), + Config::with_tokio_executor(), ) } diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 5c57e255b4d..74e19476595 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -38,21 +38,24 @@ ))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::stream::BoxStream; +use std::{ + collections::VecDeque, + io, + path::PathBuf, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Ready}, prelude::*, + stream::BoxStream, }; -use libp2p_core::transport::ListenerId; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, Transport, }; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{io, path::PathBuf}; pub type Listener = BoxStream< 'static, @@ -241,14 +244,16 @@ fn multiaddr_to_path(addr: &Multiaddr) -> Result { #[cfg(all(test, feature = "async-std"))] mod tests { - use super::{multiaddr_to_path, UdsConfig}; + use std::{borrow::Cow, path::Path}; + use futures::{channel::oneshot, prelude::*}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse}, Endpoint, Transport, }; - use std::{borrow::Cow, path::Path}; + + use super::{multiaddr_to_path, UdsConfig}; #[test] fn multiaddr_to_path_conversion() { diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 4663913c849..6d42d74f610 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -14,7 +14,7 @@ publish = true [dependencies] bytes = "1" futures = { workspace = true } -getrandom = { version = "0.2.15", features = ["js"] } +getrandom = { workspace = true, features = ["js"] } hex = "0.4.3" js-sys = { version = "0.3" } libp2p-core = { workspace = true } diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index d0c6ccd2238..01c1a8b3b60 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -1,17 +1,15 @@ //! A libp2p connection backed by an [RtcPeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection). 
-use super::{Error, Stream}; -use crate::stream::DropListener; -use futures::channel::mpsc; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use std::{ + pin::Pin, + task::{ready, Context, Poll, Waker}, +}; + +use futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use js_sys::{Object, Reflect}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use libp2p_webrtc_utils::Fingerprint; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::Waker; -use std::task::{ready, Context, Poll}; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use web_sys::{ @@ -19,6 +17,9 @@ use web_sys::{ RtcSessionDescriptionInit, }; +use super::{Error, Stream}; +use crate::stream::DropListener; + /// A WebRTC Connection. /// /// All connections need to be [`Send`] which is why some fields are wrapped in [`SendWrapper`]. @@ -31,7 +32,8 @@ pub struct Connection { closed: bool, /// An [`mpsc::channel`] for all inbound data channels. /// - /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all inbound data channels. + /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all + /// inbound data channels. inbound_data_channels: SendWrapper>, /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. drop_listeners: FuturesUnordered, @@ -43,7 +45,8 @@ pub struct Connection { impl Connection { /// Create a new inner WebRTC Connection pub(crate) fn new(peer_connection: RtcPeerConnection) -> Self { - // An ondatachannel Future enables us to poll for incoming data channel events in poll_incoming + // An ondatachannel Future enables us to poll for incoming data channel events in + // poll_incoming let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { @@ -120,7 +123,8 @@ impl StreamMuxer for Connection { Poll::Ready(Ok(stream)) } None => { - // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. + // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed + // which means we are most likely shutting down the connection. tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } diff --git a/transports/webrtc-websys/src/lib.rs b/transports/webrtc-websys/src/lib.rs index 04fced4111b..07207eb0ae8 100644 --- a/transports/webrtc-websys/src/lib.rs +++ b/transports/webrtc-websys/src/lib.rs @@ -7,7 +7,9 @@ mod stream; mod transport; mod upgrade; -pub use self::connection::Connection; -pub use self::error::Error; -pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 9e63fd92462..628043111ee 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -1,5 +1,6 @@ -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +use libp2p_webrtc_utils::Fingerprint; use web_sys::{RtcSdpType, RtcSessionDescriptionInit}; /// Creates the SDP answer used by the client. 
diff --git a/transports/webrtc-websys/src/stream.rs b/transports/webrtc-websys/src/stream.rs index 812aa5afbbf..ee0183b07f0 100644 --- a/transports/webrtc-websys/src/stream.rs +++ b/transports/webrtc-websys/src/stream.rs @@ -1,11 +1,15 @@ //! The WebRTC [Stream] over the Connection -use self::poll_data_channel::PollDataChannel; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::{Context, Poll}; use web_sys::RtcDataChannel; +use self::poll_data_channel::PollDataChannel; + mod poll_data_channel; /// A stream over a WebRTC connection. diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 3ec744342eb..2abe499afce 100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -1,19 +1,23 @@ -use std::cmp::min; -use std::io; -use std::pin::Pin; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::task::{Context, Poll}; +use std::{ + cmp::min, + io, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{AsyncRead, AsyncWrite}; +use futures::{task::AtomicWaker, AsyncRead, AsyncWrite}; use libp2p_webrtc_utils::MAX_MSG_LEN; use wasm_bindgen::prelude::*; use web_sys::{Event, MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelState}; -/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements [`AsyncRead`] and [`AsyncWrite`]. +/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements [`AsyncRead`] and +/// [`AsyncWrite`]. #[derive(Debug, Clone)] pub(crate) struct PollDataChannel { /// The [`RtcDataChannel`] being wrapped. @@ -25,7 +29,8 @@ pub(crate) struct PollDataChannel { /// Waker for when we are waiting for the DC to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the [`MAX_MSG_LEN`] threshold. + /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the + /// [`MAX_MSG_LEN`] threshold. write_waker: Rc, /// Waker for when we are waiting for the DC to be closed. @@ -33,9 +38,11 @@ pub(crate) struct PollDataChannel { /// Whether we've been overloaded with data by the remote. /// - /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us messages faster than we can read them. - /// In that case, we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. - /// Failing these will (very likely), cause the application developer to drop the stream which resets it. + /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us + /// messages faster than we can read them. In that case, we return an [`std::io::Error`] + /// from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. + /// Failing these will (very likely), + /// cause the application developer to drop the stream which resets it. overloaded: Rc, // Store the closures for proper garbage collection. 
@@ -83,7 +90,9 @@ impl PollDataChannel { inner.set_onclose(Some(on_close_closure.as_ref().unchecked_ref())); let new_data_waker = Rc::new(AtomicWaker::new()); - let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate `MAX_READ_BUFFER` per stream. + // We purposely don't use `with_capacity` + // so we don't eagerly allocate `MAX_READ_BUFFER` per stream. + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); let overloaded = Rc::new(AtomicBool::new(false)); let on_message_closure = Closure::::new({ diff --git a/transports/webrtc-websys/src/transport.rs b/transports/webrtc-websys/src/transport.rs index 836acb0b9f6..abf02520244 100644 --- a/transports/webrtc-websys/src/transport.rs +++ b/transports/webrtc-websys/src/transport.rs @@ -1,15 +1,18 @@ -use super::upgrade; -use super::Connection; -use super::Error; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::DialOpts; -use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent}; +use libp2p_core::{ + multiaddr::Multiaddr, + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, +}; use libp2p_identity::{Keypair, PeerId}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use super::{upgrade, Connection, Error}; /// Config for the [`Transport`]. #[derive(Clone)] diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index d42f2e3ae18..b1de908ae82 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -1,13 +1,11 @@ -use super::Error; -use crate::connection::RtcPeerConnection; -use crate::error::AuthenticationError; -use crate::sdp; -use crate::Connection; +use std::net::SocketAddr; + use libp2p_identity::{Keypair, PeerId}; -use libp2p_webrtc_utils::noise; -use libp2p_webrtc_utils::Fingerprint; +use libp2p_webrtc_utils::{noise, Fingerprint}; use send_wrapper::SendWrapper; -use std::net::SocketAddr; + +use super::Error; +use crate::{connection::RtcPeerConnection, error::AuthenticationError, sdp, Connection}; /// Upgrades an outbound WebRTC connection by creating the data channel /// and conducting a Noise handshake diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 4197a9419d8..d43be5720d4 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -12,7 +12,6 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -bytes = "1" futures = { workspace = true } futures-timer = "3" hex = "0.4" @@ -24,14 +23,13 @@ libp2p-webrtc-utils = { workspace = true } multihash = { workspace = true } rand = "0.8" rcgen = { workspace = true } -serde = { version = "1.0", features = ["derive"] } -stun = "0.6" +stun = "0.7" thiserror = { workspace = true } -tinytemplate = "1.2" tokio = { workspace = true, features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } tracing = { workspace = true } webrtc = { version = "0.9.0", optional = true } +webrtc-ice = "=0.10.0" # smoke tests only work with this version [features] tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] @@ -41,8 +39,7 @@ pem = ["webrtc?/pem"] libp2p-identity = { workspace = true, features = ["rand"] } 
tokio = { workspace = true, features = ["full"] } quickcheck = "1.0.3" -tracing-subscriber = { workspace = true, features = ["env-filter"] } - +libp2p-test-utils = { workspace = true } [[test]] name = "smoke" diff --git a/transports/webrtc/src/lib.rs b/transports/webrtc/src/lib.rs index ea1e6a4d646..99f0c7da658 100644 --- a/transports/webrtc/src/lib.rs +++ b/transports/webrtc/src/lib.rs @@ -23,7 +23,7 @@ //! //! # Overview //! -//! ## ICE +//!  ## ICE //! //! RFCs: 8839, 8445 See also: //! @@ -39,10 +39,9 @@ //! //! The ICE workflow works as follows: //! -//! - An "offerer" determines ways in which it could be accessible (either an -//! IP address or through a relay using a TURN server), which are called "candidates". It then -//! generates a small text payload in a format called SDP, that describes the request for a -//! connection. +//! - An "offerer" determines ways in which it could be accessible (either an IP address or through +//! a relay using a TURN server), which are called "candidates". It then generates a small text +//! payload in a format called SDP, that describes the request for a connection. //! - The offerer sends this SDP-encoded message to the answerer. The medium through which this //! exchange is done is out of scope of the ICE protocol. //! - The answerer then finds its own candidates, and generates an answer, again in the SDP format. diff --git a/transports/webrtc/src/tokio/certificate.rs b/transports/webrtc/src/tokio/certificate.rs index 81197af4132..7ff35d46bdd 100644 --- a/transports/webrtc/src/tokio/certificate.rs +++ b/transports/webrtc/src/tokio/certificate.rs @@ -100,9 +100,10 @@ enum Kind { #[cfg(all(test, feature = "pem"))] mod test { - use super::*; use rand::thread_rng; + use super::*; + #[test] fn test_certificate_serialize_pem_and_from_pem() { let cert = Certificate::generate(&mut thread_rng()).unwrap(); diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 3bcc4c3193e..19232707e7f 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -18,26 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::stream::FuturesUnordered; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use futures::{ channel::{ mpsc, oneshot::{self, Sender}, }, + future::BoxFuture, lock::Mutex as FutMutex, + ready, + stream::FuturesUnordered, StreamExt, - {future::BoxFuture, ready}, }; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use webrtc::data::data_channel::DataChannel as DetachedDataChannel; -use webrtc::data_channel::RTCDataChannel; -use webrtc::peer_connection::RTCPeerConnection; - -use std::task::Waker; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + data::data_channel::DataChannel as DetachedDataChannel, data_channel::RTCDataChannel, + peer_connection::RTCPeerConnection, }; use crate::tokio::{error::Error, stream, stream::Stream}; @@ -172,7 +173,9 @@ impl StreamMuxer for Connection { "Sender-end of channel should be owned by `RTCPeerConnection`" ); - Poll::Pending // Return `Pending` without registering a waker: If the channel is closed, we don't need to be called anymore. + // Return `Pending` without registering a waker: If the channel is + // closed, we don't need to be called anymore. 
+ Poll::Pending } } } diff --git a/transports/webrtc/src/tokio/req_res_chan.rs b/transports/webrtc/src/tokio/req_res_chan.rs index fb29e16db27..a733c86d5cc 100644 --- a/transports/webrtc/src/tokio/req_res_chan.rs +++ b/transports/webrtc/src/tokio/req_res_chan.rs @@ -18,16 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{ - channel::{mpsc, oneshot}, - SinkExt, StreamExt, -}; - use std::{ io, task::{Context, Poll}, }; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, StreamExt, +}; + pub(crate) fn new(capacity: usize) -> (Sender, Receiver) { let (sender, receiver) = mpsc::channel(capacity); diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index 4be4c19f188..d9f869d4433 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; -use libp2p_webrtc_utils::sdp::render_description; -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; +use libp2p_webrtc_utils::{sdp::render_description, Fingerprint}; use webrtc::peer_connection::sdp::session_description::RTCSessionDescription; /// Creates the SDP answer used by the client. diff --git a/transports/webrtc/src/tokio/stream.rs b/transports/webrtc/src/tokio/stream.rs index 4278a751e27..9d5a9faf440 100644 --- a/transports/webrtc/src/tokio/stream.rs +++ b/transports/webrtc/src/tokio/stream.rs @@ -40,8 +40,8 @@ pub struct Stream { pub(crate) type DropListener = libp2p_webrtc_utils::DropListener>; impl Stream { - /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream - /// is dropped. + /// Returns a new `Substream` and a listener, which will notify the receiver when/if the + /// substream is dropped. pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { let mut data_channel = PollDataChannel::new(data_channel).compat(); data_channel.get_mut().set_read_buf_capacity(MAX_MSG_LEN); diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 62049c8f59b..29fad180d93 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -18,6 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + io, + net::{IpAddr, SocketAddr}, + pin::Pin, + task::{Context, Poll, Waker}, +}; + use futures::{future::BoxFuture, prelude::*, stream::SelectAll}; use if_watch::{tokio::IfWatcher, IfEvent}; use libp2p_core::{ @@ -28,14 +35,6 @@ use libp2p_identity as identity; use libp2p_identity::PeerId; use webrtc::peer_connection::configuration::RTCConfiguration; -use std::net::IpAddr; -use std::{ - io, - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, -}; - use crate::tokio::{ certificate::Certificate, connection::Connection, @@ -60,8 +59,8 @@ impl Transport { /// /// ``` /// use libp2p_identity as identity; + /// use libp2p_webrtc::tokio::{Certificate, Transport}; /// use rand::thread_rng; - /// use libp2p_webrtc::tokio::{Transport, Certificate}; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// let transport = Transport::new(id_keys, Certificate::generate(&mut thread_rng()).unwrap()); @@ -124,8 +123,8 @@ impl libp2p_core::Transport for Transport { dial_opts: DialOpts, ) -> Result> { if dial_opts.role.is_listener() { - // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP packet to the - // `addr`. See DCUtR specification below. + // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP packet to + // the `addr`. See DCUtR specification below. // // https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol tracing::warn!("WebRTC hole punch is not yet supported"); @@ -426,11 +425,13 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { #[cfg(test)] mod tests { - use super::*; + use std::net::Ipv6Addr; + use futures::future::poll_fn; use libp2p_core::Transport as _; use rand::thread_rng; - use std::net::Ipv6Addr; + + use super::*; #[test] fn missing_webrtc_protocol() { diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index 7a8d960826d..dcb88592c9b 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -18,6 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + io, + io::ErrorKind, + net::SocketAddr, + sync::Arc, + task::{Context, Poll}, +}; + use async_trait::async_trait; use futures::{ channel::oneshot, @@ -31,16 +40,9 @@ use stun::{ }; use thiserror::Error; use tokio::{io::ReadBuf, net::UdpSocket}; -use webrtc::ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}; -use webrtc::util::{Conn, Error}; - -use std::{ - collections::{HashMap, HashSet}, - io, - io::ErrorKind, - net::SocketAddr, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}, + util::{Conn, Error}, }; use crate::tokio::req_res_chan; @@ -303,8 +305,8 @@ impl UDPMuxNewAddr { if let Poll::Ready(Some((ufrag, response))) = self.remove_conn_command.poll_next_unpin(cx) { - // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but since `conns` - // is keyed on `ufrag` their implementation is equivalent. + // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but since + // `conns` is keyed on `ufrag` their implementation is equivalent. 
if let Some(removed_conn) = self.conns.remove(&ufrag) { for address in removed_conn.get_addresses() { @@ -336,8 +338,9 @@ impl UDPMuxNewAddr { let conn = self.address_map.get(&addr); let conn = match conn { - // If we couldn't find the connection based on source address, see if - // this is a STUN message and if so if we can find the connection based on ufrag. + // If we couldn't find the connection based on source address, see + // if this is a STUN message and if + // so if we can find the connection based on ufrag. None if is_stun_message(read.filled()) => { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 4145a5e7510..9293a474084 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -18,27 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use libp2p_webrtc_utils::{noise, Fingerprint}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; -use futures::channel::oneshot; -use futures::future::Either; +use futures::{channel::oneshot, future::Either}; use futures_timer::Delay; use libp2p_identity as identity; use libp2p_identity::PeerId; -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use webrtc::api::setting_engine::SettingEngine; -use webrtc::api::APIBuilder; -use webrtc::data::data_channel::DataChannel; -use webrtc::data_channel::data_channel_init::RTCDataChannelInit; -use webrtc::dtls_transport::dtls_role::DTLSRole; -use webrtc::ice::network_type::NetworkType; -use webrtc::ice::udp_mux::UDPMux; -use webrtc::ice::udp_network::UDPNetwork; -use webrtc::peer_connection::configuration::RTCConfiguration; -use webrtc::peer_connection::RTCPeerConnection; - -use crate::tokio::sdp::random_ufrag; -use crate::tokio::{error::Error, sdp, stream::Stream, Connection}; +use libp2p_webrtc_utils::{noise, Fingerprint}; +use webrtc::{ + api::{setting_engine::SettingEngine, APIBuilder}, + data::data_channel::DataChannel, + data_channel::data_channel_init::RTCDataChannelInit, + dtls_transport::dtls_role::DTLSRole, + ice::{network_type::NetworkType, udp_mux::UDPMux, udp_network::UDPNetwork}, + peer_connection::{configuration::RTCConfiguration, RTCPeerConnection}, +}; + +use crate::tokio::{error::Error, sdp, sdp::random_ufrag, stream::Stream, Connection}; /// Creates a new outbound WebRTC connection. pub(crate) async fn outbound( diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index d606d66c41f..e27e3cee672 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -18,28 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::channel::mpsc; -use futures::future::{BoxFuture, Either}; -use futures::stream::StreamExt; -use futures::{future, ready, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}; -use libp2p_core::{Endpoint, Multiaddr, Transport}; +use std::{ + future::Future, + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::mpsc, + future, + future::{BoxFuture, Either}, + ready, + stream::StreamExt, + AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt, +}; +use libp2p_core::{ + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}, + Endpoint, Multiaddr, Transport, +}; use libp2p_identity::PeerId; use libp2p_webrtc as webrtc; use rand::{thread_rng, RngCore}; -use std::future::Future; -use std::num::NonZeroU8; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use tracing_subscriber::EnvFilter; #[tokio::test] async fn smoke() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let (a_peer_id, mut a_transport) = create_transport(); let (b_peer_id, mut b_transport) = create_transport(); @@ -56,9 +62,7 @@ async fn smoke() { // Note: This test should likely be ported to the muxer compliance test suite. #[test] fn concurrent_connections_and_streams_tokio() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); + libp2p_test_utils::with_default_env_filter(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md index 9d0cb7d7726..affe9ff2551 100644 --- a/transports/websocket-websys/CHANGELOG.md +++ b/transports/websocket-websys/CHANGELOG.md @@ -3,6 +3,9 @@ - fix: Return `None` when extracting a `/dnsaddr` address See [PR 5613](https://github.com/libp2p/rust-libp2p/pull/5613) +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.4.0 - Implement refactored `Transport`. diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 1e604ba0478..f33703c1884 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -16,7 +16,6 @@ futures = { workspace = true } js-sys = "0.3.69" libp2p-core = { workspace = true } tracing = { workspace = true } -parking_lot = "0.12.3" send_wrapper = "0.6.0" thiserror = { workspace = true } wasm-bindgen = "0.2.90" diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 17b07c71c0a..72f4068610d 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -20,23 +20,29 @@ //! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html). 
+#![allow(unexpected_cfgs)] + mod web_context; +use std::{ + cmp::min, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; + use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{future::Ready, io, prelude::*}; +use futures::{future::Ready, io, prelude::*, task::AtomicWaker}; use js_sys::Array; -use libp2p_core::transport::DialOpts; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, }; use send_wrapper::SendWrapper; -use std::cmp::min; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::{pin::Pin, task::Context, task::Poll}; use wasm_bindgen::prelude::*; use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; @@ -60,7 +66,6 @@ use crate::web_context::WebContext; /// .multiplex(yamux::Config::default()) /// .boxed(); /// ``` -/// #[derive(Default)] pub struct Transport { _private: (), @@ -174,7 +179,8 @@ struct Inner { /// Waker for when we are waiting for the WebSocket to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the WebSocket because we previously exceeded the [`MAX_BUFFER`] threshold. + /// Waker for when we are waiting to write (again) to the WebSocket because we previously + /// exceeded the [`MAX_BUFFER`] threshold. write_waker: Rc, /// Waker for when we are waiting for the WebSocket to be closed. @@ -306,7 +312,9 @@ impl Connection { .expect("to have a window or worker context") .set_interval_with_callback_and_timeout_and_arguments( on_buffered_amount_low_closure.as_ref().unchecked_ref(), - 100, // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws transport, no further effort was invested at the time. + // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws + // transport, no further effort was invested at the time. + 100, &Array::new(), ) .expect("to be able to set an interval"); @@ -432,7 +440,8 @@ impl AsyncWrite for Connection { impl Drop for Connection { fn drop(&mut self) { - // Unset event listeners, as otherwise they will be called by JS after the handlers have already been dropped. + // Unset event listeners, as otherwise they will be called by JS after the handlers have + // already been dropped. self.inner.socket.set_onclose(None); self.inner.socket.set_onerror(None); self.inner.socket.set_onopen(None); @@ -456,9 +465,10 @@ impl Drop for Connection { #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; + use super::*; + #[test] fn extract_url() { let peer_id = PeerId::random(); diff --git a/transports/websocket/src/error.rs b/transports/websocket/src/error.rs index 7dc22331bcd..efab95a7621 100644 --- a/transports/websocket/src/error.rs +++ b/transports/websocket/src/error.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::tls; -use libp2p_core::Multiaddr; use std::{error, fmt}; +use libp2p_core::Multiaddr; + +use crate::tls; + /// Error in WebSockets. #[derive(Debug)] pub enum Error { diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 259be6a68f8..3d2738cbc3c 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,11 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{error::Error, quicksink, tls}; +use std::{ + borrow::Cow, + collections::HashMap, + fmt, io, mem, + net::IpAddr, + ops::DerefMut, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; -use futures_rustls::rustls::pki_types::ServerName; -use futures_rustls::{client, server}; +use futures_rustls::{client, rustls::pki_types::ServerName, server}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, TransportError, TransportEvent}, @@ -33,12 +42,10 @@ use soketto::{ connection::{self, CloseReason}, handshake, }; -use std::borrow::Cow; -use std::net::IpAddr; -use std::{collections::HashMap, ops::DerefMut, sync::Arc}; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; use url::Url; +use crate::{error::Error, quicksink, tls}; + /// Max. number of payload bytes of a single frame. const MAX_DATA_SIZE: usize = 256 * 1024 * 1024; @@ -809,10 +816,12 @@ where #[cfg(test)] mod tests { - use super::*; - use libp2p_identity::PeerId; use std::io; + use libp2p_identity::PeerId; + + use super::*; + #[test] fn listen_addr() { let tcp_addr = "/ip4/0.0.0.0/tcp/2222".parse::().unwrap(); diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index cbc923613dd..fbed8fddc66 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -27,6 +27,12 @@ pub mod framed; mod quicksink; pub mod tls; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; + use error::Error; use framed::{Connection, Incoming}; use futures::{future::BoxFuture, prelude::*, ready}; @@ -37,11 +43,6 @@ use libp2p_core::{ Transport, }; use rw_stream_sink::RwStreamSink; -use std::{ - io, - pin::Pin, - task::{Context, Poll}, -}; /// A Websocket transport. 
/// @@ -75,18 +76,28 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new(dns::async_std::Transport::system( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ).await.unwrap()); +/// let mut transport = websocket::WsConfig::new( +/// dns::async_std::Transport::system(tcp::async_io::Transport::new(tcp::Config::default())) +/// .await +/// .unwrap(), +/// ); /// /// let rcgen_cert = generate_simple_self_signed(vec!["localhost".to_string()]).unwrap(); /// let priv_key = websocket::tls::PrivateKey::new(rcgen_cert.serialize_private_key_der()); /// let cert = websocket::tls::Certificate::new(rcgen_cert.serialize_der().unwrap()); /// transport.set_tls_config(websocket::tls::Config::new(priv_key, vec![cert]).unwrap()); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -105,13 +116,20 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ); +/// let mut transport = +/// websocket::WsConfig::new(tcp::async_io::Transport::new(tcp::Config::default())); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -283,7 +301,6 @@ where #[cfg(test)] mod tests { - use super::WsConfig; use futures::prelude::*; use libp2p_core::{ multiaddr::Protocol, @@ -293,6 +310,8 @@ mod tests { use libp2p_identity::PeerId; use libp2p_tcp as tcp; + use super::WsConfig; + #[test] fn dialer_connects_to_listener_ipv4() { let a = "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(); diff --git a/transports/websocket/src/quicksink.rs b/transports/websocket/src/quicksink.rs index 4f620536ea1..a0e2fb8b0f6 100644 --- a/transports/websocket/src/quicksink.rs +++ b/transports/websocket/src/quicksink.rs @@ -19,26 +19,28 @@ // ```no_run // use async_std::io; // use futures::prelude::*; +// // use crate::quicksink::Action; // // crate::quicksink::make_sink(io::stdout(), |mut stdout, action| async move { // match action { // Action::Send(x) => stdout.write_all(x).await?, // Action::Flush => stdout.flush().await?, -// Action::Close => stdout.close().await? +// Action::Close => stdout.close().await?, // } // Ok::<_, io::Error>(stdout) // }); // ``` -use futures::{ready, sink::Sink}; -use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; +use futures::{ready, sink::Sink}; +use pin_project_lite::pin_project; + /// Returns a `Sink` impl based on the initial value and the given closure. 
/// /// The closure will be applied to the initial value and an [`Action`] that @@ -291,10 +293,11 @@ where #[cfg(test)] mod tests { - use crate::quicksink::{make_sink, Action}; use async_std::{io, task}; use futures::{channel::mpsc, prelude::*}; + use crate::quicksink::{make_sink, Action}; + #[test] fn smoke_test() { task::block_on(async { diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 77090e21675..598dcc22765 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; use std::{fmt, io, sync::Arc}; +use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; + /// TLS configuration. #[derive(Clone)] pub struct Config { diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md index 411117918bd..45a94495e4e 100644 --- a/transports/webtransport-websys/CHANGELOG.md +++ b/transports/webtransport-websys/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.4.1 + +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.4.0 - Implement refactored `Transport`. diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 0cfc37bf041..ef2865535bf 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-webtransport-websys" edition = "2021" rust-version = { workspace = true } description = "WebTransport for libp2p under WASM environment" -version = "0.4.0" +version = "0.4.1" authors = [ "Yiannis Marangos ", "oblique ", diff --git a/transports/webtransport-websys/src/connection.rs b/transports/webtransport-websys/src/connection.rs index 956a66288af..75c8603864a 100644 --- a/transports/webtransport-websys/src/connection.rs +++ b/transports/webtransport-websys/src/connection.rs @@ -1,22 +1,29 @@ +use std::{ + collections::HashSet, + future::poll_fn, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::FutureExt; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::OutboundConnectionUpgrade; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::OutboundConnectionUpgrade, + UpgradeInfo, +}; use libp2p_identity::{Keypair, PeerId}; use multihash::Multihash; use send_wrapper::SendWrapper; -use std::collections::HashSet; -use std::future::poll_fn; -use std::pin::Pin; -use std::task::{ready, Context, Poll}; use wasm_bindgen_futures::JsFuture; use web_sys::ReadableStreamDefaultReader; -use crate::bindings::{WebTransport, WebTransportBidirectionalStream}; -use crate::endpoint::Endpoint; -use crate::fused_js_promise::FusedJsPromise; -use crate::utils::{detach_promise, parse_reader_response, to_js_type}; -use crate::{Error, Stream}; +use crate::{ + bindings::{WebTransport, WebTransportBidirectionalStream}, + endpoint::Endpoint, + fused_js_promise::FusedJsPromise, + utils::{detach_promise, parse_reader_response, to_js_type}, + Error, Stream, +}; /// An opened WebTransport connection. 
#[derive(Debug)] diff --git a/transports/webtransport-websys/src/endpoint.rs b/transports/webtransport-websys/src/endpoint.rs index 0bff1ed6186..fd209c51664 100644 --- a/transports/webtransport-websys/src/endpoint.rs +++ b/transports/webtransport-websys/src/endpoint.rs @@ -1,11 +1,14 @@ +use std::collections::HashSet; + use js_sys::{Array, Uint8Array}; use libp2p_identity::PeerId; use multiaddr::{Multiaddr, Protocol}; use multihash::Multihash; -use std::collections::HashSet; -use crate::bindings::{WebTransportHash, WebTransportOptions}; -use crate::Error; +use crate::{ + bindings::{WebTransportHash, WebTransportOptions}, + Error, +}; pub(crate) struct Endpoint { pub(crate) host: String, @@ -149,9 +152,10 @@ impl Endpoint { #[cfg(test)] mod tests { - use super::*; use std::str::FromStr; + use super::*; + fn multihash_from_str(s: &str) -> Multihash<64> { let (_base, bytes) = multibase::decode(s).unwrap(); Multihash::from_bytes(&bytes).unwrap() diff --git a/transports/webtransport-websys/src/fused_js_promise.rs b/transports/webtransport-websys/src/fused_js_promise.rs index 0ba846501c2..d3d3858a553 100644 --- a/transports/webtransport-websys/src/fused_js_promise.rs +++ b/transports/webtransport-websys/src/fused_js_promise.rs @@ -1,8 +1,11 @@ +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::FutureExt; use js_sys::Promise; -use std::future::Future; -use std::pin::Pin; -use std::task::{ready, Context, Poll}; use wasm_bindgen::JsValue; use wasm_bindgen_futures::JsFuture; diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs index f9c59694fa3..126adc054a9 100644 --- a/transports/webtransport-websys/src/lib.rs +++ b/transports/webtransport-websys/src/lib.rs @@ -1,5 +1,7 @@ //! Libp2p WebTransport built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html) +#![allow(unexpected_cfgs)] + mod bindings; mod connection; mod endpoint; @@ -9,7 +11,9 @@ mod stream; mod transport; mod utils; -pub use self::connection::Connection; -pub use self::error::Error; -pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webtransport-websys/src/stream.rs b/transports/webtransport-websys/src/stream.rs index ba4238ac814..b9d1669b6dc 100644 --- a/transports/webtransport-websys/src/stream.rs +++ b/transports/webtransport-websys/src/stream.rs @@ -1,16 +1,20 @@ +use std::{ + io, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite, FutureExt}; use js_sys::Uint8Array; use send_wrapper::SendWrapper; -use std::io; -use std::pin::Pin; -use std::task::ready; -use std::task::{Context, Poll}; use web_sys::{ReadableStreamDefaultReader, WritableStreamDefaultWriter}; -use crate::bindings::WebTransportBidirectionalStream; -use crate::fused_js_promise::FusedJsPromise; -use crate::utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}; -use crate::Error; +use crate::{ + bindings::WebTransportBidirectionalStream, + fused_js_promise::FusedJsPromise, + utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}, + Error, +}; /// A stream on a connection. 
#[derive(Debug)] diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index 6a9a9dad954..bad9509864e 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -1,17 +1,18 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::{ - Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent, +use libp2p_core::{ + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, }; use libp2p_identity::{Keypair, PeerId}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use crate::endpoint::Endpoint; -use crate::Connection; -use crate::Error; +use crate::{endpoint::Endpoint, Connection, Error}; /// Config for the [`Transport`]. pub struct Config { diff --git a/transports/webtransport-websys/src/utils.rs b/transports/webtransport-websys/src/utils.rs index 0b3550e5b5b..df59ee15161 100644 --- a/transports/webtransport-websys/src/utils.rs +++ b/transports/webtransport-websys/src/utils.rs @@ -1,7 +1,8 @@ +use std::io; + use js_sys::{Promise, Reflect}; use once_cell::sync::Lazy; use send_wrapper::SendWrapper; -use std::io; use wasm_bindgen::{JsCast, JsValue}; use crate::Error; @@ -17,7 +18,6 @@ static DO_NOTHING: Lazy> = Lazy::new(|| { /// A promise always runs in the background, however if you don't await it, /// or specify a `catch` handler before you drop it, it might cause some side /// effects. This function avoids any side effects. -// // Ref: https://github.com/typescript-eslint/typescript-eslint/blob/391a6702c0a9b5b3874a7a27047f2a721f090fb6/packages/eslint-plugin/docs/rules/no-floating-promises.md pub(crate) fn detach_promise(promise: Promise) { // Avoid having "floating" promise and ignore any errors. @@ -50,7 +50,6 @@ where } /// Parse response from `ReadableStreamDefaultReader::read`. 
-// // Ref: https://streams.spec.whatwg.org/#default-reader-prototype pub(crate) fn parse_reader_response(resp: &JsValue) -> Result, JsValue> { let value = Reflect::get(resp, &JsValue::from_str("value"))?; diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml index d7db378ab1a..593743d1617 100644 --- a/wasm-tests/webtransport-tests/Cargo.toml +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -10,7 +10,7 @@ release = false [dependencies] futures = { workspace = true } -getrandom = { version = "0.2.15", features = ["js"] } +getrandom = { workspace = true, features = ["js"] } libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } libp2p-noise = { workspace = true } diff --git a/wasm-tests/webtransport-tests/echo-server/go.mod b/wasm-tests/webtransport-tests/echo-server/go.mod index e2e0c6591ba..6ac311fcb68 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.mod +++ b/wasm-tests/webtransport-tests/echo-server/go.mod @@ -43,13 +43,13 @@ require ( go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/protobuf v1.33.0 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/wasm-tests/webtransport-tests/echo-server/go.sum b/wasm-tests/webtransport-tests/echo-server/go.sum index a9d53233159..b6fac448bd7 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.sum +++ b/wasm-tests/webtransport-tests/echo-server/go.sum @@ -213,8 +213,8 @@ golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -225,8 +225,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -240,8 +240,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -253,6 +253,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -266,14 +268,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -287,8 +289,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs index 938cdf0b3e1..0ff838b49e5 100644 --- a/wasm-tests/webtransport-tests/src/lib.rs +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -1,15 +1,17 @@ -use futures::channel::oneshot; -use futures::{AsyncReadExt, AsyncWriteExt}; +#![allow(unexpected_cfgs)] +use std::{future::poll_fn, pin::Pin}; + +use futures::{channel::oneshot, AsyncReadExt, AsyncWriteExt}; use getrandom::getrandom; -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::{Endpoint, StreamMuxer, Transport as _}; +use libp2p_core::{ + transport::{DialOpts, PortUse}, + Endpoint, StreamMuxer, Transport as _, +}; use libp2p_identity::{Keypair, PeerId}; use libp2p_noise as noise; use libp2p_webtransport_websys::{Config, Connection, Error, Stream, Transport}; use multiaddr::{Multiaddr, Protocol}; use multihash::Multihash; -use std::future::poll_fn; -use std::pin::Pin; use wasm_bindgen::JsCast; use wasm_bindgen_futures::{spawn_local, JsFuture}; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; @@ -18,7 +20,7 @@ use web_sys::{window, Response}; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] -async fn single_conn_single_stream() { +pub async fn single_conn_single_stream() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -26,7 +28,7 @@ async fn single_conn_single_stream() { } #[wasm_bindgen_test] -async fn single_conn_single_stream_incoming() { +pub async fn single_conn_single_stream_incoming() { let mut conn = new_connection_to_echo_server().await; let mut stream = incoming_stream(&mut conn).await; @@ -34,7 +36,7 @@ 
async fn single_conn_single_stream_incoming() { } #[wasm_bindgen_test] -async fn single_conn_multiple_streams() { +pub async fn single_conn_multiple_streams() { let mut conn = new_connection_to_echo_server().await; let mut tasks = Vec::new(); let mut streams = Vec::new(); @@ -57,7 +59,7 @@ async fn single_conn_multiple_streams() { } #[wasm_bindgen_test] -async fn multiple_conn_multiple_streams() { +pub async fn multiple_conn_multiple_streams() { let mut tasks = Vec::new(); let mut conns = Vec::new(); @@ -88,7 +90,7 @@ async fn multiple_conn_multiple_streams() { } #[wasm_bindgen_test] -async fn multiple_conn_multiple_streams_sequential() { +pub async fn multiple_conn_multiple_streams_sequential() { for _ in 0..10 { let mut conn = new_connection_to_echo_server().await; @@ -105,7 +107,7 @@ async fn multiple_conn_multiple_streams_sequential() { } #[wasm_bindgen_test] -async fn read_leftovers() { +pub async fn read_leftovers() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -128,7 +130,7 @@ async fn read_leftovers() { } #[wasm_bindgen_test] -async fn allow_read_after_closing_writer() { +pub async fn allow_read_after_closing_writer() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -154,7 +156,7 @@ async fn allow_read_after_closing_writer() { } #[wasm_bindgen_test] -async fn poll_outbound_error_after_connection_close() { +pub async fn poll_outbound_error_after_connection_close() { let mut conn = new_connection_to_echo_server().await; // Make sure that poll_outbound works well before closing the connection @@ -172,7 +174,7 @@ async fn poll_outbound_error_after_connection_close() { } #[wasm_bindgen_test] -async fn poll_inbound_error_after_connection_close() { +pub async fn poll_inbound_error_after_connection_close() { let mut conn = new_connection_to_echo_server().await; // Make sure that poll_inbound works well before closing the connection @@ -190,7 +192,7 @@ async fn poll_inbound_error_after_connection_close() { } #[wasm_bindgen_test] -async fn read_error_after_connection_drop() { +pub async fn read_error_after_connection_drop() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -205,7 +207,7 @@ async fn read_error_after_connection_drop() { } #[wasm_bindgen_test] -async fn read_error_after_connection_close() { +pub async fn read_error_after_connection_close() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -223,7 +225,7 @@ async fn read_error_after_connection_close() { } #[wasm_bindgen_test] -async fn write_error_after_connection_drop() { +pub async fn write_error_after_connection_drop() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -238,7 +240,7 @@ async fn write_error_after_connection_drop() { } #[wasm_bindgen_test] -async fn write_error_after_connection_close() { +pub async fn write_error_after_connection_close() { let mut conn = new_connection_to_echo_server().await; let mut stream = create_stream(&mut conn).await; @@ -256,7 +258,7 @@ async fn write_error_after_connection_close() { } #[wasm_bindgen_test] -async fn connect_without_peer_id() { +pub async fn connect_without_peer_id() { let mut addr = fetch_server_addr().await; let keypair = Keypair::generate_ed25519(); @@ -278,7 +280,7 @@ async fn connect_without_peer_id() { } #[wasm_bindgen_test] -async fn error_on_unknown_peer_id() { +pub async fn 
error_on_unknown_peer_id() { let mut addr = fetch_server_addr().await; let keypair = Keypair::generate_ed25519(); @@ -304,7 +306,7 @@ async fn error_on_unknown_peer_id() { } #[wasm_bindgen_test] -async fn error_on_unknown_certhash() { +pub async fn error_on_unknown_certhash() { let mut addr = fetch_server_addr().await; let keypair = Keypair::generate_ed25519();