diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 214e6e7a46..c4b8e5e9ea 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,12 +3,13 @@ # Working group Lead Members * thibault@iota.org /.github/ thibault@iota.org alexander.schmidt@iota.org +/bee-autopeering/ alexander.schmidt@iota.org thibault@iota.org /bee-api/ samuel.rufinatscha@iota.org thibault@iota.org /bee-common/ thibault@iota.org alexander.schmidt@iota.org /bee-crypto/ thibault@iota.org alexander.schmidt@iota.org +/bee-gossip/ alexander.schmidt@iota.org thibault@iota.org /bee-ledger/ thibault@iota.org samuel.rufinatscha@iota.org joshua.barretto@iota.org /bee-message/ thibault@iota.org bingyang.lin@iota.org thoralf.mueller@iota.org -/bee-network/ alexander.schmidt@iota.org joshua.barretto@iota.org /bee-node/ thibault@iota.org alexander.schmidt@iota.org samuel.rufinatscha@iota.org joshua.barretto@iota.org /bee-peering/ alexander.schmidt@iota.org samuel.rufinatscha@iota.org /bee-pow/ thibault@iota.org alexander.schmidt@iota.org diff --git a/Cargo.lock b/Cargo.lock index 8272a9b06a..083b26e2d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.45" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee10e43ae4a853c0a3591d4e2ada1719e553be18199d9da9d4a83f5927c2f5c7" +checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" [[package]] name = "arrayref" @@ -74,9 +74,9 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" dependencies = [ "autocfg", ] @@ -140,6 +140,38 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +[[package]] +name = "bee-autopeering" +version = "0.1.0" +dependencies = [ + "async-trait", + "base64 0.13.0", + "bincode", + "bs58", + "bytes", + "fern", + "hash32", + "hex", + "iota-crypto", + "libp2p-core", + "log", + "num", + "num-derive", + "num-traits", + "priority-queue", + "prost", + "prost-build", + "rand 0.8.4", + "ring", + "serde", + "serde_json", + "sled", + "thiserror", + "tokio", + "tokio-stream", + "toml", +] + [[package]] name = "bee-common" version = "0.5.0" @@ -152,7 +184,7 @@ dependencies = [ "rust-argon2", "serde", "thiserror", - "time 0.3.4", + "time 0.3.5", ] [[package]] @@ -176,6 +208,28 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "bee-gossip" +version = "0.3.0" +dependencies = [ + "async-trait", + "bee-runtime", + "fern", + "futures", + "hashbrown", + "hex", + "libp2p", + "libp2p-core", + "log", + "once_cell", + "rand 0.8.4", + "serde", + "serial_test", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "bee-ledger" version = "0.6.0" @@ -220,28 +274,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "bee-network" -version = 
"0.2.2" -dependencies = [ - "async-trait", - "bee-runtime", - "fern", - "futures", - "hashbrown", - "hex", - "libp2p", - "libp2p-core", - "log", - "once_cell", - "rand 0.8.4", - "serde", - "serial_test", - "thiserror", - "tokio", - "tokio-stream", -] - [[package]] name = "bee-pow" version = "0.2.0" @@ -256,8 +288,8 @@ dependencies = [ name = "bee-protocol" version = "0.1.1" dependencies = [ + "bee-gossip", "bee-message", - "bee-network", ] [[package]] @@ -434,11 +466,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -459,18 +500,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "bitvec" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "blake2" version = "0.9.2" @@ -517,9 +546,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "lazy_static", "memchr", @@ -535,9 +564,9 @@ checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "bytemuck" -version = "1.7.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" +checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" [[package]] name = "byteorder" @@ -562,18 +591,18 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] [[package]] name = "cexpr" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom", ] @@ -644,9 +673,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.3" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", "textwrap", @@ -706,9 +735,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if", ] @@ -819,7 +848,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -882,9 +911,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ "signature", ] @@ -924,9 +953,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.29" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if", ] @@ -1000,17 +1029,11 @@ dependencies = [ "winapi", ] -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", "futures-core", @@ -1023,9 +1046,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -1033,15 +1056,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -1051,18 +1074,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg", - 
"proc-macro-hack", "proc-macro2", "quote", "syn", @@ -1070,15 +1091,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-timer" @@ -1088,11 +1109,10 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -1102,8 +1122,6 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -1144,8 +1162,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.10.2+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -1166,9 +1186,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" dependencies = [ "bytes", "fnv", @@ -1189,6 +1209,15 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -1262,7 +1291,7 @@ checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.8", ] [[package]] @@ -1284,15 +1313,15 @@ checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.14" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -1303,7 +1332,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 0.4.8", "pin-project-lite", "socket2 0.4.2", "tokio", @@ 
-1338,9 +1367,9 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a83ec4af652890ac713ffd8dc859e650420a5ef47f7b9be29b6664ab50fbc8" +checksum = "2273e421f7c4f0fc99e1934fe4776f59d8df2972f4199d703fc0da9f2a9f73de" dependencies = [ "if-addrs-sys", "libc", @@ -1386,7 +1415,9 @@ dependencies = [ "blake2", "digest", "ed25519-zebra", + "getrandom 0.2.3", "lazy_static", + "sha2", ] [[package]] @@ -1409,9 +1440,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -1422,6 +1453,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "jobserver" version = "0.1.24" @@ -1477,15 +1514,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.106" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "libloading" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cf036d15402bea3c5d4de17b3fce76b3e4a56ebc1f577be0e7a72f7c607cf0" +checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" dependencies = [ "cfg-if", "winapi", @@ -1784,15 +1821,15 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -1803,6 +1840,12 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "mio" version = "0.7.14" @@ -1916,13 +1959,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "6.2.1" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c5c51b9083a3c620fa67a2a635d1ce7d95b897e957d6b28ff9a5da960a103a6" +checksum = 
"1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ - "bitvec", - "funty", "memchr", + "minimal-lexical", "version_check", ] @@ -1935,6 +1977,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1946,6 +2001,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1956,6 +2031,28 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.14" @@ -1977,9 +2074,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "oorandom" @@ -2015,9 +2112,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.70" +version = "0.9.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6517987b3f8226b5da3661dad65ff7f300cc59fb5ea8333ca191fc65fde3edf" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" dependencies = [ "autocfg", "cc", @@ -2147,9 +2244,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "plotters" @@ -2208,6 +2305,16 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +[[package]] +name = "priority-queue" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00ba480ac08d3cfc40dea10fd466fd2c14dee3ea6fc7873bc4079eda2727caf0" +dependencies = [ + "autocfg", + "indexmap", +] + [[package]] name = "proc-macro-crate" version = "1.1.0" @@ -2242,23 +2349,11 @@ dependencies = [ 
"version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" dependencies = [ "unicode-xid", ] @@ -2329,12 +2424,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" - [[package]] name = "rand" version = "0.7.3" @@ -2502,9 +2591,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" +checksum = "07bea77bc708afa10e59905c3d4af7c8fd43c9214251673095ff8b14345fcbc5" dependencies = [ "base64 0.13.0", "bytes", @@ -2528,6 +2617,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-util", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2618,9 +2708,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "same-file" @@ -2696,9 +2786,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" dependencies = [ "serde_derive", ] @@ -2715,9 +2805,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" dependencies = [ "proc-macro2", "quote", @@ -2726,11 +2816,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.68" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -2742,7 +2832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -2914,9 +3004,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.81" +version = "1.0.82" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" dependencies = [ "proc-macro2", "quote", @@ -2935,12 +3025,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "tempfile" version = "3.2.0" @@ -2996,11 +3080,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99beeb0daeac2bd1e86ac2c21caddecb244b39a093594da1a661ec2060c7aedd" +checksum = "41effe7cfa8af36f439fac33861b66b049edc6f9a32331e2312660529c1c24ad" dependencies = [ - "itoa", + "itoa 0.4.8", "libc", ] @@ -3025,9 +3109,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -3040,11 +3124,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg", "bytes", "libc", "memchr", @@ -3059,9 +3142,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -3246,9 +3329,9 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" dependencies = [ "asynchronous-codec", "bytes", @@ -3481,12 +3564,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - [[package]] name = "x25519-dalek" version = "1.1.1" @@ -3514,18 +3591,18 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.2.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" +checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 
3075dd0c42..aab7394fbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,8 @@ members = [ "bee-crypto", "bee-ledger", "bee-message", - "bee-network", + "bee-network/bee-autopeering", + "bee-network/bee-gossip", "bee-pow", "bee-protocol", "bee-runtime", diff --git a/bee-network/README.md b/bee-network/README.md deleted file mode 100644 index d50580d39c..0000000000 --- a/bee-network/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# bee-network - -Networking functionality and types for nodes and clients participating in the IOTA protocol built on top of `libp2p`. \ No newline at end of file diff --git a/bee-network/bee-autopeering/.gitignore b/bee-network/bee-autopeering/.gitignore new file mode 100644 index 0000000000..b3fafad7fb --- /dev/null +++ b/bee-network/bee-autopeering/.gitignore @@ -0,0 +1 @@ +peerstore/ \ No newline at end of file diff --git a/bee-network/bee-autopeering/CHANGELOG.md b/bee-network/bee-autopeering/CHANGELOG.md new file mode 100644 index 0000000000..560c4aa1f1 --- /dev/null +++ b/bee-network/bee-autopeering/CHANGELOG.md @@ -0,0 +1,31 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + + + +## 0.1.0 - 2021-12-03 + +### Added + +- Local entity and peer identities; +- Peer discovery; +- Neighbor selection; +- Packet/Message handling; +- Network I/O; +- Configuration; \ No newline at end of file diff --git a/bee-network/bee-autopeering/Cargo.toml b/bee-network/bee-autopeering/Cargo.toml new file mode 100644 index 0000000000..c86403fd3d --- /dev/null +++ b/bee-network/bee-autopeering/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "bee-autopeering" +version = "0.1.0" +authors = [ "IOTA Stiftung" ] +edition = "2021" +description = "Allows peers in the same IOTA network to automatically discover each other." 
+readme = "README.md" +repository = "https://github.com/iotaledger/bee" +license = "Apache-2.0" +keywords = [ "iota", "bee", "framework", "network", "autopeering" ] +homepage = "https://www.iota.org" + +[dependencies] +async-trait = { version = "0.1.51", default-features = false } +base64 = { version = "0.13.0", default-features = false, features = [ "alloc" ] } +bincode = { version = "1.3.3", default-features = false } +bs58 = { version = "0.4", default-features = false, features = [ "alloc" ] } +bytes = { version = "1.0", default-features = false } +hash32 = { version = "0.2.1", default-features = false } +hex = { version = "0.4.3", default-features = false } +iota-crypto = { version = "0.9.1", default-features = false, features = [ "ed25519", "random", "sha" ] } +libp2p-core = { version = "0.29.0", default-features = false } +log = { version = "0.4", default-features = false } +num = { version = "0.4.0", default-features = false } +num-derive = { version = "0.3.3", default-features = false } +num-traits = { version = "0.2.14" , default-features = false } +priority-queue = { version = "1.2.0" , default-features = false } +prost = { version = "0.8", default-features = false, features = [ "std" ] } +rand = { version = "0.8.4" , default-features = false } +ring = { version = "0.16.20" , default-features = false } +serde = { version = "1.0", default-features = false , features = [ "derive" ] } +sled = { version = "0.34.7", default-features = false } +thiserror = { version = "1.0.29", default-features = false } +tokio = { version = "1.11", default-features = false , features = [ "macros", "rt", "time", "net" ] } +tokio-stream = { version = "0.1", default-features = false } + +[dev-dependencies] +fern = { version = "0.6.0", default-features = false } +serde_json = { version = "1.0.68", default-features = false, features = [ "std" ] } +tokio = { version = "1.11.0", default-features = false, features = [ "rt", "rt-multi-thread", "macros", "signal", "time", "io-std", "io-util" ] } +toml = { version = "0.5.8", default-features = false } + +[build-dependencies] +prost-build = { version = "0.8", default-features = false } + +[[example]] +name = "node" + diff --git a/bee-network/LICENSE b/bee-network/bee-autopeering/LICENSE similarity index 100% rename from bee-network/LICENSE rename to bee-network/bee-autopeering/LICENSE diff --git a/bee-network/bee-autopeering/README.md b/bee-network/bee-autopeering/README.md new file mode 100644 index 0000000000..6c56e8329c --- /dev/null +++ b/bee-network/bee-autopeering/README.md @@ -0,0 +1,5 @@ +# bee-autopeering + +Allows peers in the same IOTA network to automatically discover each other. + +Read [`lib.rs`](src/lib.rs) for an example on how to use this crate. 
diff --git a/bee-network/bee-autopeering/build.rs b/bee-network/bee-autopeering/build.rs new file mode 100644 index 0000000000..a9b2524d76 --- /dev/null +++ b/bee-network/bee-autopeering/build.rs @@ -0,0 +1,20 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::io::Result; + +fn main() -> Result<()> { + prost_build::compile_protos( + &[ + "src/proto/discovery.proto", + "src/proto/peering.proto", + "src/proto/packet.proto", + "src/proto/peer.proto", + "src/proto/salt.proto", + "src/proto/service.proto", + ], + &["src/"], + )?; + + Ok(()) +} diff --git a/bee-network/bee-autopeering/examples/node.rs b/bee-network/bee-autopeering/examples/node.rs new file mode 100644 index 0000000000..72029973a7 --- /dev/null +++ b/bee-network/bee-autopeering/examples/node.rs @@ -0,0 +1,143 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +#![allow(warnings)] + +use bee_autopeering::{ + config::AutopeeringConfigJsonBuilder, + init, + stores::{InMemoryPeerStore, SledPeerStore, SledPeerStoreConfig}, + AutopeeringConfig, Event, Local, NeighborValidator, Peer, ServiceProtocol, AUTOPEERING_SERVICE_NAME, +}; + +use libp2p_core::identity::ed25519::Keypair; +use log::LevelFilter; +use serde_json::Value; +use tokio::signal::ctrl_c; +use tokio_stream::StreamExt; + +use std::{future::Future, io, net::SocketAddr, pin::Pin}; + +const AUTOPEERING_VERSION: u32 = 1; +const NETWORK_SERVICE_NAME: &str = "chrysalis-mainnet"; +const BS16_ED25519_PRIVATE_KEY: &str = "1858c941a1c4454f4df77be93481b66e5f4dcff885f30c6a6a4cb214d2fea21072e7dce6a4d4d7b89460e84bb0e3b4475b528524e7ceb741f7646400ef9f2c7b"; + +fn setup_logger(level: LevelFilter) { + fern::Dispatch::new() + .level(level) + .chain(io::stdout()) + .apply() + .expect("fern"); +} + +fn read_config() -> AutopeeringConfigJsonBuilder { + let config_json = r#" + { + "enabled": true, + "bindAddress": "0.0.0.0:14626", + "entryNodes": [ + "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", + "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2", + "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" + ] + }"#; + + // "entryNodesPreferIPv6": false, + // "runAsEntryNode": false + serde_json::from_str(config_json).expect("error deserializing json config") +} + +#[tokio::main] +async fn main() { + // Set up logger. + setup_logger(LevelFilter::Info); + + // Read the config from a JSON file/string. + let config = read_config().finish(); + println!("{:#?}", config); + + // Set up a local peer, that provides the Autopeering service. + let mut keypair = hex::decode(BS16_ED25519_PRIVATE_KEY).expect("error decoding keypair"); + let local = Local::from_keypair(Keypair::decode(&mut keypair).expect("error decoding keypair")) + .expect("error creating local"); + + local.add_service( + AUTOPEERING_SERVICE_NAME, + ServiceProtocol::Udp, + config.bind_addr().port(), + ); + local.add_service(NETWORK_SERVICE_NAME, ServiceProtocol::Tcp, 15600); + + // Network parameters. + let version = AUTOPEERING_VERSION; + let network_name = NETWORK_SERVICE_NAME; + + // Storage config. + // No config is necessary for the `InMemoryPeerStore`. + // let peer_store_config = (); + + // Sled peer store: + let peer_store_config = SledPeerStoreConfig::new().path("./peerstore"); + + // Neighbor validator. + let neighbor_validator = GossipNeighborValidator {}; + + // Shutdown signal. 
+ let term_signal = ctrl_c(); + + // Initialize the Autopeering service. + let mut event_rx = bee_autopeering::init::( + config.clone(), + version, + network_name, + local, + peer_store_config, + term_signal, + neighbor_validator, + ) + .await + .expect("initializing autopeering system failed"); + + // Print to what IP addresses the entry nodes resolved to. + print_resolved_entry_nodes(config).await; + + // Enter event loop. + 'recv: loop { + tokio::select! { + e = event_rx.recv() => { + if let Some(event) = e { + handle_event(event); + } else { + break 'recv; + } + } + }; + } +} + +fn handle_event(event: Event) { + log::info!("{}", event); +} + +async fn print_resolved_entry_nodes(config: AutopeeringConfig) { + let entry_nodes = config.into_entry_nodes(); + for mut entry_node_addr in entry_nodes { + if entry_node_addr.resolve_dns().await { + let resolved_addrs = entry_node_addr.resolved_addrs(); + for resolved_addr in resolved_addrs { + println!("{} ---> {}", entry_node_addr.address(), resolved_addr); + } + } else { + println!("{} ---> unresolvable", entry_node_addr.address()); + } + } +} + +#[derive(Clone)] +struct GossipNeighborValidator {} + +impl NeighborValidator for GossipNeighborValidator { + fn is_valid(&self, peer: &Peer) -> bool { + peer.has_service(NETWORK_SERVICE_NAME) + } +} diff --git a/bee-network/bee-autopeering/src/config.rs b/bee-network/bee-autopeering/src/config.rs new file mode 100644 index 0000000000..78b0202192 --- /dev/null +++ b/bee-network/bee-autopeering/src/config.rs @@ -0,0 +1,328 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Autopeering configuration. +//! +//! ## JSON Example +//! +//! ```json +//! "autopeering": { +//! "enabled": true, +//! "bindAddress": "0.0.0.0:14626", +//! "entryNodes": [ +//! "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", +//! "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2", +//! "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" +//! ], +//! "entryNodesPreferIPv6": true, +//! } +//! ``` +//! +//! ## TOML Example +//! +//! ```toml +//! [autopeering] +//! enabled = true +//! bind_address = "0.0.0.0:14626" +//! entry_nodes = [ +//! "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", +//! "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2", +//! "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" +//! ] +//! entry_nodes_prefer_ipv6 = true +//! ``` + +use crate::multiaddr::AutopeeringMultiaddr; + +use serde::{Deserialize, Serialize}; + +use std::{ + fmt::Debug, + net::{IpAddr, Ipv4Addr, SocketAddr}, +}; + +const AUTOPEERING_ENABLED_DEFAULT: bool = false; +// TODO: watch out for possible constification regarding `SocketAddr::new()`. +const AUTOPEERING_BIND_ADDR_DEFAULT: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +const AUTOPEERING_BIND_PORT_DEFAULT: u16 = 14626; +const ENTRYNODES_PREFER_IPV6_DEFAULT: bool = false; +const RUN_AS_ENTRYNODE_DEFAULT: bool = false; +const DROP_NEIGHBORS_ON_SALT_UPDATE_DEFAULT: bool = false; + +/// The autopeering config. 
+#[derive(Clone, Debug)]
+pub struct AutopeeringConfig {
+    enabled: bool,
+    bind_addr: SocketAddr,
+    entry_nodes: Vec<AutopeeringMultiaddr>,
+    entry_nodes_prefer_ipv6: bool,
+    run_as_entry_node: bool,
+    drop_neighbors_on_salt_update: bool,
+}
+
+impl AutopeeringConfig {
+    /// Whether autopeering should be enabled.
+    pub fn enabled(&self) -> bool {
+        self.enabled
+    }
+
+    /// The bind address for the server.
+    pub fn bind_addr(&self) -> SocketAddr {
+        self.bind_addr
+    }
+
+    /// The entry nodes for bootstrapping.
+    pub fn entry_nodes(&self) -> &[AutopeeringMultiaddr] {
+        &self.entry_nodes
+    }
+
+    /// Whether `Ipv4` or `Ipv6` should be preferred in case a hostname supports both.
+    pub fn entry_nodes_prefer_ipv6(&self) -> bool {
+        self.entry_nodes_prefer_ipv6
+    }
+
+    /// Whether the node should run as an entry node.
+    pub fn run_as_entry_node(&self) -> bool {
+        self.run_as_entry_node
+    }
+
+    /// Whether all neighbors should be disconnected from when the salts are updated.
+    pub fn drop_neighbors_on_salt_update(&self) -> bool {
+        self.drop_neighbors_on_salt_update
+    }
+
+    /// Reduces this config to its list of entry node addresses.
+    pub fn into_entry_nodes(self) -> Vec<AutopeeringMultiaddr> {
+        self.entry_nodes
+    }
+
+    /// Turns the [`AutopeeringConfig`] into its JSON representation.
+    pub fn into_json_config(self) -> AutopeeringConfigJsonBuilder {
+        AutopeeringConfigJsonBuilder {
+            enabled: self.enabled,
+            bind_addr: self.bind_addr,
+            entry_nodes: self.entry_nodes,
+            entry_nodes_prefer_ipv6: Some(self.entry_nodes_prefer_ipv6),
+            run_as_entry_node: Some(self.run_as_entry_node),
+            drop_neighbors_on_salt_update: Some(self.drop_neighbors_on_salt_update),
+        }
+    }
+
+    /// Turns the [`AutopeeringConfig`] into its TOML representation.
+    pub fn into_toml_config(self) -> AutopeeringConfigTomlBuilder {
+        AutopeeringConfigTomlBuilder {
+            enabled: self.enabled,
+            bind_addr: self.bind_addr,
+            entry_nodes: self.entry_nodes,
+            entry_nodes_prefer_ipv6: Some(self.entry_nodes_prefer_ipv6),
+            run_as_entry_node: Some(self.run_as_entry_node),
+            drop_neighbors_on_salt_update: Some(self.drop_neighbors_on_salt_update),
+        }
+    }
+}
+
+// Note: In case someone wonders why we use `Option`: Although serde actually provides a way to allow for the
+// default of a boolean parameter to be `true` - so that missing config parameters could be created on the fly - it felt
+// too awkward and also a bit too cumbersome to me: serde(default = "default_providing_function_name").
+
+/// The autopeering config JSON builder.
+///
+/// Note: Fields will be camel-case formatted.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[cfg_attr(test, derive(Eq, PartialEq))]
+#[serde(rename = "autopeering")]
+pub struct AutopeeringConfigJsonBuilder {
+    /// Whether autopeering should be enabled.
+    pub enabled: bool,
+    /// The bind address for the server.
+    #[serde(rename = "bindAddress")]
+    pub bind_addr: SocketAddr,
+    /// The entry nodes for bootstrapping.
+    #[serde(rename = "entryNodes")]
+    pub entry_nodes: Vec<AutopeeringMultiaddr>,
+    /// Whether `Ipv4` or `Ipv6` should be preferred in case a hostname supports both.
+    #[serde(rename = "entryNodesPreferIPv6")]
+    pub entry_nodes_prefer_ipv6: Option<bool>,
+    /// Whether the node should run as an entry node.
+    #[serde(rename = "runAsEntryNode")]
+    pub run_as_entry_node: Option<bool>,
+    /// Whether all neighbors should be disconnected from when the salts are updated.
+    #[serde(rename = "dropNeighborsOnSaltUpdate")]
+    pub drop_neighbors_on_salt_update: Option<bool>,
+}
+
+impl AutopeeringConfigJsonBuilder {
+    /// Builds the actual `AutopeeringConfig`.
+    pub fn finish(self) -> AutopeeringConfig {
+        AutopeeringConfig {
+            enabled: self.enabled,
+            bind_addr: self.bind_addr,
+            entry_nodes: self.entry_nodes,
+            entry_nodes_prefer_ipv6: self.entry_nodes_prefer_ipv6.unwrap_or(ENTRYNODES_PREFER_IPV6_DEFAULT),
+            run_as_entry_node: self.run_as_entry_node.unwrap_or(RUN_AS_ENTRYNODE_DEFAULT),
+            drop_neighbors_on_salt_update: self
+                .drop_neighbors_on_salt_update
+                .unwrap_or(DROP_NEIGHBORS_ON_SALT_UPDATE_DEFAULT),
+        }
+    }
+}
+
+impl Default for AutopeeringConfigJsonBuilder {
+    fn default() -> Self {
+        Self {
+            enabled: AUTOPEERING_ENABLED_DEFAULT,
+            bind_addr: SocketAddr::new(AUTOPEERING_BIND_ADDR_DEFAULT, AUTOPEERING_BIND_PORT_DEFAULT),
+            entry_nodes: Vec::default(),
+            entry_nodes_prefer_ipv6: Some(ENTRYNODES_PREFER_IPV6_DEFAULT),
+            run_as_entry_node: Some(RUN_AS_ENTRYNODE_DEFAULT),
+            drop_neighbors_on_salt_update: Some(DROP_NEIGHBORS_ON_SALT_UPDATE_DEFAULT),
+        }
+    }
+}
+
+/// The autopeering config TOML builder.
+///
+/// Note: Fields will be snake-case formatted.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[cfg_attr(test, derive(Eq, PartialEq))]
+#[serde(rename = "autopeering")]
+pub struct AutopeeringConfigTomlBuilder {
+    /// Whether autopeering should be enabled.
+    pub enabled: bool,
+    /// The bind address for the server.
+    #[serde(rename = "bind_address")]
+    pub bind_addr: SocketAddr,
+    /// The entry nodes for bootstrapping.
+    pub entry_nodes: Vec<AutopeeringMultiaddr>,
+    /// Whether `Ipv4` or `Ipv6` should be preferred in case a hostname supports both.
+    pub entry_nodes_prefer_ipv6: Option<bool>,
+    /// Whether the node should run as an entry node.
+    pub run_as_entry_node: Option<bool>,
+    /// Whether all neighbors should be disconnected from when the salts are updated.
+    pub drop_neighbors_on_salt_update: Option<bool>,
+}
+
+impl AutopeeringConfigTomlBuilder {
+    /// Builds the actual `AutopeeringConfig`.
+ pub fn finish(self) -> AutopeeringConfig { + AutopeeringConfig { + enabled: self.enabled, + bind_addr: self.bind_addr, + entry_nodes: self.entry_nodes, + entry_nodes_prefer_ipv6: self.entry_nodes_prefer_ipv6.unwrap_or(ENTRYNODES_PREFER_IPV6_DEFAULT), + run_as_entry_node: self.run_as_entry_node.unwrap_or(RUN_AS_ENTRYNODE_DEFAULT), + drop_neighbors_on_salt_update: self + .drop_neighbors_on_salt_update + .unwrap_or(DROP_NEIGHBORS_ON_SALT_UPDATE_DEFAULT), + } + } +} + +impl Default for AutopeeringConfigTomlBuilder { + fn default() -> Self { + Self { + enabled: AUTOPEERING_ENABLED_DEFAULT, + bind_addr: SocketAddr::new(AUTOPEERING_BIND_ADDR_DEFAULT, AUTOPEERING_BIND_PORT_DEFAULT), + entry_nodes: Vec::default(), + entry_nodes_prefer_ipv6: Some(ENTRYNODES_PREFER_IPV6_DEFAULT), + run_as_entry_node: Some(RUN_AS_ENTRYNODE_DEFAULT), + drop_neighbors_on_salt_update: Some(DROP_NEIGHBORS_ON_SALT_UPDATE_DEFAULT), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fmt; + + impl fmt::Display for AutopeeringConfigJsonBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + serde_json::to_string_pretty(self).fmt(f) + } + } + + impl fmt::Display for AutopeeringConfigTomlBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + toml::to_string_pretty(self).fmt(f) + } + } + + fn create_json_config_from_str() -> AutopeeringConfigJsonBuilder { + let config_json_str = r#" + { + "enabled": true, + "bindAddress": "0.0.0.0:14626", + "entryNodes": [ + "/dns/lucamoser.ch/udp/14826/autopeering/4H6WV54tB29u8xCcEaMGQMn37LFvM1ynNpp27TTXaqNM", + "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", + "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2", + "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" + ], + "entryNodesPreferIPv6": true, + "runAsEntryNode": false, + "dropNeighborsOnSaltUpdate": false + }"#; + + serde_json::from_str(config_json_str).expect("error deserializing json config") + } + + fn create_toml_config_from_str() -> AutopeeringConfigTomlBuilder { + let toml_config_str = r#" + enabled = true + bind_address = "0.0.0.0:14626" + entry_nodes = [ + "/dns/lucamoser.ch/udp/14826/autopeering/4H6WV54tB29u8xCcEaMGQMn37LFvM1ynNpp27TTXaqNM", + "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", + "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2", + "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" + ] + entry_nodes_prefer_ipv6 = true + run_as_entry_node = false + drop_neighbors_on_salt_update = false + "#; + + toml::from_str(toml_config_str).unwrap() + } + + fn create_config() -> AutopeeringConfig { + AutopeeringConfig { + enabled: true, + bind_addr: "0.0.0.0:14626".parse().unwrap(), + entry_nodes: vec![ + "/dns/lucamoser.ch/udp/14826/autopeering/4H6WV54tB29u8xCcEaMGQMn37LFvM1ynNpp27TTXaqNM".parse().unwrap(), + "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb".parse().unwrap(), + "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2".parse().unwrap(), + 
"/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC".parse().unwrap(), + ], + entry_nodes_prefer_ipv6: true, + run_as_entry_node: false, + drop_neighbors_on_salt_update: false, + } + } + + /// Tests config serialization and deserialization. + #[test] + fn config_serde() { + // Create format dependent configs from their respective string representation. + let json_config = create_json_config_from_str(); + let toml_config = create_toml_config_from_str(); + + // Manually create an instance of a config. + let config = create_config(); + + // Compare whether the deserialized JSON str equals the JSON-serialized config instance. + assert_eq!( + json_config, + config.clone().into_json_config(), + "json config de/serialization failed" + ); + + // Compare whether the deserialized TOML str equals the TOML-serialized config instance. + assert_eq!( + toml_config, + config.into_toml_config(), + "toml config de/serialization failed" + ); + } +} diff --git a/bee-network/bee-autopeering/src/delay.rs b/bee-network/bee-autopeering/src/delay.rs new file mode 100644 index 0000000000..88eec12ce9 --- /dev/null +++ b/bee-network/bee-autopeering/src/delay.rs @@ -0,0 +1,42 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::{ + sync::atomic::{AtomicU64, Ordering}, + time::Duration, +}; + +pub(crate) type Delay = Duration; + +pub(crate) struct ManualDelayFactory(AtomicU64); + +impl ManualDelayFactory { + /// Creates a new `ManualDelayFactory` from an initial delay. + pub(crate) const fn new(initial_delay: Delay) -> Self { + Self(AtomicU64::new(delay_to_millis(initial_delay))) + } + + /// Defines the delays produced by the factory. + /// + /// There's no corresponding `get` method. Use the `next` method ([`Iterator`] trait impl) to access them. + pub(crate) fn set(&self, delay: Delay) { + self.0.store(delay_to_millis(delay), Ordering::Relaxed); + } +} + +const fn delay_to_millis(delay: Delay) -> u64 { + // Type cast: for all practical purposes this should be fine. 
+ delay.as_millis() as u64 +} + +const fn millis_to_delay(millis: u64) -> Delay { + Delay::from_millis(millis) +} + +impl Iterator for ManualDelayFactory { + type Item = Delay; + + fn next(&mut self) -> Option { + Some(millis_to_delay(self.0.load(Ordering::Relaxed))) + } +} diff --git a/bee-network/bee-autopeering/src/discovery/manager.rs b/bee-network/bee-autopeering/src/discovery/manager.rs new file mode 100644 index 0000000000..dcecf15845 --- /dev/null +++ b/bee-network/bee-autopeering/src/discovery/manager.rs @@ -0,0 +1,997 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + config::AutopeeringConfig, + discovery::messages::{DiscoveryRequest, DiscoveryResponse, VerificationRequest, VerificationResponse}, + event::{Event, EventTx}, + hash::message_hash, + local::{ + services::{ServiceMap, ServicePort, ServiceProtocol, AUTOPEERING_SERVICE_NAME}, + Local, + }, + multiaddr::{AddressKind, AutopeeringMultiaddr}, + packet::{IncomingPacket, MessageType, OutgoingPacket}, + peer::{ + self, + lists::{ActivePeer, ActivePeersList, EntryPeersList, ReplacementPeersList}, + peer_id::PeerId, + stores::PeerStore, + Peer, + }, + request::{self, RequestManager, RequestValue, ResponseTx, RESPONSE_TIMEOUT}, + server::{ServerRx, ServerSocket, ServerTx}, + task::{Runnable, ShutdownRx, TaskManager}, + time::{HOUR, SECOND}, +}; + +use rand::{seq::index, Rng as _}; + +use std::{net::SocketAddr, time::Duration}; + +// Time interval after which the next peer is reverified. +pub(crate) const DEFAULT_REVERIFY_INTERVAL: Duration = Duration::from_secs(10 * SECOND); +// Time interval after which peers are queried for new peers. +pub(crate) const DEFAULT_QUERY_INTERVAL: Duration = Duration::from_secs(60 * SECOND); +// Is the time until a peer verification expires (12 hours). +pub(crate) const VERIFICATION_EXPIRATION: Duration = Duration::from_secs(12 * HOUR); +// Is the maximum number of peers returned in DiscoveryResponse. +const MAX_PEERS_IN_RESPONSE: usize = 6; +// Is the minimum number of verifications required to be selected in DiscoveryResponse. +const MIN_VERIFIED_IN_RESPONSE: usize = 1; +// Is the maximum number of services a peer can support. + +pub(crate) struct DiscoveryManagerConfig { + pub(crate) entry_nodes: Vec, + pub(crate) entry_nodes_prefer_ipv6: bool, + pub(crate) version: u32, + pub(crate) network_id: u32, +} + +impl DiscoveryManagerConfig { + pub fn new(config: &AutopeeringConfig, version: u32, network_id: u32) -> Self { + Self { + entry_nodes: config.entry_nodes().to_vec(), + entry_nodes_prefer_ipv6: config.entry_nodes_prefer_ipv6(), + version, + network_id, + } + } +} + +pub(crate) struct DiscoveryManager { + // Config. + config: DiscoveryManagerConfig, + // The local id to sign outgoing packets. + local: Local, + // Channel halves for sending/receiving discovery related packets. + socket: ServerSocket, + // Handles incoming and outgoing requests. + request_mngr: RequestManager, + // Publishes discovery related events. + event_tx: EventTx, + // The storage for discovered peers. + peer_store: S, + // The list of entry peers. + entry_peers: EntryPeersList, + // The list of managed peers. + active_peers: ActivePeersList, + // The list of replacement peers. 
+ replacements: ReplacementPeersList, +} + +impl DiscoveryManager { + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + config: DiscoveryManagerConfig, + local: Local, + socket: ServerSocket, + request_mngr: RequestManager, + peer_store: S, + entry_peers: EntryPeersList, + active_peers: ActivePeersList, + replacements: ReplacementPeersList, + event_tx: EventTx, + ) -> Self { + Self { + config, + local, + socket, + request_mngr, + event_tx, + peer_store, + entry_peers, + active_peers, + replacements, + } + } + + pub async fn init(self, task_mngr: &mut TaskManager) { + let DiscoveryManager { + config, + local, + socket, + request_mngr, + event_tx, + peer_store, + entry_peers, + active_peers, + replacements, + } = self; + + let DiscoveryManagerConfig { + mut entry_nodes, + entry_nodes_prefer_ipv6, + version, + network_id, + } = config; + + let ServerSocket { server_rx, server_tx } = socket; + + // Add previously discovered peers from the peer store. + add_peers_from_store(&peer_store, &active_peers, &replacements); + + // Add entry peers from the config. + add_entry_peers( + &mut entry_nodes, + entry_nodes_prefer_ipv6, + &local, + &entry_peers, + &active_peers, + &replacements, + ) + .await; + + let discovery_recv_handler = DiscoveryRecvHandler { + server_tx: server_tx.clone(), + server_rx, + local: local.clone(), + version, + network_id, + request_mngr: request_mngr.clone(), + event_tx, + active_peers: active_peers.clone(), + replacements, + }; + + task_mngr.run::(discovery_recv_handler); + } +} + +struct DiscoveryRecvHandler { + server_rx: ServerRx, + server_tx: ServerTx, + local: Local, + version: u32, + network_id: u32, + request_mngr: RequestManager, + event_tx: EventTx, + active_peers: ActivePeersList, + replacements: ReplacementPeersList, +} + +#[async_trait::async_trait] +impl Runnable for DiscoveryRecvHandler { + const NAME: &'static str = "DiscoveryRecvHandler"; + const SHUTDOWN_PRIORITY: u8 = 4; + + type ShutdownSignal = ShutdownRx; + + async fn run(self, mut shutdown_rx: Self::ShutdownSignal) { + let DiscoveryRecvHandler { + mut server_rx, + server_tx, + local, + version, + network_id, + request_mngr, + event_tx, + active_peers, + replacements, + .. + } = self; + + 'recv: loop { + tokio::select! { + _ = &mut shutdown_rx => { + break; + } + p = server_rx.recv() => { + if let Some(IncomingPacket { + msg_type, + msg_bytes, + peer_addr, + peer_id, + }) = p + { + let ctx = RecvContext { + peer_id: &peer_id, + msg_bytes: &msg_bytes, + server_tx: &server_tx, + local: &local, + request_mngr: &request_mngr, + peer_addr, + event_tx: &event_tx, + active_peers: &active_peers, + replacements: &replacements, + }; + + match msg_type { + MessageType::VerificationRequest => { + let verif_req = if let Ok(verif_req) = VerificationRequest::from_protobuf(&msg_bytes) { + verif_req + } else { + log::warn!("Error decoding verification request from {}.", &peer_id); + continue 'recv; + }; + + if let Err(e) = validate_verification_request(&verif_req, version, network_id) { + log::warn!("Received invalid verification request from {}. 
Reason: {}", &peer_id, e); + continue 'recv; + } else { + log::trace!("Received valid verification request from {}.", &peer_id); + + handle_verification_request(verif_req, ctx); + } + } + MessageType::VerificationResponse => { + let verif_res = if let Ok(verif_res) = VerificationResponse::from_protobuf(&msg_bytes) { + verif_res + } else { + log::warn!("Error decoding verification response from {}.", &peer_id); + continue 'recv; + }; + + match validate_verification_response(&verif_res, &request_mngr, &peer_id, peer_addr) { + Ok(verif_reqval) => { + log::trace!("Received valid verification response from {}.", &peer_id); + + handle_verification_response(verif_res, verif_reqval, ctx); + } + Err(e) => { + log::warn!("Received invalid verification response from {}. Reason: {:?}", &peer_id, e); + continue 'recv; + } + } + } + MessageType::DiscoveryRequest => { + let disc_req = if let Ok(disc_req) = DiscoveryRequest::from_protobuf(&msg_bytes) { + disc_req + } else { + log::warn!("Error decoding discovery request from {}.", &peer_id); + continue 'recv; + }; + + if let Err(e) = validate_discovery_request(&disc_req) { + log::warn!("Received invalid discovery request from {}. Reason: {:?}", &peer_id, e); + continue 'recv; + } else { + log::trace!("Received valid discovery request from {}.", &peer_id); + + handle_discovery_request(disc_req, ctx); + } + } + MessageType::DiscoveryResponse => { + let disc_res = if let Ok(disc_res) = DiscoveryResponse::from_protobuf(&msg_bytes) { + disc_res + } else { + log::warn!("Error decoding discovery response from {}.", &peer_id); + continue 'recv; + }; + + match validate_discovery_response(&disc_res, &request_mngr, &peer_id) { + Ok(disc_reqval) => { + log::trace!("Received valid discovery response from {}.", &peer_id); + + handle_discovery_response(disc_res, disc_reqval, ctx); + } + Err(e) => { + log::warn!("Received invalid discovery response from {}. Reason: {:?}", &peer_id, e); + continue 'recv; + } + } + } + _ => log::warn!("Received unsupported discovery message type"), + } + } + } + } + } + } +} + +fn add_peers_from_store( + peer_store: &S, + active_peers: &ActivePeersList, + replacements: &ReplacementPeersList, +) { + let mut num_added = 0; + + let mut write = active_peers.write(); + for active_peer in peer_store.fetch_all_active() { + if write.insert(active_peer) { + num_added += 1; + } + } + drop(write); + + let mut write = replacements.write(); + for replacement in peer_store.fetch_all_replacements() { + if write.insert(replacement) { + num_added += 1; + } + } + drop(write); + + log::debug!("Restored {} peer/s.", num_added); +} + +async fn add_entry_peers( + entry_nodes: &mut Vec, + entry_nodes_prefer_ipv6: bool, + local: &Local, + entry_peers: &EntryPeersList, + active_peers: &ActivePeersList, + replacements: &ReplacementPeersList, +) { + let mut num_added = 0; + + for entry_addr in entry_nodes { + let entry_socketaddr = match entry_addr.address_kind() { + AddressKind::Ip4 | AddressKind::Ip6 => { + // Panic: for those address kinds the returned option is always `Some`. 
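+                // (Only the `Dns` variant below needs to be resolved before a socket address is available.)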
+ entry_addr.socket_addr().unwrap() + } + AddressKind::Dns => { + if entry_addr.resolve_dns().await { + let entry_socketaddrs = entry_addr.resolved_addrs(); + let has_ip4 = entry_socketaddrs.iter().position(SocketAddr::is_ipv4); + let has_ip6 = entry_socketaddrs.iter().position(SocketAddr::is_ipv6); + + match (has_ip4, has_ip6) { + // Only IP4 or only IP6 + (Some(index), None) | (None, Some(index)) => entry_socketaddrs[index], + // Both are available + (Some(index1), Some(index2)) => { + if entry_nodes_prefer_ipv6 { + entry_socketaddrs[index2] + } else { + entry_socketaddrs[index1] + } + } + // Both being None is not possible. + _ => unreachable!(), + } + } else { + // Ignore that entry node. + continue; + } + } + }; + + let mut peer = Peer::new(entry_socketaddr.ip(), *entry_addr.public_key()); + peer.add_service(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, entry_socketaddr.port()); + + entry_peers.write().insert(*peer.peer_id()); + + // Also add it as a regular peer. + if let Some(peer_id) = add_peer::(peer, local, active_peers, replacements) { + log::debug!("Added {}.", peer_id); + num_added += 1; + } + } + + log::debug!("Added {} entry node/s.", num_added); +} + +/// Attempts to add a new peer to a peer list (preferably as active). +/// If the peer is added inbound, i.e. , the "last verification timestamp" is added. +pub(crate) fn add_peer( + peer: Peer, + local: &Local, + active_peers: &ActivePeersList, + replacements: &ReplacementPeersList, +) -> Option { + // Only add new peers. + if peer::is_known(peer.peer_id(), local, active_peers, replacements) { + None + } else { + let peer_id = *peer.peer_id(); + // First try to add it to the active peer list. If that list is full, add it to the replacement list. + if !active_peers.read().is_full() { + let active_peer = if ON_REQUEST { + let mut active = ActivePeer::from(peer); + active.metrics_mut().set_last_verif_request_timestamp(); + active + } else { + ActivePeer::from(peer) + }; + if active_peers.write().insert(active_peer) { + Some(peer_id) + } else { + None + } + } else if replacements.write().insert(peer) { + Some(peer_id) + } else { + None + } + } +} + +// Note: this function is dead-lock danger zone! +/// Deletes a peer from the active peerlist if it's not an entry peer, and replaces it by a peer +/// from the replacement list. +pub(crate) fn remove_peer_from_active_list( + peer_id: &PeerId, + entry_peers: &EntryPeersList, + active_peers: &ActivePeersList, + replacements: &ReplacementPeersList, + event_tx: &EventTx, +) { + let mut active_peers = active_peers.write(); + + if let Some(mut removed_peer) = active_peers.remove(peer_id) { + // entry peers are never removed + if entry_peers.read().contains(removed_peer.peer_id()) { + // hive.go: reset verifiedCount and re-add them + removed_peer.metrics_mut().reset_verified_count(); + active_peers.insert(removed_peer); + } else { + // TODO: why is the event only triggered for verified peers? + // ```go + // if mp.verifiedCount.Load() > 0 { + // m.events.PeerDeleted.Trigger(&DeletedEvent{Peer: unwrapPeer(mp)}) + // } + // ``` + if removed_peer.metrics().verified_count() > 0 { + // Panic: we don't allow channel send errors. 
+ event_tx + .send(Event::PeerDeleted { peer_id: *peer_id }) + .expect("error sending `PeerDeleted` event"); + } + + // ```go + // if len(m.replacements) > 0 { + // var r *mpeer + // m.replacements, r = deletePeer(m.replacements, rand.Intn(len(m.replacements))) + // m.active = pushPeer(m.active, r, maxManaged) + // } + // ``` + // Pick a random peer from the replacement list (if not empty) + if !replacements.read().is_empty() { + let index = rand::thread_rng().gen_range(0..replacements.read().len()); + // Panic: unwrapping is fine, because we checked that the list isn't empty, and `index` must be in + // range. + let peer = replacements.write().remove_at(index).unwrap(); + + active_peers.insert(peer.into()); + } + } + } +} + +pub(crate) struct RecvContext<'a> { + peer_id: &'a PeerId, + msg_bytes: &'a [u8], + server_tx: &'a ServerTx, + local: &'a Local, + request_mngr: &'a RequestManager, + peer_addr: SocketAddr, + event_tx: &'a EventTx, + active_peers: &'a ActivePeersList, + replacements: &'a ReplacementPeersList, +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// VALIDATION +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug, Clone, Copy, thiserror::Error)] +pub(crate) enum ValidationError { + // The protocol version must match. + #[error("version mismatch; expected: {expected}, received: {received}")] + VersionMismatch { expected: u32, received: u32 }, + // The network id must match. + #[error("network id mismatch; expected: {expected}, received: {received}")] + NetworkIdMismatch { expected: u32, received: u32 }, + // The request must not be expired. + #[error("request expired")] + RequestExpired, + // The response must arrive in time. + #[error("no corresponding request, or timeout")] + NoCorrespondingRequestOrTimeout, + // The hash of the corresponding request must be correct. + #[error("incorrect request hash")] + IncorrectRequestHash, + // The peer must have an autopeering service. + #[error("no autopeering service")] + NoAutopeeringService, + // The service port must match with the detected port. + #[error("service port mismatch; expected: {expected}, received: {received}")] + ServicePortMismatch { + expected: ServicePort, + received: ServicePort, + }, +} + +fn validate_verification_request( + verif_req: &VerificationRequest, + version: u32, + network_id: u32, +) -> Result<(), ValidationError> { + use ValidationError::*; + + if verif_req.version() != version { + Err(VersionMismatch { + expected: version, + received: verif_req.version(), + }) + } else if verif_req.network_id() != network_id { + Err(NetworkIdMismatch { + expected: network_id, + received: verif_req.network_id(), + }) + } else if request::is_expired(verif_req.timestamp()) { + Err(RequestExpired) + } else { + // NOTE: the validity of the transmitted source and target addresses is ensured through the + // `VerificationRequest` type. + // TODO: maybe add check whether the peer sent the correct source address in the packet. 
+ // TODO: store own external IP address as perceived by the peer + Ok(()) + } +} + +fn validate_verification_response( + verif_res: &VerificationResponse, + request_mngr: &RequestManager, + peer_id: &PeerId, + source_socket_addr: SocketAddr, +) -> Result { + use ValidationError::*; + + if let Some(reqv) = request_mngr.write().remove::(peer_id) { + if verif_res.request_hash() == reqv.request_hash { + let res_services = verif_res.services(); + if let Some(autopeering_svc) = res_services.get(AUTOPEERING_SERVICE_NAME) { + if autopeering_svc.port() == source_socket_addr.port() { + Ok(reqv) + } else { + Err(ServicePortMismatch { + expected: source_socket_addr.port(), + received: autopeering_svc.port(), + }) + } + } else { + Err(NoAutopeeringService) + } + } else { + Err(IncorrectRequestHash) + } + } else { + Err(NoCorrespondingRequestOrTimeout) + } +} + +fn validate_discovery_request(disc_req: &DiscoveryRequest) -> Result<(), ValidationError> { + use ValidationError::*; + + if request::is_expired(disc_req.timestamp()) { + Err(RequestExpired) + } else { + Ok(()) + } +} + +fn validate_discovery_response( + disc_res: &DiscoveryResponse, + request_mngr: &RequestManager, + peer_id: &PeerId, +) -> Result { + use ValidationError::*; + + if let Some(reqv) = request_mngr.write().remove::(peer_id) { + if disc_res.request_hash() == &reqv.request_hash[..] { + // TODO: consider performing some checks on the peers we received, for example: + // * does the peer have necessary services (autopeering, gossip, fpc, ...) + // * is the ip address valid (not a 0.0.0.0, etc) + // for peer in disc_res.peers() {} + + Ok(reqv) + } else { + Err(IncorrectRequestHash) + } + } else { + Err(NoCorrespondingRequestOrTimeout) + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// HANDLING +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +fn handle_verification_request(verif_req: VerificationRequest, ctx: RecvContext) { + log::trace!("Handling verification request."); + + // In any case send a response. + send_verification_response_to_addr( + ctx.peer_addr, + ctx.peer_id, + &verif_req, + ctx.msg_bytes, + ctx.server_tx, + ctx.local, + ); + + // Is this a known peer? + if peer::is_known(ctx.peer_id, ctx.local, ctx.active_peers, ctx.replacements) { + // Update verification request timestamp + if let Some(peer) = ctx.active_peers.write().find_mut(ctx.peer_id) { + peer.metrics_mut().set_last_verif_request_timestamp(); + } + + if !peer::is_verified(ctx.peer_id, ctx.active_peers) { + // Peer is known, but no longer verified. + send_verification_request_to_addr(ctx.peer_addr, ctx.peer_id, ctx.request_mngr, ctx.server_tx, None); + } + } else { + // Add it as a new peer with autopeering service. + let mut peer = Peer::new(ctx.peer_addr.ip(), *ctx.peer_id.public_key()); + peer.add_service(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, ctx.peer_addr.port()); + + if let Some(peer_id) = add_peer::(peer, ctx.local, ctx.active_peers, ctx.replacements) { + log::debug!("Added {}.", peer_id); + } + + // Peer is unknown, thus still unverified. + send_verification_request_to_addr(ctx.peer_addr, ctx.peer_id, ctx.request_mngr, ctx.server_tx, None); + } +} + +// The peer must be known (since it's a valid response). That means that the peer is part of the active list currently. 
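+// On its first successful verification the peer's services are updated and a `PeerDiscovered` event
+// is published; the response is also forwarded to any waiting `begin_verification` call.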
+fn handle_verification_response(verif_res: VerificationResponse, verif_reqval: RequestValue, ctx: RecvContext) { + log::trace!("Handling verification response."); + + if let Some(verified_count) = peer::set_front_and_update(ctx.peer_id, ctx.active_peers) { + // If this is the first time the peer was verified: + // * Update its services; + // * Fire the "peer discovered" event; + if verified_count == 1 { + if let Some(peer) = ctx.active_peers.write().find_mut(ctx.peer_id) { + peer.peer_mut().set_services(verif_res.services().clone()) + } + + ctx.event_tx + .send(Event::PeerDiscovered { peer_id: *ctx.peer_id }) + .expect("error publishing peer-discovered event"); + } + } + + // Send the response notification. + if let Some(tx) = verif_reqval.response_tx { + // Panic: we don't allow channel send errors. + tx.send(verif_res.to_protobuf().to_vec()) + .expect("error sending response signal"); + } +} + +fn handle_discovery_request(_disc_req: DiscoveryRequest, ctx: RecvContext) { + log::trace!("Handling discovery request."); + + let request_hash = message_hash(MessageType::DiscoveryRequest, ctx.msg_bytes); + + let chosen_peers = + choose_n_random_peers_from_active_list(ctx.active_peers, MAX_PEERS_IN_RESPONSE, MIN_VERIFIED_IN_RESPONSE); + + let disc_res = DiscoveryResponse::new(request_hash, chosen_peers); + let disc_res_bytes = disc_res.to_protobuf().to_vec(); + + // Panic: we don't allow channel send errors. + ctx.server_tx + .send(OutgoingPacket { + msg_type: MessageType::DiscoveryResponse, + msg_bytes: disc_res_bytes, + peer_addr: ctx.peer_addr, + }) + .expect("error sending verification response to server"); +} + +fn handle_discovery_response(disc_res: DiscoveryResponse, disc_reqval: RequestValue, ctx: RecvContext) { + // Remove the corresponding request from the request manager. + log::trace!("Handling discovery response."); + + let mut num_added = 0; + + // Add discovered peers to the peer list and peer store. + for peer in disc_res.into_peers() { + // Note: we only fire `PeerDiscovered` if it can be verified. + if let Some(peer_id) = add_peer::(peer, ctx.local, ctx.active_peers, ctx.replacements) { + log::debug!("Added: {}.", peer_id); + num_added += 1; + } + } + + // Remember how many new peers were discovered thanks to that peer. + // Panic: we don't allow internal data inconsistencies. + ctx.active_peers + .write() + .find_mut(ctx.peer_id) + .expect("inconsistent active peers list") + .metrics_mut() + .set_last_new_peers(num_added); + + // Send the response notification. + if let Some(tx) = disc_reqval.response_tx { + // Panic: we don't allow channel send errors. + tx.send(ctx.msg_bytes.to_vec()).expect("error sending response signal"); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SENDING +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Initiates a verification request to a peer waiting for the peer's response, which must arrive in time. +/// +/// Returns `Some(ServiceMap)` if the request was successful, otherwise `None`. 
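+///
+/// The response must arrive within `RESPONSE_TIMEOUT`; on timeout the pending request is removed
+/// from the `RequestManager` again and `None` is returned.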
+pub(crate) async fn begin_verification( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, +) -> Option { + let (response_tx, response_rx) = request::response_chan(); + + send_verification_request_to_peer(peer_id, active_peers, request_mngr, server_tx, Some(response_tx)); + + match tokio::time::timeout(RESPONSE_TIMEOUT, response_rx).await { + Ok(Ok(bytes)) => match VerificationResponse::from_protobuf(&bytes).map(|r| r.into_services()) { + Ok(services) => Some(services), + Err(e) => { + log::debug!("Verification response decode error: {}", e); + None + } + }, + Ok(Err(e)) => { + log::debug!("Verification response error: {}", e); + None + } + Err(e) => { + log::debug!("Verification response timeout: {}", e); + + // The response didn't arrive in time => remove the request. + let _ = request_mngr.write().remove::(peer_id); + + None + } + } +} + +/// Sends a verification request to a peer. +/// +/// The function is non-blocking. +pub(crate) fn send_verification_request_to_peer( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, +) { + let peer_addr = active_peers + .read() + .find(peer_id) + .map(|p| { + p.peer() + .service_socketaddr(AUTOPEERING_SERVICE_NAME) + .expect("peer doesn't support autopeering") + }) + // Panic: Requests are sent to listed peers only + .expect("peer not in active peers list"); + + send_verification_request_to_addr(peer_addr, peer_id, request_mngr, server_tx, response_tx); +} + +/// Sends a verification request to a peer. +pub(crate) fn send_verification_request_to_addr( + peer_addr: SocketAddr, + peer_id: &PeerId, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, +) { + log::trace!("Sending verification request to: {}/{}", peer_id, peer_addr); + + let verif_req = request_mngr + .write() + .new_verification_request(*peer_id, peer_addr.ip(), response_tx); + + let msg_bytes = verif_req.to_protobuf().to_vec(); + + // Panic: we don't allow channel send errors. + server_tx + .send(OutgoingPacket { + msg_type: MessageType::VerificationRequest, + msg_bytes, + peer_addr, + }) + .expect("error sending verification request to server"); +} + +/// Sends a verification response to a peer. +pub(crate) fn send_verification_response_to_addr( + peer_addr: SocketAddr, + peer_id: &PeerId, + verif_req: &VerificationRequest, + msg_bytes: &[u8], + server_tx: &ServerTx, + local: &Local, +) { + log::trace!("Sending verification response to: {}/{}", peer_id, peer_addr); + + let request_hash = message_hash(MessageType::VerificationRequest, msg_bytes); + + let verif_res = VerificationResponse::new(request_hash, local.services(), peer_addr.ip()); + + let msg_bytes = verif_res.to_protobuf().to_vec(); + + // Note: the destination address uses the source IP address of the packet plus the src_port from the message + // (see hive.go for reference) + + // Panic: we don't allow channel send errors. + server_tx + .send(OutgoingPacket { + msg_type: MessageType::VerificationResponse, + msg_bytes, + peer_addr: SocketAddr::new(peer_addr.ip(), verif_req.source_addr().port()), + }) + .expect("error sending verification response to server"); +} + +/// Initiates a discovery request to a peer by fetching its endpoint data from the peer store and waiting +/// for the peer's response, which must arrive in time. +/// +/// Returns `Some(Vec)` of discovered peers, if the request was successful, otherwise `None`. 
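+///
+/// A response that arrives in time but cannot be decoded still counts as answered and yields
+/// `Some(Vec::new())`; only a timed out request yields `None`.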
+pub(crate) async fn begin_discovery( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, +) -> Option> { + let (response_tx, response_rx) = request::response_chan(); + + send_discovery_request_to_peer(peer_id, active_peers, request_mngr, server_tx, Some(response_tx)); + + match tokio::time::timeout(RESPONSE_TIMEOUT, response_rx).await { + Ok(Ok(bytes)) => { + match DiscoveryResponse::from_protobuf(&bytes) { + Ok(disc_res) => Some(disc_res.into_peers()), + Err(e) => { + // The peer sent a faulty response. + log::debug!("Discovery response error: {}", e); + Some(Vec::new()) + } + } + } + Ok(Err(e)) => { + // This shouldn't happen under normal circumstances. + log::debug!("Discovery response error: {}", e); + Some(Vec::new()) + } + Err(e) => { + log::debug!("Discovery response timeout: {}", e); + + // The response didn't arrive in time => remove the request. + let _ = request_mngr.write().remove::(peer_id); + + None + } + } +} + +/// Sends a discovery request to a peer. +/// +/// The function is non-blocking. +pub(crate) fn send_discovery_request_to_peer( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, +) { + let peer_addr = active_peers + .read() + .find(peer_id) + .map(|p| { + p.peer() + .service_socketaddr(AUTOPEERING_SERVICE_NAME) + .expect("peer doesn't support autopeering") + }) + // Panic: Requests are sent to listed peers only + .expect("peer not in active peers list"); + + send_discovery_request_to_addr(peer_addr, peer_id, request_mngr, server_tx, response_tx); +} + +/// Sends a discovery request to a peer's address. +pub(crate) fn send_discovery_request_to_addr( + peer_addr: SocketAddr, + peer_id: &PeerId, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, +) { + log::trace!("Sending discovery request to: {:?}", peer_id); + + let disc_req = request_mngr.write().new_discovery_request(*peer_id, response_tx); + + let msg_bytes = disc_req.to_protobuf().to_vec(); + + server_tx + .send(OutgoingPacket { + msg_type: MessageType::DiscoveryRequest, + msg_bytes, + peer_addr, + }) + .expect("error sending discovery request to server"); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// HELPERS +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +fn choose_n_random_peers_from_active_list( + active_peers: &ActivePeersList, + n: usize, + min_verified_count: usize, +) -> Vec { + let num_active_peers = active_peers.read().len(); + + if num_active_peers <= n { + // No randomization required => return all we got - if possible. + let mut all_peers = Vec::with_capacity(num_active_peers); + all_peers.extend(active_peers.read().iter().filter_map(|active| { + if active.metrics().verified_count() >= min_verified_count { + Some(active.peer().clone()) + } else { + None + } + })); + all_peers + } else { + // TODO: should this better be a `CryptoRng`? + let mut random_peers = Vec::with_capacity(n); + let mut rng = rand::thread_rng(); + let index_vec = index::sample(&mut rng, num_active_peers, num_active_peers); + random_peers.extend( + index_vec + .iter() + // Panic: unwrapping is safe due to the length check. 
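+                // (`index::sample` only yields indices smaller than `num_active_peers`, so `get` always succeeds.)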
+ .map(|index| active_peers.read().get(index).unwrap().clone()) + .filter_map(|active| { + if active.metrics().verified_count() >= min_verified_count { + Some(active.peer().clone()) + } else { + None + } + }) + .take(n), + ); + random_peers + } +} + +// Hive.go: returns all the currently managed peers that have been verified at least once. +pub(crate) fn get_verified_peers(active_peers: &ActivePeersList) -> Vec { + let mut peers = Vec::with_capacity(active_peers.read().len()); + + peers.extend(active_peers.read().iter().filter_map(|p| { + if p.metrics().verified_count() > 0 { + Some(p.clone()) + } else { + None + } + })); + + peers +} diff --git a/bee-network/bee-autopeering/src/discovery/messages.rs b/bee-network/bee-autopeering/src/discovery/messages.rs new file mode 100644 index 0000000000..0457fc2eec --- /dev/null +++ b/bee-network/bee-autopeering/src/discovery/messages.rs @@ -0,0 +1,306 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{local::services::ServiceMap, peer::Peer, proto, request::Request}; + +use crypto::hashes::sha::SHA256_LEN; +use prost::{bytes::BytesMut, DecodeError, EncodeError, Message as _}; + +use std::{ + fmt, + net::{AddrParseError, IpAddr, SocketAddr}, +}; + +#[derive(Clone, Copy)] +pub(crate) struct VerificationRequest { + version: u32, + network_id: u32, + timestamp: u64, + source_addr: SocketAddr, + target_addr: IpAddr, +} + +impl VerificationRequest { + pub(crate) fn new(version: u32, network_id: u32, source_addr: SocketAddr, target_addr: IpAddr) -> Self { + let timestamp = crate::time::unix_now_secs(); + + Self { + version, + network_id, + timestamp, + source_addr, + target_addr, + } + } + + pub(crate) fn version(&self) -> u32 { + self.version + } + + pub(crate) fn network_id(&self) -> u32 { + self.network_id + } + + pub(crate) fn timestamp(&self) -> u64 { + self.timestamp + } + + pub(crate) fn source_addr(&self) -> SocketAddr { + self.source_addr + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::Ping { + version, + network_id, + timestamp, + src_addr, + src_port, + dst_addr, + } = proto::Ping::decode(bytes)?; + + let ip_addr: IpAddr = src_addr.parse().map_err(Error::InvalidSourceIpAddress)?; + let port = src_port as u16; + + let source_addr = SocketAddr::new(ip_addr, port); + let target_addr: IpAddr = dst_addr.parse().map_err(Error::InvalidTargetIpAddress)?; + + Ok(Self { + version, + network_id, + timestamp: timestamp as u64, + source_addr, + target_addr, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let ping = proto::Ping { + version: self.version, + network_id: self.network_id, + timestamp: self.timestamp as i64, + src_addr: self.source_addr.ip().to_string(), + src_port: self.source_addr.port() as u32, + dst_addr: self.target_addr.to_string(), + }; + + let mut bytes = BytesMut::with_capacity(ping.encoded_len()); + + // Panic: we have allocated a properly sized buffer. 
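+        // (`prost::Message::encode` only fails if the destination buffer runs out of capacity.)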
+ ping.encode(&mut bytes).expect("encoding discovery request failed"); + + bytes + } +} + +impl fmt::Debug for VerificationRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VerificationRequest") + .field("version", &self.version) + .field("network_id", &self.network_id) + .field("timestamp", &self.timestamp) + .field("source_addr", &self.source_addr) + .field("target_addr", &self.target_addr) + .finish() + } +} + +impl Request for VerificationRequest {} + +#[derive(Clone)] +pub(crate) struct VerificationResponse { + request_hash: [u8; SHA256_LEN], + services: ServiceMap, + target_addr: IpAddr, +} + +impl VerificationResponse { + pub(crate) fn new(request_hash: [u8; SHA256_LEN], services: ServiceMap, target_addr: IpAddr) -> Self { + Self { + request_hash, + services, + target_addr, + } + } + + pub(crate) fn request_hash(&self) -> &[u8] { + &self.request_hash + } + + pub(crate) fn services(&self) -> &ServiceMap { + &self.services + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::Pong { + req_hash, + services, + dst_addr, + } = proto::Pong::decode(bytes)?; + + Ok(Self { + request_hash: req_hash.try_into().map_err(|_| Error::RestoreRequestHash)?, + services: services.ok_or(Error::MissingServices)?.try_into()?, + target_addr: dst_addr.parse().map_err(Error::InvalidTargetIpAddress)?, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let pong = proto::Pong { + req_hash: self.request_hash.to_vec(), + services: Some(self.services().into()), + dst_addr: self.target_addr.to_string(), + }; + + let mut bytes = BytesMut::with_capacity(pong.encoded_len()); + + // Panic: we have allocated a properly sized buffer. + pong.encode(&mut bytes).expect("encoding discovery response failed"); + + bytes + } + + pub(crate) fn into_services(self) -> ServiceMap { + self.services + } +} + +impl fmt::Debug for VerificationResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VerificationResponse") + .field("request_hash", &bs58::encode(&self.request_hash).into_string()) + .field("services", &self.services.to_string()) + .field("target_addr", &self.target_addr.to_string()) + .finish() + } +} + +#[derive(Clone, Copy)] +pub(crate) struct DiscoveryRequest { + timestamp: u64, +} + +impl DiscoveryRequest { + pub(crate) fn new() -> Self { + let timestamp = crate::time::unix_now_secs(); + + Self { timestamp } + } + + pub(crate) fn timestamp(&self) -> u64 { + self.timestamp + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::DiscoveryRequest { timestamp } = proto::DiscoveryRequest::decode(bytes)?; + + Ok(Self { + timestamp: timestamp as u64, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let discover_request = proto::DiscoveryRequest { + timestamp: self.timestamp as i64, + }; + + let mut bytes = BytesMut::with_capacity(discover_request.encoded_len()); + + // Panic: we have allocated a properly sized buffer. 
+ discover_request + .encode(&mut bytes) + .expect("encoding discovery request failed"); + + bytes + } +} + +impl fmt::Debug for DiscoveryRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DiscoveryRequest") + .field("timestamp", &self.timestamp) + .finish() + } +} + +impl Request for DiscoveryRequest {} + +#[derive(Clone)] +pub(crate) struct DiscoveryResponse { + request_hash: [u8; SHA256_LEN], + peers: Vec, +} + +impl DiscoveryResponse { + pub(crate) fn new(request_hash: [u8; SHA256_LEN], peers: Vec) -> Self { + Self { request_hash, peers } + } + + pub(crate) fn request_hash(&self) -> &[u8] { + &self.request_hash + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::DiscoveryResponse { req_hash, peers } = proto::DiscoveryResponse::decode(bytes)?; + let peers = peers + .into_iter() + .filter_map(|p| proto::Peer::try_into(p).ok()) + .collect(); + + Ok(Self { + request_hash: req_hash.try_into().expect("todo: error type"), + peers, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let peers = self.peers.iter().map(Into::into).collect(); + + let disc_res = proto::DiscoveryResponse { + req_hash: self.request_hash.to_vec(), + peers, + }; + + let mut bytes = BytesMut::with_capacity(disc_res.encoded_len()); + + // Panic: we have allocated a properly sized buffer. + disc_res.encode(&mut bytes).expect("encoding discovery response failed"); + + bytes + } + + pub(crate) fn into_peers(self) -> Vec { + self.peers + } +} + +impl fmt::Debug for DiscoveryResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DiscoveryResponse") + .field("request_hash", &bs58::encode(&self.request_hash).into_string()) + .field("peers", &self.peers) + .finish() + } +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum Error { + #[error("the peer did not announce any services")] + MissingServices, + #[error("invalid source ip address due to: {0}.")] + InvalidSourceIpAddress(AddrParseError), + #[error("invalid target ip address due to: {0}.")] + InvalidTargetIpAddress(AddrParseError), + #[error("invalid service description")] + Service(#[from] crate::local::services::Error), + #[error("{0}")] + ProtobufDecode(#[from] DecodeError), + #[error("{0}")] + ProtobufEncode(#[from] EncodeError), + #[error("restore request hash")] + RestoreRequestHash, +} diff --git a/bee-network/bee-autopeering/src/discovery/mod.rs b/bee-network/bee-autopeering/src/discovery/mod.rs new file mode 100644 index 0000000000..476199cc60 --- /dev/null +++ b/bee-network/bee-autopeering/src/discovery/mod.rs @@ -0,0 +1,6 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod manager; +pub(crate) mod messages; +pub(crate) mod query; diff --git a/bee-network/bee-autopeering/src/discovery/query.rs b/bee-network/bee-autopeering/src/discovery/query.rs new file mode 100644 index 0000000000..f76199d80a --- /dev/null +++ b/bee-network/bee-autopeering/src/discovery/query.rs @@ -0,0 +1,261 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + discovery::manager, + event::EventTx, + peer::{ + lists::{ActivePeer, ActivePeersList, EntryPeersList, ReplacementPeersList}, + PeerId, + }, + request::RequestManager, + server::ServerTx, + task::Repeat, +}; + +use rand::{thread_rng, Rng}; + +#[derive(Clone)] +pub(crate) struct QueryContext { + pub(crate) request_mngr: RequestManager, + pub(crate) entry_peers: EntryPeersList, + pub(crate) active_peers: 
ActivePeersList, + pub(crate) replacements: ReplacementPeersList, + pub(crate) server_tx: ServerTx, + pub(crate) event_tx: EventTx, +} + +// Hive.go: pings the oldest active peer. +pub(crate) fn reverify_fn() -> Repeat { + Box::new(|ctx| { + // Determine the next peer to re/verifiy. + if let Some(peer_id) = peer_to_reverify(&ctx.active_peers) { + log::debug!("Reverifying {}...", peer_id); + + let ctx_ = ctx.clone(); + + // TODO: introduce `UnsupervisedTask` type, that always finishes after a timeout. + let _ = tokio::spawn(async move { + if let Some(services) = + manager::begin_verification(&peer_id, &ctx_.active_peers, &ctx_.request_mngr, &ctx_.server_tx).await + { + // Hive.go: no need to do anything here, as the peer is bumped when handling the pong + log::debug!( + "Reverification successful. Peer offers {} service/s: {}", + services.len(), + services + ); + } else { + log::debug!("Reverification failed. Removing peer {}.", peer_id); + + manager::remove_peer_from_active_list( + &peer_id, + &ctx_.entry_peers, + &ctx_.active_peers, + &ctx_.replacements, + &ctx_.event_tx, + ) + } + }); + } else { + log::debug!("Currently no peers to reverify."); + } + }) +} + +// Hive.go: returns the oldest peer, or nil if empty. +fn peer_to_reverify(active_peers: &ActivePeersList) -> Option { + active_peers.read().get_oldest().map(|p| *p.peer_id()) +} + +// Hive.go: +// The current strategy is to always select the latest verified peer and one of +// the peers that returned the most number of peers the last time it was queried. +pub(crate) fn query_fn() -> Repeat { + Box::new(|ctx| { + let peers = select_peers_to_query(&ctx.active_peers); + if peers.is_empty() { + log::warn!("No peers to query."); + } else { + log::debug!("Querying {} peer/s...", peers.len()); + + for peer_id in peers.into_iter() { + let ctx_ = ctx.clone(); + + // TODO: introduce `UnsupervisedTask` type, that always finishes after a timeout. + tokio::spawn(async move { + if let Some(peers) = + manager::begin_discovery(&peer_id, &ctx_.active_peers, &ctx_.request_mngr, &ctx_.server_tx) + .await + { + log::debug!("Query successful. Received {} peers.", peers.len()); + } else { + log::debug!("Query unsuccessful. Removing peer {}.", peer_id); + + manager::remove_peer_from_active_list( + &peer_id, + &ctx_.entry_peers, + &ctx_.active_peers, + &ctx_.replacements, + &ctx_.event_tx, + ) + } + }); + } + } + }) +} + +// Hive.go: selects the peers that should be queried. +fn select_peers_to_query(active_peers: &ActivePeersList) -> Vec { + let mut verif_peers = manager::get_verified_peers(active_peers); + + // If we have less than 3 verified peers, then we use those for the query. + if verif_peers.len() < 3 { + verif_peers.into_iter().map(|ap| *ap.peer_id()).collect::>() + } else { + // Note: this macro is useful to remove some noise from the pattern matching rules. + macro_rules! num { + ($t:expr) => { + // Panic: we made sure, that unwrap is always okay. + $t.as_ref().unwrap().metrics().last_new_peers() + }; + } + + let latest = *verif_peers.remove(0).peer_id(); + let len = verif_peers.len().min(3); + + // Note: This loop finds the three "heaviest" peers with one iteration over an unsorted vec of verified peers. 
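+        // The accumulator `(x, y, z)` is kept sorted by `last_new_peers` in ascending order; once all
+        // three slots are filled, a heavier peer evicts the lightest, so the fold ends with the three
+        // peers that reported the most new peers during their last query.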
+ let heaviest3 = verif_peers.into_iter().fold( + (None, None, None), + |(x, y, z): (Option, Option, Option), p| { + let n = p.metrics().last_new_peers(); + + match (&x, &y, &z) { + // set 1st + (None, _, _) => (Some(p), y, z), + // shift-right + set 1st + (t, None, _) if n < num!(t) => (Some(p), t.clone(), z), + // set 2nd + (t, None, _) if n >= num!(t) => (x, Some(p), z), + // shift-right + shift-right + set 1st + (s, t, None) if n < num!(s) => (Some(p), s.clone(), t.clone()), + // shift-right + set 1st + (_, t, None) if n < num!(t) => (x, Some(p), t.clone()), + // set 3rd + (_, t, None) if n >= num!(t) => (x, y, Some(p)), + // no-op + (t, _, _) if n < num!(t) => (x, y, z), + // set 1st + (_, t, _) if n < num!(t) => (Some(p), y, z), + // shift-left + set 2nd + (_, _, t) if n < num!(t) => (y, Some(p), z), + // shift-left + shift-left + set 3rd + (_, _, _) => (y, z, Some(p)), + } + }, + ); + + let r = thread_rng().gen_range(0..len); + let heaviest = *match r { + 0 => heaviest3.0, + 1 => heaviest3.1, + 2 => heaviest3.2, + _ => unreachable!(), + } + // Panic: we made sure that the unwrap is always possible. + .unwrap() + .peer_id(); + + vec![latest, heaviest] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::peer::{lists::ActivePeer, Peer}; + + fn create_peerlist_of_size(n: usize) -> ActivePeersList { + // Create a set of active peer entries. + let entries = (0..n as u8).map(Peer::new_test_peer).map(ActivePeer::new); + + // Create a peerlist, and insert the peer entries setting the `last_new_peers` metric + // equal to its peerlist index. We also need to set the `verified_count` to at least 1. + let peerlist = ActivePeersList::default(); + let mut pl = peerlist.write(); + for (i, mut entry) in entries.into_iter().enumerate() { + entry.metrics_mut().set_last_new_peers((n - 1) - i); + entry.metrics_mut().increment_verified_count(); + + pl.insert(entry); + } + drop(pl); + peerlist + } + + #[test] + fn find_peers_to_query_in_peerlist_1() { + let peerlist = create_peerlist_of_size(1); + + let selected = select_peers_to_query(&peerlist); + assert_eq!(1, selected.len()); + } + + #[test] + fn find_peers_to_query_in_peerlist_2() { + let peerlist = create_peerlist_of_size(2); + + let selected = select_peers_to_query(&peerlist); + assert_eq!(2, selected.len()); + } + + #[test] + fn find_peers_to_query_in_peerlist_3() { + let peerlist = create_peerlist_of_size(3); + + macro_rules! equal { + ($a:expr, $b:expr) => {{ $a == peerlist.read().get($b).unwrap().peer_id() }}; + } + + let selected = select_peers_to_query(&peerlist); + assert_eq!(2, selected.len()); + + assert!(equal!(&selected[0], 0)); + assert!(equal!(&selected[1], 1) || equal!(&selected[1], 2)); + } + + #[test] + fn find_peers_to_query_in_peerlist_10() { + let peerlist = create_peerlist_of_size(10); + + macro_rules! equal { + ($a:expr, $b:expr) => {{ $a == peerlist.read().get($b).unwrap().peer_id() }}; + } + + // 0 1 2 3 4 ... 7 8 9 (index) + // 0 1 2 3 4 ... 7 8 9 (last_new_peers) + // ^ ^ ^ ^ + // 0 1 1 1 (expected) + let selected = select_peers_to_query(&peerlist); + assert_eq!(2, selected.len()); + + // Always the newest peer (index 0) is selected. + assert!(equal!(&selected[0], 0)); + // Either of the 3 "heaviest" peers is selected. + assert!(equal!(&selected[1], 7) || equal!(&selected[1], 8) || equal!(&selected[1], 9)); + + // 0 1 2 3 4 ... 7 8 9 (index) + // 8 9 0 1 2 ... 
5 6 7 (last_new_peers) + // ^ ^ ^ ^ + // 0 1 1 1 (expected) + peerlist.write().rotate_forwards(); + peerlist.write().rotate_forwards(); + + let selected = select_peers_to_query(&peerlist); + assert_eq!(2, selected.len()); + + assert!(equal!(&selected[0], 0)); + assert!(equal!(&selected[1], 1) || equal!(&selected[1], 8) || equal!(&selected[1], 9)); + } +} diff --git a/bee-network/bee-autopeering/src/event.rs b/bee-network/bee-autopeering/src/event.rs new file mode 100644 index 0000000000..5ee7c38591 --- /dev/null +++ b/bee-network/bee-autopeering/src/event.rs @@ -0,0 +1,84 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Events published to the user. + +use crate::{ + peer::{Peer, PeerId}, + peering::neighbor::Distance, +}; + +use tokio::sync::mpsc; + +use std::fmt; + +/// Autopeering related events. +#[derive(Debug)] +pub enum Event { + /// A new peer has been discovered. + PeerDiscovered { + /// The identity of the discovered peer. + peer_id: PeerId, + }, + /// A peer has been deleted (e.g. due to a failed re-verification). + PeerDeleted { + /// The identity of the deleted peer. + peer_id: PeerId, + }, + /// Private and public salt were updated. + SaltUpdated { + /// Lifetime of the public salt. + public_salt_lifetime: u64, + /// Lifetime of the private salt. + private_salt_lifetime: u64, + }, + /// An outgoing peering request has been accepted remotely. + OutgoingPeering { + /// The associated peer. + peer: Peer, + /// The distance between the local and the remote peer. + distance: Distance, + }, + /// An incoming peering request has been accepted locally. + IncomingPeering { + /// The associated peer. + peer: Peer, + /// The distance between the local and the remote peer. + distance: Distance, + }, + /// A peering was dropped due to one end sending a drop message to the other. + PeeringDropped { + /// The identity of the dropped peer. + peer_id: PeerId, + }, +} + +/// Exposes autopeering related events. +pub type EventRx = mpsc::UnboundedReceiver; +pub(crate) type EventTx = mpsc::UnboundedSender; + +pub(crate) fn event_chan() -> (EventTx, EventRx) { + mpsc::unbounded_channel::() +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use Event::*; + + match self { + PeerDiscovered { peer_id } => write!(f, "Discovered: {}.", peer_id), + PeerDeleted { peer_id } => write!(f, "Removed offline: {}.", peer_id), + SaltUpdated { + public_salt_lifetime, + private_salt_lifetime, + } => write!( + f, + "Salts updated => outbound: {} / inbound: {}.", + public_salt_lifetime, private_salt_lifetime, + ), + OutgoingPeering { peer, .. } => write!(f, "Peered: {} (outgoing).", peer.peer_id()), + IncomingPeering { peer, .. } => write!(f, "Peered: {} (incoming).", peer.peer_id()), + PeeringDropped { peer_id } => write!(f, "Dropped: {}.", peer_id), + } + } +} diff --git a/bee-network/bee-autopeering/src/hash.rs b/bee-network/bee-autopeering/src/hash.rs new file mode 100644 index 0000000000..ff17469690 --- /dev/null +++ b/bee-network/bee-autopeering/src/hash.rs @@ -0,0 +1,63 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::packet::MessageType; + +use crypto::hashes::{ + sha::{Sha256, SHA256}, + Digest, +}; +use hash32::{FnvHasher, Hasher as _}; + +pub(crate) use crypto::hashes::sha::SHA256_LEN; + +/// Creates the SHA-256 hash of a generic byte sequence. 
+pub(crate) fn data_hash(data: &[u8]) -> [u8; SHA256_LEN] { + let mut digest = [0; SHA256_LEN]; + SHA256(data, &mut digest); + digest +} + +/// Creates the SHA-256 hash of a particular network message. +pub(crate) fn message_hash(msg_type: MessageType, msg_data: &[u8]) -> [u8; SHA256_LEN] { + let mut sha256 = Sha256::new(); + sha256.update([msg_type as u8]); + sha256.update(msg_data); + + let mut digest = [0u8; SHA256_LEN]; + digest.copy_from_slice(&sha256.finalize()); + digest +} + +/// Creates the 32bit fnv hash of the network name. +pub(crate) fn network_hash(network_name: impl AsRef) -> u32 { + let mut hasher = FnvHasher::default(); + hasher.write(network_name.as_ref().as_bytes()); + hasher.finish() +} + +#[cfg(test)] +mod tests { + use super::*; + + fn compare_message_hash(msg_type: MessageType, msg_data: &[u8]) -> [u8; SHA256_LEN] { + let mut bytes = vec![0u8; msg_data.len() + 1]; + bytes[0] = msg_type as u8; + bytes[1..].copy_from_slice(msg_data); + + let mut digest = [0; SHA256_LEN]; + SHA256(&bytes, &mut digest); + digest + } + + #[test] + fn create_message_hash() { + let msg_type = MessageType::DiscoveryRequest; + let msg_data = [1u8; 150]; + + assert_eq!( + message_hash(msg_type, &msg_data), + compare_message_hash(msg_type, &msg_data) + ); + } +} diff --git a/bee-network/bee-autopeering/src/init.rs b/bee-network/bee-autopeering/src/init.rs new file mode 100644 index 0000000000..4c84cdda52 --- /dev/null +++ b/bee-network/bee-autopeering/src/init.rs @@ -0,0 +1,196 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Autopeering initialization. + +use crate::{ + config::AutopeeringConfig, + delay, + discovery::{ + manager::{DiscoveryManager, DiscoveryManagerConfig, DEFAULT_QUERY_INTERVAL, DEFAULT_REVERIFY_INTERVAL}, + query::{self, QueryContext}, + }, + event::{self, EventRx}, + hash, + local::Local, + multiaddr, + packet::IncomingPacket, + peer::{ + lists::{ActivePeersList, EntryPeersList, ReplacementPeersList}, + stores::PeerStore, + }, + peering::{ + filter::NeighborFilter, + manager::{InboundNeighborhood, OutboundNeighborhood, PeeringManager, SaltUpdateContext, SALT_UPDATE_SECS}, + update::{self, UpdateContext, OPEN_OUTBOUND_NBH_UPDATE_SECS}, + NeighborValidator, + }, + request::{self, RequestManager, EXPIRED_REQUEST_REMOVAL_INTERVAL}, + server::{server_chan, IncomingPacketSenders, Server, ServerConfig, ServerSocket}, + task::{TaskManager, MAX_SHUTDOWN_PRIORITY}, +}; + +use std::{error, future::Future, iter}; + +const NUM_TASKS: usize = 9; + +/// Initializes the autopeering service. 
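+///
+/// On success, returns the receiving half of the event channel (`EventRx`), which can be polled
+/// for autopeering `Event`s.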
+pub async fn init( + config: AutopeeringConfig, + version: u32, + network_name: I, + local: Local, + peer_store_config: ::Config, + term_signal: Q, + neighbor_validator: V, +) -> Result> +where + S: PeerStore + 'static, + I: AsRef, + Q: Future + Send + 'static, + V: NeighborValidator + 'static, +{ + let network_id = hash::network_hash(&network_name); + + log::info!("---------------------------------------------------------------------------------------------------"); + log::info!("WARNING:"); + log::info!("Autopeering will disclose your public IP address to possibly all nodes and entry points."); + log::info!("Please disable it if you do not want this to happen!"); + log::info!("---------------------------------------------------------------------------------------------------"); + log::info!("Network name/id: {}/{}", network_name.as_ref(), network_id); + log::info!("Protocol_version: {}", version); + log::info!("Public key: {}", multiaddr::pubkey_to_base58(&local.public_key())); + log::info!("Bind address: {}", config.bind_addr()); + + // Create or load a peer store. + let peer_store = S::new(peer_store_config); + + // Create peer lists. + let entry_peers = EntryPeersList::default(); + let active_peers = ActivePeersList::default(); + let replacements = ReplacementPeersList::default(); + + // Create a task manager to have good control over the tokio task spawning business. + let mut task_mngr = + TaskManager::<_, NUM_TASKS>::new(peer_store.clone(), active_peers.clone(), replacements.clone()); + + // Create channels for inbound/outbound communication with the UDP server. + let (discovery_tx, discovery_rx) = server_chan::(); + let (peering_tx, peering_rx) = server_chan::(); + let incoming_senders = IncomingPacketSenders { + discovery_tx, + peering_tx, + }; + + // Event channel to publish events to the user. + let (event_tx, event_rx) = event::event_chan(); + + // Initialize the server managing the UDP socket I/O. + let server_config = ServerConfig::new(&config); + let (server, server_tx) = Server::new(server_config, local.clone(), incoming_senders); + server.init(&mut task_mngr).await; + + // Create a request manager that creates and keeps track of outgoing requests. + let request_mngr = RequestManager::new(version, network_id, config.bind_addr()); + + // Create the discovery manager handling the discovery request/response protocol. + let discovery_config = DiscoveryManagerConfig::new(&config, version, network_id); + let discovery_socket = ServerSocket::new(discovery_rx, server_tx.clone()); + + let discovery_mngr = DiscoveryManager::new( + discovery_config, + local.clone(), + discovery_socket, + request_mngr.clone(), + peer_store.clone(), + entry_peers.clone(), + active_peers.clone(), + replacements.clone(), + event_tx.clone(), + ); + discovery_mngr.init(&mut task_mngr).await; + + // Create neighborhoods and neighbor candidate filter. + let inbound_nbh = InboundNeighborhood::new(); + let outbound_nbh = OutboundNeighborhood::new(); + let nb_filter = NeighborFilter::new(local.peer_id(), neighbor_validator); + + // Create the autopeering manager handling the peering request/response protocol. + let peering_socket = ServerSocket::new(peering_rx, server_tx.clone()); + + let peering_mngr = PeeringManager::new( + local.clone(), + peering_socket, + request_mngr.clone(), + active_peers.clone(), + event_tx.clone(), + inbound_nbh.clone(), + outbound_nbh.clone(), + nb_filter.clone(), + ); + task_mngr.run(peering_mngr); + + // TODO: remove this when sure that all open requests are garbage collected. 
+ // Remove expired requests regularly. + let f = request::remove_expired_requests_fn(); + let delay = iter::repeat(EXPIRED_REQUEST_REMOVAL_INTERVAL); + let ctx = request_mngr.clone(); + task_mngr.repeat(f, delay, ctx, "Expired-Request-Removal", MAX_SHUTDOWN_PRIORITY); + + let ctx = SaltUpdateContext::new( + local.clone(), + nb_filter.clone(), + inbound_nbh, + outbound_nbh.clone(), + server_tx.clone(), + event_tx.clone(), + ); + + // Update salts regularly. + let f = crate::peering::manager::update_salts_fn(config.drop_neighbors_on_salt_update()); + let delay = iter::repeat(SALT_UPDATE_SECS); + task_mngr.repeat(f, delay, ctx, "Salt-Update", MAX_SHUTDOWN_PRIORITY); + + let ctx = QueryContext { + request_mngr: request_mngr.clone(), + entry_peers: entry_peers.clone(), + active_peers: active_peers.clone(), + replacements: replacements.clone(), + server_tx: server_tx.clone(), + event_tx: event_tx.clone(), + }; + + // Reverify old peers regularly. + let f = query::reverify_fn(); + let delay = iter::repeat(DEFAULT_REVERIFY_INTERVAL); + task_mngr.repeat(f, delay, ctx.clone(), "Reverification", MAX_SHUTDOWN_PRIORITY); + + // Discover new peers regularly. + let f = query::query_fn(); + let delay = iter::repeat(DEFAULT_QUERY_INTERVAL); + task_mngr.repeat(f, delay, ctx, "Discovery", MAX_SHUTDOWN_PRIORITY); + + let ctx = UpdateContext { + local, + request_mngr, + active_peers, + nb_filter, + outbound_nbh, + server_tx, + }; + + // Update the outbound neighborhood regularly (interval depends on whether slots available or not). + let f = update::update_outbound_neighborhood_fn(); + let delay = delay::ManualDelayFactory::new(OPEN_OUTBOUND_NBH_UPDATE_SECS); + task_mngr.repeat(f, delay, ctx, "Outbound neighborhood update", MAX_SHUTDOWN_PRIORITY); + + // Await the shutdown signal (in a separate task). + tokio::spawn(async move { + term_signal.await; + task_mngr.shutdown().await; + }); + + log::debug!("Autopeering initialized."); + + Ok(event_rx) +} diff --git a/bee-network/bee-autopeering/src/lib.rs b/bee-network/bee-autopeering/src/lib.rs new file mode 100644 index 0000000000..b6bde0547c --- /dev/null +++ b/bee-network/bee-autopeering/src/lib.rs @@ -0,0 +1,149 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Allows peers in the same IOTA network to automatically discover each other. +//! +//! In order to integrate the Autopeering functionality in your node implementation you need to provide its `init` +//! function with the following data: +//! * an `AutopeeringConfig`; +//! * a protocol version (`u32`); +//! * a network name, e.g. "chrysalis-mainnet"; +//! * a `Local` entity (either randomly created or from an `Ed25519` keypair), that additionally announces one or more +//! services; +//! * a shutdown signal (`Future`); +//! * a peer store, e.g. the `InMemoryPeerStore` (non-persistent) or the `SledPeerStore` (persistent), or a custom peer +//! store implementing the `PeerStore` trait; +//! +//! ## Example +//! +//! ```no_run +//! use bee_autopeering::{ +//! config::AutopeeringConfigJsonBuilder, +//! init, +//! stores::{SledPeerStore, SledPeerStoreConfig}, +//! AutopeeringConfig, Event, Local, NeighborValidator, Peer, ServiceProtocol, AUTOPEERING_SERVICE_NAME, +//! }; +//! +//! const NETWORK: &str = "chrysalis-mainnet"; +//! +//! // An example autopeering config in JSON format: +//! fn read_config() -> AutopeeringConfig { +//! let config_json = r#" +//! { +//! "enabled": true, +//! "bindAddress": "0.0.0.0:14627", +//! "entryNodes": [ +//! 
"/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb", +//! "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2" +//! ], +//! "entryNodesPreferIPv6": false, +//! "runAsEntryNode": false +//! }"#; +//! +//! serde_json::from_str::(config_json) +//! .expect("error deserializing json config builder") +//! .finish() +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! // Peers will only accept each other as peer if they agree on the protocol version and the +//! // network name. +//! const VERSION: u32 = 1; +//! +//! // Read the config from a JSON file/string (TOML is also supported). +//! let config = read_config(); +//! +//! // Create a random local entity, that announces two services: +//! let local = { +//! let l = Local::generate(); +//! +//! l.add_service( +//! AUTOPEERING_SERVICE_NAME, +//! ServiceProtocol::Udp, +//! config.bind_addr().port(), +//! ); +//! l.add_service(NETWORK, ServiceProtocol::Tcp, 15600); +//! l +//! }; +//! +//! // You can choose between the `InMemoryPeerStore` (non-persistent), the `SledPeerStore` +//! // (persistent), or your own implementation that implements the `PeerStore` trait. +//! let peer_store_config = SledPeerStoreConfig::new().path("./peerstore"); +//! +//! // The `NeighborValidator` allows you to accept only certain peers as neighbors, e.g. only those +//! // with enabled Gossip service. +//! let neighbor_validator = GossipNeighborValidator {}; +//! +//! // You need to provide some form of shutdown signal (any `Future` impl is allowed). +//! let term_signal = tokio::signal::ctrl_c(); +//! +//! // With initializing the autopeering system you receive an event stream receiver. +//! let mut event_rx = bee_autopeering::init::( +//! config.clone(), +//! VERSION, +//! NETWORK, +//! local, +//! peer_store_config, +//! term_signal, +//! neighbor_validator, +//! ) +//! .await +//! .expect("initializing autopeering system failed"); +//! +//! // You can then process autopeering events. +//! loop { +//! tokio::select! { +//! e = event_rx.recv() => { +//! if let Some(event) = e { +//! // handle the event +//! // process(event); +//! } else { +//! break; +//! } +//! } +//! }; +//! } +//! } +//! +//! #[derive(Clone)] +//! struct GossipNeighborValidator {} +//! +//! impl NeighborValidator for GossipNeighborValidator { +//! fn is_valid(&self, peer: &Peer) -> bool { +//! peer.has_service(NETWORK) +//! } +//! } +//! 
``` + +#![deny(missing_docs)] + +mod delay; +mod discovery; +mod hash; +mod local; +mod multiaddr; +mod packet; +mod peer; +mod peering; +mod proto { + include!(concat!(env!("OUT_DIR"), "/proto.rs")); +} +mod request; +mod server; +mod task; +mod time; + +pub mod config; +pub mod event; +pub mod init; + +pub use config::AutopeeringConfig; +pub use event::Event; +pub use init::init; +pub use local::{ + services::{ServiceEndpoint, ServiceMap, ServiceName, ServiceProtocol, AUTOPEERING_SERVICE_NAME}, + Local, +}; +pub use peer::{peer_id, peer_id::PeerId, stores, Peer}; +pub use peering::{Distance, NeighborValidator, Status}; diff --git a/bee-network/bee-autopeering/src/local/mod.rs b/bee-network/bee-autopeering/src/local/mod.rs new file mode 100644 index 0000000000..00d24c8769 --- /dev/null +++ b/bee-network/bee-autopeering/src/local/mod.rs @@ -0,0 +1,241 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod salt; +pub mod services; + +use self::{ + salt::{Salt, SALT_LIFETIME_SECS}, + services::{ServiceMap, ServiceProtocol}, +}; + +use crate::peer::PeerId; + +use crypto::signatures::ed25519::{PublicKey, SecretKey as PrivateKey, Signature, SECRET_KEY_LENGTH}; +use libp2p_core::identity::ed25519::Keypair; + +use std::{ + fmt, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("could not create Salt from ED25519 keypair")] + SaltFromEd25519Keypair, + #[error("could not create Salt from Base16/Hex private key")] + SaltFromBase16EncodedPrivateKey, + #[error("could not create Salt from Base58 private key")] + SaltFromBase58EncodedPrivateKey, + #[error("could not deserialize Salt from Protobuf")] + DeserializeFromProtobuf, +} + +/// Represents a local entity. +/// +/// It allows: +/// * message signing and verification; +/// * neighbor distance calculation; +/// * service announcements; +#[derive(Clone)] +pub struct Local { + inner: Arc>, +} + +pub struct LocalInner { + peer_id: PeerId, + public_salt: Salt, + private_key: PrivateKey, + private_salt: Salt, + services: ServiceMap, +} + +impl Local { + /// Generates a new random local identity. + pub fn generate() -> Self { + // Panic: will only fail if there's an OS issue. + let private_key = PrivateKey::generate().expect("error generating private key"); + let peer_id = PeerId::from_public_key(private_key.public_key()); + + let inner = LocalInner { + peer_id, + public_salt: Salt::default(), + private_key, + private_salt: Salt::default(), + services: ServiceMap::default(), + }; + + Self { + inner: Arc::new(RwLock::new(inner)), + } + } + + /// Creates a local identity from an ED25519 keypair. + pub fn from_keypair(keypair: Keypair) -> Result { + let private_key_bytes: [u8; SECRET_KEY_LENGTH] = keypair + .secret() + .as_ref() + .try_into() + .map_err(|_| Error::SaltFromEd25519Keypair)?; + + Ok(Self::from_private_key_bytes(private_key_bytes)) + } + + /// Creates a local identity from a 'base16/hex' encoded ED25519 private key. + pub fn from_bs16_encoded_private_key(private_key: impl AsRef) -> Result { + let mut private_key_bytes = [0u8; SECRET_KEY_LENGTH]; + hex::decode_to_slice(private_key.as_ref(), &mut private_key_bytes) + .map_err(|_| Error::SaltFromBase16EncodedPrivateKey)?; + + Ok(Self::from_private_key_bytes(private_key_bytes)) + } + + /// Creates a local identity from a 'base58' encoded ED25519 private key. 
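+    ///
+    /// Fails with `Error::SaltFromBase58EncodedPrivateKey` if the string is not valid Base58 or
+    /// does not fit into the expected key length.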
+ pub fn from_bs58_encoded_private_key(private_key: impl AsRef) -> Result { + // Restore the private key + let mut private_key_bytes = [0u8; SECRET_KEY_LENGTH]; + bs58::decode(private_key.as_ref()) + .into(&mut private_key_bytes) + .map_err(|_| Error::SaltFromBase58EncodedPrivateKey)?; + + Ok(Self::from_private_key_bytes(private_key_bytes)) + } + + /// Creates a local identity from bytes representing an ED25519 private key. + pub fn from_private_key_bytes(private_key_bytes: [u8; SECRET_KEY_LENGTH]) -> Self { + let private_key = PrivateKey::from_bytes(private_key_bytes); + let public_key = private_key.public_key(); + let peer_id = PeerId::from_public_key(public_key); + + Self { + inner: Arc::new(RwLock::new(LocalInner { + peer_id, + private_key, + private_salt: Salt::new(SALT_LIFETIME_SECS), + public_salt: Salt::new(SALT_LIFETIME_SECS), + services: ServiceMap::default(), + })), + } + } + + /// Returns the peer id of this identity. + pub(crate) fn peer_id(&self) -> PeerId { + *self.read().peer_id() + } + + /// Returns the public key of this identity. + pub(crate) fn public_key(&self) -> PublicKey { + *self.read().public_key() + } + + /// Returns the current private salt of this identity. + pub(crate) fn private_salt(&self) -> Salt { + self.read().private_salt().clone() + } + + /// Sets a new private salt. + pub(crate) fn set_private_salt(&self, salt: Salt) { + self.write().set_private_salt(salt); + } + + /// Returns the current public salt of this identity. + pub(crate) fn public_salt(&self) -> Salt { + self.read().public_salt().clone() + } + + /// Sets a new public salt. + pub(crate) fn set_public_salt(&self, salt: Salt) { + self.write().set_public_salt(salt); + } + + /// Signs a message using the private key. + pub(crate) fn sign(&self, msg: &[u8]) -> Signature { + self.read().sign(msg) + } + + /// Adds a service to this local peer. + pub fn add_service(&self, service_name: impl ToString, protocol: ServiceProtocol, port: u16) { + self.write().add_service(service_name, protocol, port); + } + + /// Returns the list of services this identity supports. + pub(crate) fn services(&self) -> ServiceMap { + self.read().services().clone() + } + + fn read(&self) -> RwLockReadGuard { + // Panic: we do not allow the lock to be poisened. + self.inner.read().expect("error getting read access") + } + + fn write(&self) -> RwLockWriteGuard { + // Panic: we do not allow the lock to be poisened. 
+ self.inner.write().expect("error getting write access") + } +} + +impl LocalInner { + fn peer_id(&self) -> &PeerId { + &self.peer_id + } + + fn public_key(&self) -> &PublicKey { + self.peer_id().public_key() + } + + fn private_salt(&self) -> &Salt { + &self.private_salt + } + + fn set_private_salt(&mut self, salt: Salt) { + self.private_salt = salt; + } + + fn public_salt(&self) -> &Salt { + &self.public_salt + } + + fn set_public_salt(&mut self, salt: Salt) { + self.public_salt = salt; + } + + fn sign(&self, msg: &[u8]) -> Signature { + self.private_key.sign(msg) + } + + fn add_service(&mut self, service_name: impl ToString, protocol: ServiceProtocol, port: u16) { + self.services.insert(service_name, protocol, port) + } + + fn services(&self) -> &ServiceMap { + &self.services + } +} + +impl fmt::Debug for Local { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Local") + .field("peer_id", &self.read().peer_id()) + .finish() + } +} + +impl fmt::Display for Local { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.read().peer_id()) + } +} + +impl Eq for Local {} +impl PartialEq for Local { + fn eq(&self, other: &Self) -> bool { + self.read().peer_id() == other.read().peer_id() + } +} + +impl Eq for LocalInner {} +impl PartialEq for LocalInner { + fn eq(&self, other: &Self) -> bool { + self.peer_id == other.peer_id + } +} diff --git a/bee-network/bee-autopeering/src/local/salt.rs b/bee-network/bee-autopeering/src/local/salt.rs new file mode 100644 index 0000000000..e57bdd801a --- /dev/null +++ b/bee-network/bee-autopeering/src/local/salt.rs @@ -0,0 +1,93 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use super::Error; + +use crate::{ + proto, + time::{self, Timestamp, HOUR}, +}; + +use ring::rand::{SecureRandom as _, SystemRandom}; + +use std::time::{Duration, SystemTime}; + +const SALT_BYTE_LEN: usize = 20; +pub(crate) const SALT_LIFETIME_SECS: Duration = Duration::from_secs(2 * HOUR); + +#[derive(Clone, Debug)] +pub struct Salt { + pub(crate) bytes: [u8; SALT_BYTE_LEN], + pub(crate) expiration_time: u64, +} + +impl Salt { + pub fn new(lifetime: Duration) -> Self { + let expiration_time = time::unix_time_secs( + SystemTime::now() + .checked_add(lifetime) + .expect("system clock error or lifetime too long"), + ); + + let mut rand_bytes = [0u8; SALT_BYTE_LEN]; + let crypto_rng = SystemRandom::new(); + crypto_rng + .fill(&mut rand_bytes) + .expect("error generating secure random bytes"); + + Self { + bytes: rand_bytes, + expiration_time, + } + } + + pub fn bytes(&self) -> &[u8; SALT_BYTE_LEN] { + &self.bytes + } + + pub fn expiration_time(&self) -> u64 { + self.expiration_time + } +} + +impl Default for Salt { + fn default() -> Self { + Self::new(SALT_LIFETIME_SECS) + } +} + +impl TryFrom for Salt { + type Error = Error; + + fn try_from(salt: proto::Salt) -> Result { + let proto::Salt { bytes, exp_time } = salt; + + Ok(Self { + bytes: bytes.try_into().map_err(|_| Error::DeserializeFromProtobuf)?, + expiration_time: exp_time, + }) + } +} + +pub(crate) fn is_expired(timestamp: Timestamp) -> bool { + timestamp < time::unix_now_secs() +} + +#[cfg(test)] +mod tests { + use super::*; + + impl Salt { + pub(crate) fn new_zero_salt() -> Self { + let expiration_time = time::unix_time_secs( + SystemTime::now() + .checked_add(SALT_LIFETIME_SECS) + .expect("system clock error or lifetime too long"), + ); + Self { + bytes: [0u8; SALT_BYTE_LEN], + expiration_time, + } + } + } +} diff --git 
a/bee-network/bee-autopeering/src/local/services.rs b/bee-network/bee-autopeering/src/local/services.rs new file mode 100644 index 0000000000..ce2441902d --- /dev/null +++ b/bee-network/bee-autopeering/src/local/services.rs @@ -0,0 +1,194 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::proto; + +use libp2p_core::multiaddr::Protocol; +use serde::{Deserialize, Serialize}; + +use std::{collections::HashMap, fmt, io, str::FromStr}; + +/// Represents the name of a service. +pub type ServiceName = String; +pub(crate) type ServicePort = u16; + +/// The announced name of the autopeering service. +pub const AUTOPEERING_SERVICE_NAME: &str = "peering"; + +/// A mapping between a service name and its endpoint data. +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ServiceMap(HashMap); + +impl ServiceMap { + /// Registers a service with its bind address. + pub(crate) fn insert(&mut self, service_name: impl ToString, protocol: ServiceProtocol, port: ServicePort) { + self.0 + .insert(service_name.to_string(), ServiceEndpoint { protocol, port }); + } + + /// Returns the connection data associated with the given service name. + pub fn get(&self, service_name: impl AsRef) -> Option { + self.0.get(service_name.as_ref()).copied() + } + + /// Returns the number of services. + pub(crate) fn len(&self) -> usize { + self.0.len() + } +} + +impl TryFrom for ServiceMap { + type Error = Error; + + fn try_from(services: proto::ServiceMap) -> Result { + let proto::ServiceMap { map } = services; + + let mut services = HashMap::with_capacity(map.len()); + + for (service_name, proto::NetworkAddress { network, port }) in map { + let protocol: ServiceProtocol = network.parse().map_err(|_| Error::ServiceProtocol)?; + + if port > u16::MAX as u32 { + return Err(Error::PortNumber); + } + let port = port as u16; + + services.insert(service_name, ServiceEndpoint { protocol, port }); + } + + Ok(Self(services)) + } +} + +impl From<&ServiceMap> for proto::ServiceMap { + fn from(services: &ServiceMap) -> Self { + let ServiceMap(map) = services; + + let mut services = HashMap::with_capacity(map.len()); + + for (service_name, ServiceEndpoint { protocol, port }) in map { + let network_addr = proto::NetworkAddress { + network: protocol.to_string(), + port: *port as u32, + }; + + services.insert(service_name.to_owned(), network_addr); + } + + Self { map: services } + } +} + +// Example: "peering/udp/14626;gossip/tcp/14625" +impl fmt::Display for ServiceMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}", + self.0 + .iter() + .map(|(service_name, service)| format!("{}/{}/{}", service_name, service.protocol, service.port)) + .reduce(|acc, service_spec| acc + ";" + &service_spec) + .unwrap_or_default() + ) + } +} + +// TODO: consider reducing this into an enum that holds the port number. +/// Represents a service provided by a peer. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct ServiceEndpoint { + protocol: ServiceProtocol, + port: ServicePort, +} + +impl ServiceEndpoint { + /// The transport protocol used to access the service, e.g. TCP or UDP. + pub fn protocol(&self) -> ServiceProtocol { + self.protocol + } + + /// The access port of the service. + pub fn port(&self) -> ServicePort { + self.port + } + + /// Creates the corresponding `libp2p_core::multiaddr::Protocol` of this service endpoint. 
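The conversions above rely on `ServiceProtocol` printing and parsing as plain "tcp"/"udp" (the `Display`/`FromStr` impls follow right below), which keeps the textual service encoding, e.g. "peering/udp/14626;gossip/tcp/14625", consistent with the protobuf form. A minimal sketch of that round trip:

```rust
use bee_autopeering::ServiceProtocol;
use std::str::FromStr;

fn main() {
    // "udp" and "tcp" are the only transports accepted by `FromStr`.
    let udp = ServiceProtocol::from_str("udp").expect("supported transport");
    assert!(matches!(udp, ServiceProtocol::Udp));
    assert_eq!(udp.to_string(), "udp");

    // Anything else is rejected with an `io::Error` of kind `InvalidData`.
    assert!(ServiceProtocol::from_str("quic").is_err());
}
```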
+ pub fn to_libp2p_protocol(&self) -> Protocol<'_> { + match self.protocol { + ServiceProtocol::Tcp => Protocol::Tcp(self.port), + ServiceProtocol::Udp => Protocol::Udp(self.port), + } + } +} + +/// Supported protocols of announced services. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub enum ServiceProtocol { + /// TCP + Tcp, + /// UDP + Udp, +} + +impl fmt::Display for ServiceProtocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let protocol = match self { + ServiceProtocol::Udp => "udp", + ServiceProtocol::Tcp => "tcp", + }; + write!(f, "{}", protocol) + } +} + +impl FromStr for ServiceProtocol { + type Err = io::Error; + + fn from_str(s: &str) -> Result { + match s { + "tcp" => Ok(Self::Tcp), + "udp" => Ok(Self::Udp), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "unsupported transport protocol", + )), + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("parsing service protocol failed")] + ServiceProtocol, + #[error("invalid port number")] + PortNumber, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::proto; + + #[test] + fn convert_service_map() { + let mut map = HashMap::new(); + map.insert( + "autopeering".into(), + proto::NetworkAddress { + network: "udp".into(), + port: 80, + }, + ); + map.insert( + "fpc".into(), + proto::NetworkAddress { + network: "tcp".into(), + port: 8000, + }, + ); + let proto_services = proto::ServiceMap { map }; + + let services: &ServiceMap = &proto_services.try_into().unwrap(); + let _: proto::ServiceMap = services.into(); + } +} diff --git a/bee-network/bee-autopeering/src/multiaddr.rs b/bee-network/bee-autopeering/src/multiaddr.rs new file mode 100644 index 0000000000..0d640da312 --- /dev/null +++ b/bee-network/bee-autopeering/src/multiaddr.rs @@ -0,0 +1,297 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crypto::signatures::ed25519::{PublicKey, PUBLIC_KEY_LENGTH}; +use libp2p_core::multiaddr::{Multiaddr, Protocol}; +use serde::{ + de::{self, Visitor}, + Deserialize, Serialize, Serializer, +}; +use tokio::net::lookup_host; + +use std::{ + fmt, + hash::Hash, + net::{IpAddr, SocketAddr}, + ops::RangeInclusive, + str::FromStr, +}; + +const AUTOPEERING_MULTIADDR_PROTOCOL_NAME: &str = "autopeering"; +const PUBKEY_BASE58_SIZE_RANGE: RangeInclusive = 42..=44; + +/// The different supported kinds of addresses. +pub enum AddressKind { + /// Static `IPv4` address. + Ip4, + /// Static `IPv6` address. + Ip6, + /// A domain name that needs to be resolved to an `IPv4`, `IPv6` address (or both) at runtime. + Dns, +} + +// Note: +// Go-libp2p allows Hornet to introduce a custom autopeering [`Protocol`]. In rust-libp2p we unfortunately can't do +// that, so what we'll do is to introduce a wrapper type, which understands Hornet's custom multiaddr, and internally +// stores the address part and the key part separatedly. The details are abstracted away and the behavior identical +// to a standard libp2p multiaddress. + +/// Represents a special type of [`Multiaddr`] used to describe peers that particpate in autopeering, i.e. make +/// themselves discoverable by other peers. +/// +/// Example: +/// +/// "/dns/chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb" +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct AutopeeringMultiaddr { + address: Multiaddr, + public_key: PublicKey, + resolved_addrs: Vec, +} + +impl AutopeeringMultiaddr { + /// Returns the address part. 
+ pub fn address(&self) -> &Multiaddr { + &self.address + } + + /// Returns the kind of this address. + pub fn address_kind(&self) -> AddressKind { + // Panic: `self` must always contain a valid address. + let kind = self.address.iter().next().expect("invalid multiaddress"); + match kind { + Protocol::Ip4(_) => AddressKind::Ip4, + Protocol::Ip6(_) => AddressKind::Ip6, + Protocol::Dns(_) => AddressKind::Dns, + _ => panic!("unsupported address kind"), + } + } + + /// Returns the corresponding [`SocketAddr`] iff it contains an IPv4 or IPv6 address. + /// + /// Note: If the [`Multiaddr`] contains a DNS address, then `None` will be returned. In that case you + /// should call `resolve_dns` and then `resolved_addrs` to get the corresponding [`SocketAddr`]s. + pub fn socket_addr(&self) -> Option { + let mut multiaddr_iter = self.address().iter(); + + // Panic: `self` must always contain a valid address. + let ip_addr = match multiaddr_iter.next().expect("error extracting ip address") { + Protocol::Ip4(ip4_addr) => IpAddr::V4(ip4_addr), + Protocol::Ip6(ip6_addr) => IpAddr::V6(ip6_addr), + Protocol::Dns(_) => return None, + _ => panic!("invalid multiaddr"), + }; + + // Panic: `self` must always contain a valid address. + let port = match multiaddr_iter.next().expect("error extracting port") { + Protocol::Udp(port) => port, + _ => panic!("invalid autopeering multiaddr"), + }; + + Some(SocketAddr::new(ip_addr, port)) + } + + /// Returns the [`PublicKey`]. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns the resolved [`SocketAddr`]s determined by the last call to `resolve_dns`. If that method + /// was never called before, the returned slice will be empty. + pub fn resolved_addrs(&self) -> &[SocketAddr] { + &self.resolved_addrs[..] + } + + /// Performs DNS resolution if this multiaddr contains a DNS address. + pub async fn resolve_dns(&mut self) -> bool { + self.resolved_addrs.clear(); + + let mut address_iter = self.address.iter(); + + // Panic: `self` must always contain a valid address. + let dns = match address_iter.next().expect("error extracting ip address") { + Protocol::Dns(dns) => dns, + _ => return false, + }; + + // Panic: `self` must always contain a valid address. 
+ let port = match address_iter.next().expect("error extracting port") { + Protocol::Udp(port) => port, + _ => panic!("invalid autopeering multiaddr"), + }; + + let host = format!("{}:{}", dns.as_ref(), port); + + if let Ok(socket_addrs) = lookup_host(host).await { + self.resolved_addrs.extend(socket_addrs); + true + } else { + false + } + } +} + +impl<'de> Deserialize<'de> for AutopeeringMultiaddr { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(AutopeeringMultiaddrVisitor) + } +} + +impl Serialize for AutopeeringMultiaddr { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let s = format_autopeering_multiaddr(&self.address, &self.public_key); + serializer.serialize_str(&s) + } +} + +impl fmt::Debug for AutopeeringMultiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AutopeeringMultiaddr") + .field("address", &self.address) + .field("public_key", &pubkey_to_base58(&self.public_key)) + .finish() + } +} + +impl fmt::Display for AutopeeringMultiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&format_autopeering_multiaddr(&self.address, &self.public_key)) + } +} + +fn format_autopeering_multiaddr(host_multiaddr: &Multiaddr, public_key: &PublicKey) -> String { + format!( + "{}/{}/{}", + host_multiaddr, + AUTOPEERING_MULTIADDR_PROTOCOL_NAME, + pubkey_to_base58(public_key), + ) +} + +impl FromStr for AutopeeringMultiaddr { + type Err = Error; + + fn from_str(s: &str) -> Result { + let parts = s + .split_terminator(&format!("/{}/", AUTOPEERING_MULTIADDR_PROTOCOL_NAME)) + .collect::>(); + + if parts.len() != 2 { + return Err(Error::AutopeeringMultiaddr); + } + + let address = parts[0].parse().map_err(|_| Error::AutopeeringMultiaddrAddressPart)?; + let public_key = base58_to_pubkey(parts[1])?; + let resolved_addrs = Vec::new(); + + Ok(Self { + address, + public_key, + resolved_addrs, + }) + } +} + +struct AutopeeringMultiaddrVisitor; + +impl<'de> Visitor<'de> for AutopeeringMultiaddrVisitor { + type Value = AutopeeringMultiaddr; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("autopeering multiaddr") + } + + fn visit_string(self, value: String) -> Result + where + E: de::Error, + { + value.parse().map_err(de::Error::custom) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + value.parse().map_err(de::Error::custom) + } + + fn visit_borrowed_str(self, value: &str) -> Result + where + E: de::Error, + { + value.parse().map_err(de::Error::custom) + } +} + +pub(crate) fn pubkey_to_base58(pub_key: &PublicKey) -> String { + bs58::encode(pub_key.to_bytes()).into_string() +} + +pub(crate) fn base58_to_pubkey(base58_pubkey: impl AsRef) -> Result { + if !PUBKEY_BASE58_SIZE_RANGE.contains(&base58_pubkey.as_ref().len()) { + return Err(Error::Base58PublicKeyEncoding); + } + + let mut bytes = [0u8; PUBLIC_KEY_LENGTH]; + + bs58::decode(base58_pubkey.as_ref()) + .into(&mut bytes) + .map_err(|_| Error::Base58PublicKeyEncoding)?; + + PublicKey::try_from_bytes(bytes).map_err(|_| Error::Base58PublicKeyEncoding) +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Returned, if the host address part wasn't a valid multi address. + #[error("invalid autopeering multi address address part")] + AutopeeringMultiaddrAddressPart, + /// Returned, if the public key part wasn't a base58 encoded ed25519 public key. 
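`AutopeeringMultiaddr` is not re-exported, so outside the crate entry nodes are only ever supplied as strings in the config; inside the crate the parsed form can be inspected as sketched below. The address used here is one of the entry nodes from the tests that follow.

```rust
// Crate-internal sketch (the type is not part of the public API).
use crate::multiaddr::{AddressKind, AutopeeringMultiaddr};

fn example() {
    let addr: AutopeeringMultiaddr =
        "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb"
            .parse()
            .expect("valid autopeering multiaddr");

    // DNS-based entries carry no socket address until `resolve_dns` has been awaited.
    assert!(matches!(addr.address_kind(), AddressKind::Dns));
    assert!(addr.socket_addr().is_none());
    assert!(addr.resolved_addrs().is_empty());
}
```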
+ #[error("invalid autopeering multi address public key part")] + AutopeeringMultiaddrPubKeyPart, + /// Returned, if it's not a valid autopeering multi address for any other reason. + #[error("invalid autopeering multi address")] + AutopeeringMultiaddr, + /// Returned, if the base58 encoding of the public key was invalid. + #[error("invalid base58 encoded public key")] + Base58PublicKeyEncoding, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn convert_between_base58_and_pubkey() { + let base58_pubkey = "4H6WV54tB29u8xCcEaMGQMn37LFvM1ynNpp27TTXaqNM"; + let pubkey = base58_to_pubkey(base58_pubkey).unwrap(); + + assert_eq!(base58_pubkey, pubkey_to_base58(&pubkey)) + } + + #[test] + fn parse_sample_autopeering_multiaddr() { + let bs58_pubkey = "HmKTkSd9F6nnERBvVbr55FvL1hM5WfcLvsc9bc3hWxWc"; + let autopeering_multiaddr = format!("/ip4/127.0.0.1/udp/14626/autopeering/{}", bs58_pubkey); + + let _: AutopeeringMultiaddr = autopeering_multiaddr + .parse() + .expect("parsing autopeering multiaddr failed"); + } + + #[test] + fn parse_entrynode_multiaddrs() { + let _: AutopeeringMultiaddr = "/dns/entry-hornet-0.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaPHdAn7eueBnXtikZMwhfPXaeGJGXDt4RBuLuGgb".parse().unwrap(); + let _: AutopeeringMultiaddr = "/dns/entry-hornet-1.h.chrysalis-mainnet.iotaledger.net/udp/14626/autopeering/iotaJJqMd5CQvv1A61coSQCYW9PNT1QKPs7xh2Qg5K2".parse().unwrap(); + let _: AutopeeringMultiaddr = + "/dns/entry-mainnet.tanglebay.com/udp/14626/autopeering/iot4By1FD4pFLrGJ6AAe7YEeSu9RbW9xnPUmxMdQenC" + .parse() + .unwrap(); + } +} diff --git a/bee-network/bee-autopeering/src/packet.rs b/bee-network/bee-autopeering/src/packet.rs new file mode 100644 index 0000000000..76b1cc09b5 --- /dev/null +++ b/bee-network/bee-autopeering/src/packet.rs @@ -0,0 +1,153 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! IOTA network packets. + +use crate::{peer::peer_id::PeerId, proto}; + +use base64 as bs64; +use crypto::signatures::ed25519::{PublicKey, Signature}; +use num_derive::FromPrimitive; +use prost::{bytes::BytesMut, DecodeError, EncodeError, Message}; + +use std::{fmt, net::SocketAddr, ops::Range}; + +// From `hive.go` docs: +// * specifies the maximum allowed size of packets; +// * packets larger than this will be cut and thus treated as invalid; +pub(crate) const MAX_PACKET_SIZE: usize = 1280; + +pub(crate) const DISCOVERY_MSG_TYPE_MIN: u8 = 10; +pub(crate) const DISCOVERY_MSG_TYPE_RANGE: Range = DISCOVERY_MSG_TYPE_MIN..(DISCOVERY_MSG_TYPE_MIN + 4); +pub(crate) const PEERING_MSG_TYPE_MIN: u8 = 20; +pub(crate) const PEERING_MSG_TYPE_RANGE: Range = PEERING_MSG_TYPE_MIN..(PEERING_MSG_TYPE_MIN + 3); + +/// Represents an IOTA packet. +pub(crate) struct Packet { + msg_type: MessageType, + msg_bytes: Vec, + public_key: PublicKey, + signature: Signature, +} + +impl Packet { + /// Creates a new packet. + pub(crate) fn new(msg_type: MessageType, msg_bytes: &[u8], public_key: PublicKey, signature: Signature) -> Self { + Self { + msg_type, + msg_bytes: msg_bytes.to_vec(), + public_key, + signature, + } + } + + /// Returns the message bytes contained in this packet. + pub(crate) fn msg_bytes(&self) -> &[u8] { + // &self.0.data + &self.msg_bytes + } + + /// Returns the public key belonging to the issuer of this packet. + pub(crate) fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns the signature belonging to the issuer of this packet. 
+ pub(crate) fn signature(&self) -> &Signature { + &self.signature + } + + /// Restores a packet from its protobuf representation. + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::Packet { + r#type, + data, + public_key, + signature, + } = proto::Packet::decode(bytes)?; + + let public_key = PublicKey::try_from_bytes(public_key.try_into().map_err(|_| Error::RestorePublicKey)?) + .map_err(|_| Error::RestorePublicKey)?; + + let signature = Signature::from_bytes(signature.try_into().map_err(|_| Error::RestoreSignature)?); + + Ok(Self { + msg_type: num::FromPrimitive::from_u8(r#type as u8).ok_or(Error::UnknownMessageType)?, + msg_bytes: data.to_vec(), + public_key, + signature, + }) + } + + /// Returns the protobuf representation of this packet + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let proto_packet = proto::Packet { + r#type: self.msg_type as u32, + data: self.msg_bytes.to_vec(), + public_key: self.public_key.to_bytes().to_vec(), + signature: self.signature.to_bytes().to_vec(), + }; + + let mut buf = BytesMut::with_capacity(proto_packet.encoded_len()); + + // Panic: we have allocated a properly sized buffer. + proto_packet.encode(&mut buf).expect("encoding packet failed"); + + buf + } +} + +impl fmt::Debug for Packet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Packet") + .field("msg_type", &self.msg_type) + .field("msg_bytes", &bs64::encode(&self.msg_bytes)) + .field("public_key", &bs58::encode(&self.public_key).into_string()) + .field("signature", &bs58::encode(&self.signature.to_bytes()).into_string()) + .finish() + } +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum Error { + #[error("prost decode error")] + ProtobufDecode(#[from] DecodeError), + #[error("prost encode error")] + ProtobufEncode(#[from] EncodeError), + #[error("failed to restore public key")] + RestorePublicKey, + #[error("failed to restore signature")] + RestoreSignature, + #[error("unknown message type")] + UnknownMessageType, +} + +/// The possible types of messages stored in a packet. +#[derive(Clone, Copy, Debug, FromPrimitive)] +#[repr(u8)] +#[non_exhaustive] +pub(crate) enum MessageType { + VerificationRequest = DISCOVERY_MSG_TYPE_MIN, + VerificationResponse, + DiscoveryRequest, + DiscoveryResponse, + PeeringRequest = PEERING_MSG_TYPE_MIN, + PeeringResponse, + DropRequest, +} + +#[derive(Debug)] +pub(crate) struct IncomingPacket { + pub(crate) msg_type: MessageType, + pub(crate) msg_bytes: Vec, + pub(crate) peer_addr: SocketAddr, + pub(crate) peer_id: PeerId, +} + +#[derive(Debug)] +pub(crate) struct OutgoingPacket { + pub(crate) msg_type: MessageType, + pub(crate) msg_bytes: Vec, + pub(crate) peer_addr: SocketAddr, +} diff --git a/bee-network/bee-autopeering/src/peer/lists.rs b/bee-network/bee-autopeering/src/peer/lists.rs new file mode 100644 index 0000000000..138ddfc38d --- /dev/null +++ b/bee-network/bee-autopeering/src/peer/lists.rs @@ -0,0 +1,384 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use super::{peer_id::PeerId, Peer}; + +use crate::{ + discovery::manager::VERIFICATION_EXPIRATION, + time::{self, Timestamp}, +}; + +use serde::{ + de::{SeqAccess, Visitor}, + ser::SerializeStruct, + Deserialize, Serialize, +}; + +use std::{ + collections::{HashSet, VecDeque}, + fmt, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +// Maximum number of peers that can be managed. 
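The packet layer is crate-internal, but the intended flow is straightforward to sketch: sign the message bytes with the local identity, wrap them in a `Packet`, and round-trip through the protobuf encoding. The payload below is an arbitrary placeholder.

```rust
// Crate-internal sketch; `Packet`, `MessageType` and `Local::sign` are pub(crate).
use crate::{
    local::Local,
    packet::{MessageType, Packet},
};

fn example() {
    let local = Local::generate();
    let msg_bytes = b"discovery request payload";

    // The receiver verifies this signature against the embedded public key.
    let signature = local.sign(msg_bytes);
    let packet = Packet::new(MessageType::DiscoveryRequest, msg_bytes, local.public_key(), signature);

    let encoded = packet.to_protobuf();
    let decoded = Packet::from_protobuf(&encoded).expect("valid packet encoding");
    assert_eq!(decoded.msg_bytes(), &msg_bytes[..]);
}
```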
+const DEFAULT_MAX_MANAGED: usize = 1000; +// Maximum number of peers kept in the replacement list. +const DEFAULT_MAX_REPLACEMENTS: usize = 10; + +type ActivePeersListInner = PeerRing; +type ReplacementPeersListInner = PeerRing; +type EntryPeersListInner = HashSet; + +#[derive(Clone)] +pub struct ActivePeer { + peer: Peer, + metrics: PeerMetrics, +} + +impl ActivePeer { + pub(crate) fn new(peer: Peer) -> Self { + Self { + peer, + metrics: PeerMetrics::default(), + } + } + + pub(crate) fn peer(&self) -> &Peer { + &self.peer + } + + pub(crate) fn peer_mut(&mut self) -> &mut Peer { + &mut self.peer + } + + pub(crate) fn peer_id(&self) -> &PeerId { + self.peer.peer_id() + } + + pub(crate) fn metrics(&self) -> &PeerMetrics { + &self.metrics + } + + pub(crate) fn metrics_mut(&mut self) -> &mut PeerMetrics { + &mut self.metrics + } + + pub(crate) fn into_peer(self) -> Peer { + self.peer + } + + pub(crate) fn to_bytes(&self) -> Vec { + bincode::serialize(self).expect("serialization error") + } + + pub(crate) fn from_bytes(bytes: &[u8]) -> Self { + bincode::deserialize(bytes).expect("deserialization error") + } +} + +impl Eq for ActivePeer {} +impl PartialEq for ActivePeer { + fn eq(&self, other: &Self) -> bool { + self.peer.peer_id() == other.peer.peer_id() + } +} + +impl From for ActivePeer { + fn from(peer: Peer) -> Self { + Self::new(peer) + } +} + +impl AsRef for ActivePeer { + fn as_ref(&self) -> &PeerId { + self.peer.peer_id() + } +} + +impl AsRef for ActivePeer { + fn as_ref(&self) -> &Peer { + &self.peer + } +} + +impl From for sled::IVec { + fn from(peer: ActivePeer) -> Self { + bincode::serialize(&peer).expect("serialization error").into() + } +} + +impl From for ActivePeer { + fn from(bytes: sled::IVec) -> Self { + bincode::deserialize(bytes.as_ref()).expect("deserialization error") + } +} + +impl<'de> Deserialize<'de> for ActivePeer { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_struct("ActivePeer", &["peer", "metrics"], ActivePeerVisitor {}) + } +} + +impl Serialize for ActivePeer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut this = serializer.serialize_struct("ActivePeer", 2)?; + this.serialize_field("peer", &self.peer)?; + this.serialize_field("metrics", &self.metrics)?; + this.end() + } +} + +struct ActivePeerVisitor {} + +impl<'de> Visitor<'de> for ActivePeerVisitor { + type Value = ActivePeer; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("'ActivePeer'") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let peer = seq + .next_element::()? + .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; + + let metrics = seq + .next_element::()? + .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?; + + Ok(ActivePeer { peer, metrics }) + } +} + +// TODO: stop exposing lock guards. +#[derive(Clone, Default)] +pub struct ActivePeersList { + inner: Arc>, +} + +impl ActivePeersList { + pub(crate) fn read(&self) -> RwLockReadGuard { + // Panic: we don't allow poisened locks. + self.inner.read().expect("error getting read access") + } + + pub(crate) fn write(&self) -> RwLockWriteGuard { + // Panic: we don't allow poisened locks. 
+ self.inner.write().expect("error getting write access") + } +} + +#[derive(Clone, Default)] +pub struct ReplacementPeersList { + inner: Arc>, +} + +impl ReplacementPeersList { + pub(crate) fn read(&self) -> RwLockReadGuard { + self.inner.read().expect("error getting read access") + } + + pub(crate) fn write(&self) -> RwLockWriteGuard { + self.inner.write().expect("error getting write access") + } +} + +#[derive(Clone, Default)] +pub(crate) struct EntryPeersList { + inner: Arc>, +} + +impl EntryPeersList { + pub(crate) fn read(&self) -> RwLockReadGuard { + self.inner.read().expect("error getting read access") + } + + pub(crate) fn write(&self) -> RwLockWriteGuard { + self.inner.write().expect("error getting write access") + } +} + +#[derive(Clone, Copy, Default, Serialize, Deserialize)] +pub(crate) struct PeerMetrics { + // how often that peer has been re-verified + verified_count: usize, + // number of returned new peers when queried the last time + last_new_peers: usize, + // timestamp of last verification request received + last_verif_request: Timestamp, + // timestamp of last verification request received + last_verif_response: Timestamp, +} + +impl PeerMetrics { + pub(crate) fn verified_count(&self) -> usize { + self.verified_count + } + + /// Increments the verified counter, and returns the new value. + pub(crate) fn increment_verified_count(&mut self) -> usize { + self.verified_count += 1; + self.verified_count + } + + pub(crate) fn reset_verified_count(&mut self) { + self.verified_count = 0; + } + + pub(crate) fn last_new_peers(&self) -> usize { + self.last_new_peers + } + + pub(crate) fn set_last_new_peers(&mut self, last_new_peers: usize) { + self.last_new_peers = last_new_peers; + } + + pub(crate) fn set_last_verif_request_timestamp(&mut self) { + self.last_verif_request = time::unix_now_secs(); + } + + pub(crate) fn set_last_verif_response_timestamp(&mut self) { + self.last_verif_response = time::unix_now_secs(); + } + + pub(crate) fn is_verified(&self) -> bool { + time::since(self.last_verif_response).expect("system clock error") < VERIFICATION_EXPIRATION.as_secs() + } +} + +impl fmt::Debug for PeerMetrics { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PeerMetrics") + .field("verified_count", &self.verified_count) + .field("last_new_peers", &self.last_new_peers) + .field("last_verif_request", &self.last_verif_request) + .field("last_verif_response", &self.last_verif_response) + .finish() + } +} + +/// TODO: consider using `IndexMap` for faster search. +#[derive(Clone)] +pub(crate) struct PeerRing(VecDeque

<P>);
+
+impl<P: AsRef<PeerId>, const N: usize> PeerRing<P, N> {
+    /// Returns `false`, if the list already contains the id, otherwise `true`.
+    ///
+    /// The newest item will be at index `0`, the oldest at index `n`.
+    pub(crate) fn insert(&mut self, item: P) -> bool {
+        if self.contains(item.as_ref()) {
+            false
+        } else {
+            if self.is_full() {
+                self.remove_oldest();
+            }
+            self.0.push_front(item);
+            true
+        }
+    }
+
+    pub(crate) fn remove_oldest(&mut self) -> Option<P> {
+        self.0.pop_back()
+    }
+
+    pub(crate) fn remove(&mut self, peer_id: &PeerId) -> Option<P> {
+        if let Some(index) = self.find_index(peer_id) {
+            self.remove_at(index)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn remove_at(&mut self, index: usize) -> Option<P>

{ + self.0.remove(index) + } + + pub(crate) fn contains(&self, peer_id: &PeerId) -> bool { + self.0.iter().any(|v| v.as_ref() == peer_id) + } + + pub(crate) fn find_index(&self, peer_id: &PeerId) -> Option { + self.0.iter().position(|v| v.as_ref() == peer_id) + } + + pub(crate) fn find(&self, peer_id: &PeerId) -> Option<&P> { + self.find_index(peer_id).map(|index| self.get(index)).flatten() + } + + pub(crate) fn find_mut(&mut self, peer_id: &PeerId) -> Option<&mut P> { + let index = self.find_index(peer_id); + if let Some(index) = index { + self.get_mut(index) + } else { + None + } + } + + pub(crate) fn get(&self, index: usize) -> Option<&P> { + self.0.get(index) + } + + pub(crate) fn get_mut(&mut self, index: usize) -> Option<&mut P> { + self.0.get_mut(index) + } + + pub(crate) fn get_newest_mut(&mut self) -> Option<&mut P> { + self.0.get_mut(0) + } + + // NOTE: need to be atomic operations + pub(crate) fn set_newest_and_get_mut(&mut self, peer_id: &PeerId) -> Option<&mut P> { + if let Some(mid) = self.find_index(peer_id) { + if mid > 0 { + self.0.rotate_left(mid); + } + self.get_newest_mut() + } else { + None + } + } + + pub(crate) fn get_oldest(&self) -> Option<&P> { + if self.0.is_empty() { + None + } else { + self.0.get(self.0.len() - 1) + } + } + + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + pub(crate) fn is_full(&self) -> bool { + self.len() >= N + } + + pub(crate) fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub(crate) fn iter(&self) -> impl Iterator { + self.0.iter() + } + + #[cfg(test)] + pub(crate) fn rotate_forwards(&mut self) { + self.0.rotate_right(1); + } +} + +impl Default for PeerRing { + fn default() -> Self { + Self(VecDeque::with_capacity(N)) + } +} diff --git a/bee-network/bee-autopeering/src/peer/mod.rs b/bee-network/bee-autopeering/src/peer/mod.rs new file mode 100644 index 0000000000..940ced8180 --- /dev/null +++ b/bee-network/bee-autopeering/src/peer/mod.rs @@ -0,0 +1,361 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod lists; + +pub mod peer_id; +pub mod stores; + +pub use peer_id::PeerId; +pub use stores::PeerStore; + +use lists::{ActivePeersList, ReplacementPeersList}; + +use crate::{ + local::{ + services::{ServiceMap, ServiceProtocol}, + Local, + }, + proto, +}; + +use bytes::BytesMut; +use crypto::signatures::ed25519::PublicKey; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use prost::{DecodeError, EncodeError, Message}; +use serde::{ + de::{SeqAccess, Visitor}, + ser::SerializeStruct, + Deserialize, Serialize, +}; + +use std::{ + fmt, + net::{IpAddr, SocketAddr}, +}; + +/// Represents a peer. +#[derive(Clone)] +pub struct Peer { + peer_id: PeerId, + ip_address: IpAddr, + services: ServiceMap, +} + +impl Peer { + /// Creates a new instance. + pub fn new(address: IpAddr, public_key: PublicKey) -> Self { + let peer_id = PeerId::from_public_key(public_key); + + Self { + peer_id, + ip_address: address, + services: ServiceMap::default(), + } + } + + /// Returns the [`PeerId`] of this peer. + pub fn peer_id(&self) -> &PeerId { + &self.peer_id + } + + /// Returns the public key of this peer. + pub fn public_key(&self) -> &PublicKey { + self.peer_id.public_key() + } + + /// Returns the address of this peer. + pub fn ip_address(&self) -> IpAddr { + self.ip_address + } + + /// Returns the port of a service provided by this peer. + pub fn port(&self, service_name: impl AsRef) -> Option { + self.services().get(service_name).map(|s| s.port()) + } + + /// Returns the services of this peer. 
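Coming back to the ring buffer that backs the peer lists: `PeerRing` keeps at most `N` entries, newest at index 0, and evicts the oldest entry once full. A crate-internal sketch of that eviction behavior (written as a unit test because `new_test_peer` is only compiled under `#[cfg(test)]`; `N = 2` is chosen purely for brevity):

```rust
// Crate-internal sketch of the eviction behavior.
use crate::peer::{lists::PeerRing, Peer};

#[test]
fn ring_evicts_oldest() {
    let mut ring: PeerRing<Peer, 2> = PeerRing::default();

    let (a, b, c) = (Peer::new_test_peer(1), Peer::new_test_peer(2), Peer::new_test_peer(3));
    let id_a = *a.peer_id();

    assert!(ring.insert(a));
    assert!(ring.insert(b));
    assert!(ring.insert(c)); // full: the oldest entry (`a`) is dropped

    assert_eq!(ring.len(), 2);
    assert!(!ring.contains(&id_a));
}
```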
+ pub fn services(&self) -> &ServiceMap { + &self.services + } + + /// Sets the services of this peer all at once. + pub(crate) fn set_services(&mut self, services: ServiceMap) { + self.services = services; + } + + /// Returns whether the peer provides a corresponding service. + pub fn has_service(&self, service_name: impl AsRef) -> bool { + self.services.get(service_name).is_some() + } + + /// Adds a service with address binding to this peer. + pub fn add_service(&mut self, service_name: impl ToString, protocol: ServiceProtocol, port: u16) { + self.services.insert(service_name.to_string(), protocol, port); + } + + /// Returns the [`SocketAddr`](std::net::SocketAddr) associated with the given service name. + /// + /// Example: "peering" => `127.0.0.1:14627`. + pub fn service_socketaddr(&self, service_name: impl AsRef) -> Option { + self.services + .get(service_name) + .map(|endpoint| SocketAddr::new(self.ip_address, endpoint.port())) + } + + /// Returns the [`Multiaddr`](libp2p_core::Multiaddr) associated with the given service name. + /// + /// Example: "peering" => `/ip4/127.0.0.1/udp/14627`. + pub fn service_multiaddr(&self, service_name: impl AsRef) -> Option { + self.services.get(service_name).map(|endpoint| { + let mut multiaddr = Multiaddr::empty(); + + match self.ip_address { + IpAddr::V4(ipv4_addr) => multiaddr.push(Protocol::Ip4(ipv4_addr)), + IpAddr::V6(ipv6_addr) => multiaddr.push(Protocol::Ip6(ipv6_addr)), + }; + + multiaddr.push(endpoint.to_libp2p_protocol()); + + multiaddr + }) + } + + /// Creates a peer from its Protobuf representation/encoding. + pub fn from_protobuf(bytes: &[u8]) -> Result { + proto::Peer::decode(bytes)?.try_into() + } + + /// Returns the Protobuf representation of this peer. + pub fn to_protobuf(&self) -> Result { + let services: proto::ServiceMap = self.services().into(); + + let peer = proto::Peer { + ip: self.ip_address.to_string(), + public_key: self.public_key().as_ref().to_vec(), + services: Some(services), + }; + + let mut buf = BytesMut::with_capacity(peer.encoded_len()); + peer.encode(&mut buf)?; + + Ok(buf) + } + + pub(crate) fn into_id(self) -> PeerId { + self.peer_id + } +} + +impl fmt::Debug for Peer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Peer") + .field("peer_id", &self.peer_id.to_string()) + .field("public_key", &bs58::encode(self.public_key().as_ref()).into_string()) + .field("ip_address", &self.ip_address) + .field("services", &self.services.to_string()) + .finish() + } +} + +impl TryFrom for Peer { + type Error = Error; + + fn try_from(peer: proto::Peer) -> Result { + let proto::Peer { + public_key, + ip, + services, + } = peer; + + let ip_address: IpAddr = ip.parse().map_err(|_| Error::ParseIpAddr)?; + + let public_key = PublicKey::try_from_bytes(public_key.try_into().map_err(|_| Error::PublicKeyBytes)?) 
+ .map_err(|_| Error::PublicKeyBytes)?; + + let peer_id = PeerId::from_public_key(public_key); + + let services: ServiceMap = services.ok_or(Error::MissingServices)?.try_into()?; + + Ok(Self { + peer_id, + ip_address, + services, + }) + } +} + +impl From<&Peer> for proto::Peer { + fn from(peer: &Peer) -> Self { + Self { + ip: peer.ip_address().to_string(), + public_key: peer.public_key().as_ref().to_vec(), + services: Some(peer.services().into()), + } + } +} + +impl AsRef for Peer { + fn as_ref(&self) -> &Self { + self + } +} + +impl AsRef for Peer { + fn as_ref(&self) -> &PeerId { + self.peer_id() + } +} + +impl From for sled::IVec { + fn from(peer: Peer) -> Self { + let bytes = bincode::serialize(&peer).expect("serialization error"); + sled::IVec::from_iter(bytes.into_iter()) + } +} + +impl From for Peer { + fn from(bytes: sled::IVec) -> Self { + bincode::deserialize(&bytes).expect("deserialization error") + } +} + +impl<'de> Deserialize<'de> for Peer { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_struct("Peer", &["peer_id", "ip_address", "services"], PeerVisitor {}) + } +} + +impl Serialize for Peer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut this = serializer.serialize_struct("Peer", 3)?; + this.serialize_field("peer_id", &self.peer_id)?; + this.serialize_field("ip_address", &self.ip_address)?; + this.serialize_field("services", &self.services)?; + this.end() + } +} + +struct PeerVisitor {} + +impl<'de> Visitor<'de> for PeerVisitor { + type Value = Peer; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("'Peer'") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let peer_id = seq + .next_element::()? + .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; + + let ip_address = seq + .next_element::()? + .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?; + + let services = seq + .next_element::()? + .ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; + + Ok(Peer { + peer_id, + ip_address, + services, + }) + } +} + +/// Returns whether the given peer id is known locally. +pub(crate) fn is_known( + peer_id: &PeerId, + local: &Local, + active_peers: &ActivePeersList, + replacements: &ReplacementPeersList, +) -> bool { + // The entry peer list doesn't need to be queried, because those are always a subset of the active peers. + peer_id == &local.peer_id() || active_peers.read().contains(peer_id) || replacements.read().contains(peer_id) +} + +// Hive.go: whether the peer has recently done an endpoint proof +// --- +/// Returns whether the corresponding peer sent a (still valid) verification response. +/// +/// Also returns `false`, if the provided `peer_id` is not found in the active peer list. +pub(crate) fn is_verified(peer_id: &PeerId, active_peers: &ActivePeersList) -> bool { + active_peers + .read() + .find(peer_id) + .map_or(false, |e| e.metrics().is_verified()) +} + +// Hive.go: moves the peer with the given ID to the front of the list of managed peers. 
+// --- +/// Performs 3 operations: +/// * Rotates the active peer list such that `peer_id` is at the front of the list (index 0); +/// * Updates the "last_verification_response" timestamp; +/// * Increments the "verified" counter; +pub(crate) fn set_front_and_update(peer_id: &PeerId, active_peers: &ActivePeersList) -> Option { + if let Some(p) = active_peers.write().set_newest_and_get_mut(peer_id) { + let metrics = p.metrics_mut(); + metrics.set_last_verif_response_timestamp(); + let new_count = metrics.increment_verified_count(); + + Some(new_count) + } else { + None + } +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("parsing peer ip address failed")] + ParseIpAddr, + #[error("peer services missing")] + MissingServices, + #[error("invalid service description")] + Service(#[from] crate::local::services::Error), + #[error("invalid public key bytes")] + PublicKeyBytes, + #[error("{0}")] + ProtobufDecode(#[from] DecodeError), + #[error("{0}")] + ProtobufEncode(#[from] EncodeError), +} + +#[cfg(test)] +mod tests { + use crate::local::services::AUTOPEERING_SERVICE_NAME; + + use super::*; + use crypto::signatures::ed25519::SecretKey as PrivateKey; + + impl Peer { + pub(crate) fn new_test_peer(index: u8) -> Self { + let mut services = ServiceMap::default(); + services.insert(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, 1337); + + let public_key = PrivateKey::generate().unwrap().public_key(); + let peer_id = PeerId::from_public_key(public_key); + + Self { + peer_id, + ip_address: format!("127.0.0.{}", index).parse().unwrap(), + services, + } + } + + pub(crate) fn num_services(&self) -> usize { + self.services().len() + } + } +} diff --git a/bee-network/bee-autopeering/src/peer/peer_id.rs b/bee-network/bee-autopeering/src/peer/peer_id.rs new file mode 100644 index 0000000000..cddcf404d7 --- /dev/null +++ b/bee-network/bee-autopeering/src/peer/peer_id.rs @@ -0,0 +1,198 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! A module for creating peer identities. + +use crate::hash; + +use crypto::signatures::ed25519::{PublicKey, SecretKey as PrivateKey, PUBLIC_KEY_LENGTH}; +use serde::{ + de::{SeqAccess, Visitor}, + ser::SerializeStruct, + Deserialize, Serialize, +}; + +use std::{ + fmt, + hash::{Hash, Hasher}, +}; + +const DISPLAY_LENGTH: usize = 16; + +/// Represents the unique identity of a peer in the network. +#[derive(Copy, Clone)] +pub struct PeerId { + // The wrapped ED25519 public key actually representing the ID. + public_key: PublicKey, + // The corresponding SHA256 hash of the ED25519 public key. + id_bytes: [u8; hash::SHA256_LEN], +} + +impl PeerId { + /// Generates a new random `PeerId`. + pub fn generate() -> Self { + let private_key = PrivateKey::generate().expect("error generating private key"); + + Self::from_public_key(private_key.public_key()) + } + + /// Creates a peer identity from an ED25519 public key. + pub fn from_public_key(public_key: PublicKey) -> Self { + let id_bytes = hash::data_hash(public_key.as_ref()); + + Self { id_bytes, public_key } + } + + /// Returns the public key associated with this identity. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns the actual bytes representing this id. + pub fn id_bytes(&self) -> &[u8; hash::SHA256_LEN] { + &self.id_bytes + } + + /// Creates the corresponding `libp2p::PeerId`. 
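Putting the public `Peer` API together: a peer is identified by its ED25519 public key and is reachable under the services it announces. The IP address, port, and freshly generated key below are illustrative; `crypto` refers to the `iota-crypto` dependency already used by the crate.

```rust
use bee_autopeering::{Peer, ServiceProtocol, AUTOPEERING_SERVICE_NAME};
use crypto::signatures::ed25519::SecretKey;

fn main() {
    let public_key = SecretKey::generate().expect("OS RNG available").public_key();
    let mut peer = Peer::new("127.0.0.1".parse().expect("valid IP address"), public_key);

    peer.add_service(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, 14626);

    assert!(peer.has_service(AUTOPEERING_SERVICE_NAME));
    assert_eq!(peer.port(AUTOPEERING_SERVICE_NAME), Some(14626));

    // e.g. `/ip4/127.0.0.1/udp/14626` and `127.0.0.1:14626`
    let _multiaddr = peer.service_multiaddr(AUTOPEERING_SERVICE_NAME);
    let _socket_addr = peer.service_socketaddr(AUTOPEERING_SERVICE_NAME);
}
```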
+ pub fn libp2p_peer_id(&self) -> libp2p_core::PeerId { + libp2p_peer_id(self.public_key()) + } +} + +/// Creates the corresponding `libp2p_core::PeerId` from a crypto.rs ED25519 public key. +pub fn libp2p_peer_id(public_key: &PublicKey) -> libp2p_core::PeerId { + libp2p_core::PeerId::from_public_key(libp2p_public_key(public_key)) +} + +/// Creates the corresponding `libp2p_core::PublicKey` from a crypto.rs ED25519 public key. +pub fn libp2p_public_key(public_key: &PublicKey) -> libp2p_core::PublicKey { + libp2p_core::PublicKey::Ed25519( + libp2p_core::identity::ed25519::PublicKey::decode(public_key.as_ref()) + .expect("error decoding ed25519 public key from bytes"), + ) +} + +impl Eq for PeerId {} +impl PartialEq for PeerId { + fn eq(&self, other: &Self) -> bool { + self.id_bytes == other.id_bytes + } +} +impl Hash for PeerId { + fn hash(&self, state: &mut H) { + self.id_bytes.hash(state); + } +} + +impl fmt::Debug for PeerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = &bs58::encode(&self.id_bytes).into_string(); + + f.debug_struct("PeerId") + .field("public_key", &bs58::encode(self.public_key).into_string()) + .field("id", &s) + .finish() + } +} + +impl fmt::Display for PeerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.libp2p_peer_id().to_base58()[..DISPLAY_LENGTH].fmt(f) + } +} + +impl AsRef for PeerId { + fn as_ref(&self) -> &PeerId { + self + } +} + +impl AsRef<[u8]> for PeerId { + fn as_ref(&self) -> &[u8] { + self.public_key.as_ref() + } +} + +impl From<&PeerId> for sled::IVec { + fn from(peer: &PeerId) -> Self { + let bytes = peer.public_key().to_bytes(); + sled::IVec::from_iter(bytes.into_iter()) + } +} + +impl From<&PeerId> for libp2p_core::PeerId { + fn from(peer_id: &PeerId) -> Self { + libp2p_peer_id(peer_id.public_key()) + } +} + +impl Serialize for PeerId { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut this = serializer.serialize_struct("PeerId", 2)?; + this.serialize_field("public_key", &self.public_key.to_bytes())?; + this.end() + } +} + +impl<'de> Deserialize<'de> for PeerId { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_struct("PeerId", &["public_key"], PeerIdVisitor {}) + } +} + +struct PeerIdVisitor {} + +impl<'de> Visitor<'de> for PeerIdVisitor { + type Value = PeerId; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("'PeerId'") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let bytes = seq + .next_element::<[u8; PUBLIC_KEY_LENGTH]>()? + .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; + + let public_key = PublicKey::try_from_bytes(bytes).map_err(|_| serde::de::Error::invalid_length(0, &self))?; + + Ok(PeerId::from_public_key(public_key)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::multiaddr::base58_to_pubkey; + + impl PeerId { + /// Creates a static peer id. + pub(crate) fn new_static() -> Self { + let base58_pubkey = "4H6WV54tB29u8xCcEaMGQMn37LFvM1ynNpp27TTXaqNM"; + let pubkey = base58_to_pubkey(base58_pubkey).unwrap(); + Self::from_public_key(pubkey) + } + + /// Creates a deterministic peer id from a generator char. 
+ pub fn new_deterministic(gen: char) -> Self { + let base58_pubkey = std::iter::repeat(gen).take(44).collect::(); + let pubkey = base58_to_pubkey(base58_pubkey).unwrap(); + Self::from_public_key(pubkey) + } + } + + #[test] + fn into_libp2p_peer_id() { + let peer_id = PeerId::new_static(); + let _ = peer_id.libp2p_peer_id(); + } +} diff --git a/bee-network/bee-autopeering/src/peer/stores.rs b/bee-network/bee-autopeering/src/peer/stores.rs new file mode 100644 index 0000000000..64f9f1503a --- /dev/null +++ b/bee-network/bee-autopeering/src/peer/stores.rs @@ -0,0 +1,316 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Persistent storage of discovered peers. + +use super::{ + lists::{ActivePeer, ActivePeersList, ReplacementPeersList}, + peer_id::PeerId, + Peer, +}; + +use sled::{Batch, Db}; + +use std::{ + collections::HashMap, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +const ACTIVE_PEERS_TREE: &str = "active_peers"; +const REPLACEMENTS_TREE: &str = "replacements"; + +/// Mandatory functionality of any peer store. +pub trait PeerStore: Clone + Send + Sync { + /// The peer store configuration. + type Config; + + /// Creates a new peer store from config. + fn new(config: Self::Config) -> Self; + + /// Stores an active peer. + fn store_active(&self, peer: ActivePeer); + + /// Stores all current active peers. + fn store_all_active(&self, peers: &ActivePeersList); + + /// Stores a replacement peer. + fn store_replacement(&self, peer: Peer); + + /// Stores all current replacement peers. + fn store_all_replacements(&self, peers: &ReplacementPeersList); + + /// Whether the store contains the given peer. + fn contains(&self, peer_id: &PeerId) -> bool; + + /// Fetches an active peer from its peer identity. + fn fetch_active(&self, peer_id: &PeerId) -> Option; + + /// Fetches all active peers. + fn fetch_all_active(&self) -> Vec; + + /// Fetches a replacement peer from its peer identity. + fn fetch_replacement(&self, peer_id: &PeerId) -> Option; + + /// Fetches all replacement peers. + fn fetch_all_replacements(&self) -> Vec; + + /// Deletes a stored peer. + fn delete(&self, peer_id: &PeerId) -> bool; + + /// Deletes all stored peers. + fn delete_all(&self); +} + +/// A non-persistent/in-memory peer store. 
+#[derive(Clone, Default)] +pub struct InMemoryPeerStore { + inner: Arc>, +} + +#[derive(Default)] +struct InMemoryPeerStoreInner { + active_peers: HashMap, + replacements: HashMap, +} + +impl InMemoryPeerStore { + fn read(&self) -> RwLockReadGuard { + self.inner.read().expect("error getting read access") + } + + fn write(&self) -> RwLockWriteGuard { + self.inner.write().expect("error getting write access") + } +} + +impl PeerStore for InMemoryPeerStore { + type Config = (); + + fn new(_: Self::Config) -> Self { + Self { + inner: Default::default(), + } + } + + fn store_active(&self, peer: ActivePeer) { + let peer_id = peer.peer_id(); + + let mut write = self.write(); + + let _ = write.replacements.remove(peer_id); + let _ = write.active_peers.insert(*peer_id, peer); + } + + fn store_all_active(&self, peers: &ActivePeersList) { + let read = peers.read(); + let mut write = self.write(); + + for (peer_id, peer) in read.iter().map(|p| (p.peer_id(), p)) { + let _ = write.active_peers.insert(*peer_id, peer.clone()); + } + } + + fn store_replacement(&self, peer: Peer) { + let peer_id = peer.peer_id(); + + let _ = self.write().active_peers.remove(peer_id); + let _ = self.write().replacements.insert(*peer_id, peer); + } + + fn store_all_replacements(&self, peers: &ReplacementPeersList) { + let read = peers.read(); + let mut write = self.write(); + + for (peer_id, peer) in read.iter().map(|p| (p.peer_id(), p)) { + let _ = write.replacements.insert(*peer_id, peer.clone()); + } + } + + fn contains(&self, peer_id: &PeerId) -> bool { + let read = self.read(); + read.active_peers.contains_key(peer_id) || read.replacements.contains_key(peer_id) + } + + fn fetch_active(&self, peer_id: &PeerId) -> Option { + self.read().active_peers.get(peer_id).cloned() + } + + fn fetch_all_active(&self) -> Vec { + self.read().active_peers.iter().map(|(_, p)| p).cloned().collect() + } + + fn fetch_replacement(&self, peer_id: &PeerId) -> Option { + self.read().replacements.get(peer_id).cloned() + } + + fn fetch_all_replacements(&self) -> Vec { + self.read().replacements.iter().map(|(_, p)| p).cloned().collect() + } + + fn delete(&self, peer_id: &PeerId) -> bool { + let mut write = self.write(); + write.active_peers.remove(peer_id).is_some() || write.replacements.remove(peer_id).is_some() + } + + fn delete_all(&self) { + let mut write = self.write(); + write.active_peers.clear(); + write.replacements.clear(); + } +} + +/// The config for the Sled peer store. +pub type SledPeerStoreConfig = sled::Config; + +/// The (persistent) Sled peer store. 
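A peer store is normally not driven by hand: the caller picks an implementation by passing its `Config` to `init`, and the autopeering system creates and fills the store internally. A short sketch of the two bundled options (the sled path is illustrative):

```rust
use bee_autopeering::stores::{InMemoryPeerStore, PeerStore, SledPeerStoreConfig};

fn main() {
    // Ephemeral store, e.g. for tests: its config is just `()`.
    let _memory_store = InMemoryPeerStore::new(());

    // Persistent store: discovered peers survive a restart. `SledPeerStoreConfig`
    // is a type alias for `sled::Config`, so all of sled's builder methods apply.
    let _sled_config = SledPeerStoreConfig::new().path("./peerstore");
}
```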
+#[derive(Clone)] +pub struct SledPeerStore { + db: Db, +} + +impl PeerStore for SledPeerStore { + type Config = SledPeerStoreConfig; + + fn new(config: Self::Config) -> Self { + let db = config.open().expect("error opening peer store"); + + db.open_tree("active_peers").expect("error opening tree"); + db.open_tree("replacements").expect("error opening tree"); + + Self { db } + } + + fn store_active(&self, active_peer: ActivePeer) { + let tree = self.db.open_tree(ACTIVE_PEERS_TREE).expect("error opening tree"); + let key = *active_peer.peer_id(); + + tree.insert(key, active_peer.to_bytes()).expect("insert error"); + } + + fn store_all_active(&self, active_peers: &ActivePeersList) { + let tree = self.db.open_tree(ACTIVE_PEERS_TREE).expect("error opening tree"); + + let mut batch = Batch::default(); + active_peers + .read() + .iter() + .for_each(|p| batch.insert(p.peer_id(), p.clone())); + + tree.apply_batch(batch).expect("error applying batch"); + } + + fn store_replacement(&self, peer: Peer) { + let tree = self.db.open_tree(REPLACEMENTS_TREE).expect("error opening tree"); + let key = *peer.peer_id(); + + tree.insert(key, peer).expect("error inserting peer"); + } + + fn store_all_replacements(&self, replacements: &ReplacementPeersList) { + let replacements_tree = self.db.open_tree(REPLACEMENTS_TREE).expect("error opening tree"); + + let mut batch = Batch::default(); + replacements + .read() + .iter() + .for_each(|p| batch.insert(p.peer_id(), p.clone())); + + replacements_tree.apply_batch(batch).expect("error applying batch"); + } + + fn contains(&self, peer_id: &PeerId) -> bool { + let tree = self.db.open_tree(ACTIVE_PEERS_TREE).expect("error opening tree"); + if tree.contains_key(peer_id).expect("db error") { + true + } else { + let tree = self.db.open_tree(REPLACEMENTS_TREE).expect("error opening tree"); + tree.contains_key(peer_id).expect("db error") + } + } + + fn fetch_active(&self, peer_id: &PeerId) -> Option { + let tree = self.db.open_tree(ACTIVE_PEERS_TREE).expect("error opening tree"); + + tree.get(peer_id).expect("db error").map(|b| ActivePeer::from_bytes(&b)) + } + + fn fetch_all_active(&self) -> Vec { + let tree = self.db.open_tree(ACTIVE_PEERS_TREE).expect("error opening tree"); + + tree.iter() + .filter_map(|p| p.ok()) + .map(|(_, b)| ActivePeer::from_bytes(&b)) + .collect::>() + } + + fn fetch_replacement(&self, peer_id: &PeerId) -> Option { + let tree = self.db.open_tree(REPLACEMENTS_TREE).expect("error opening tree"); + + tree.get(peer_id).expect("db error").map(Peer::from) + } + + fn fetch_all_replacements(&self) -> Vec { + let tree = self.db.open_tree(REPLACEMENTS_TREE).expect("error opening tree"); + + tree.iter() + .filter_map(|p| p.ok()) + .map(|(_, ivec)| Peer::from(ivec)) + .collect::>() + } + + fn delete(&self, _: &PeerId) -> bool { + unimplemented!("no need for single entry removal at the moment") + } + + fn delete_all(&self) { + self.db + .open_tree(ACTIVE_PEERS_TREE) + .expect("error opening tree") + .clear() + .expect("error clearing tree"); + + self.db + .open_tree(REPLACEMENTS_TREE) + .expect("error opening tree") + .clear() + .expect("error clearing tree"); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn create_temporary_sled_peer_store() -> SledPeerStore { + let config = SledPeerStoreConfig::new().temporary(true); + SledPeerStore::new(config) + } + + #[test] + fn store_and_fetch_active_peer() { + let peer_store = create_temporary_sled_peer_store(); + + let peer = ActivePeer::new(Peer::new_test_peer(0)); + let peer_id = *peer.peer_id(); + + 
peer_store.store_active(peer); + + let fetched_active_peer = peer_store.fetch_active(&peer_id).expect("missing peer"); + + assert_eq!(peer_id, *fetched_active_peer.peer_id()); + } + + #[test] + fn store_and_fetch_replacement_peer() { + let peer_store = create_temporary_sled_peer_store(); + + let peer = Peer::new_test_peer(0); + let peer_id = *peer.peer_id(); + + peer_store.store_replacement(peer); + + let fetched_peer = peer_store.fetch_replacement(&peer_id).expect("missing peer"); + + assert_eq!(peer_id, *fetched_peer.peer_id()); + } +} diff --git a/bee-network/bee-autopeering/src/peering/filter.rs b/bee-network/bee-autopeering/src/peering/filter.rs new file mode 100644 index 0000000000..c83c5c7dda --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/filter.rs @@ -0,0 +1,151 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + peer::{peer_id::PeerId, Peer}, + NeighborValidator, +}; + +use std::{ + collections::HashSet, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +#[derive(Clone)] +pub(crate) struct NeighborFilter { + inner: Arc>>, +} + +impl NeighborFilter { + /// Creates a new filter. + /// + /// A peer id same as `local_id` will always be rejected. A `validator` must be provided + /// to inject a user defined criteria. + pub fn new(local_id: PeerId, validator: V) -> Self { + Self { + inner: Arc::new(RwLock::new(NeighborFilterInner::new(local_id, validator))), + } + } + + /// Adds a single peer id that should be rejected. + pub(crate) fn add(&self, peer_id: PeerId) { + self.write().add(peer_id); + } + + /// Resets the filter (i.e. removes all currently rejected peer ids). + pub(crate) fn clear(&self) { + self.write().clear(); + } + + /// Returns `true` if the filter is okay with the candidate, otherwise `false`. + pub(crate) fn ok(&self, candidate: impl AsRef) -> bool { + self.read().ok(candidate) + } + + /// Applies the filter to a list of candidates. + pub(crate) fn apply_list<'a, P: AsRef>(&self, candidates: &'a [P]) -> Vec<&'a P> { + self.read().apply_list(candidates) + } + + fn read(&self) -> RwLockReadGuard> { + self.inner.read().expect("error getting read access") + } + + fn write(&self) -> RwLockWriteGuard> { + self.inner.write().expect("error getting write access") + } +} + +pub(crate) struct NeighborFilterInner { + local_id: PeerId, + rejected: HashSet, + validator: V, +} + +impl NeighborFilterInner { + fn new(local_id: PeerId, validator: V) -> Self { + Self { + local_id, + rejected: HashSet::new(), + validator, + } + } + + /// Adds a single peer id that should be rejected. + fn add(&mut self, peer_id: PeerId) { + self.rejected.insert(peer_id); + } + + /// Resets the filter (i.e. removes all currently rejected peer ids). + fn clear(&mut self) { + self.rejected.clear() + } + + /// Returns `true` if the filter is okay with the candidate, otherwise `false`. + fn ok(&self, candidate: impl AsRef) -> bool { + let peer = candidate.as_ref(); + let peer_id = peer.peer_id(); + + if peer_id == &self.local_id || self.rejected.contains(peer_id) { + false + } else { + self.validator.is_valid(peer) + } + } + + /// Applies the filter to a list of candidates. 
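The filter above delegates the final decision to the user-supplied `NeighborValidator`, which is re-exported at the crate root. As a sketch, a slightly stricter validator than the one in the crate docs; the "gossip" service name and expected port are assumptions:

```rust
use bee_autopeering::{NeighborValidator, Peer};

// Only accept neighbors that announce a "gossip" service on the expected port.
#[derive(Clone)]
struct StrictGossipValidator {
    expected_port: u16,
}

impl NeighborValidator for StrictGossipValidator {
    fn is_valid(&self, peer: &Peer) -> bool {
        peer.port("gossip") == Some(self.expected_port)
    }
}
```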
+ fn apply_list<'a, P: AsRef>(&self, candidates: &'a [P]) -> Vec<&'a P> { + candidates.iter().filter(|c| self.ok(c)).collect::>() + } +} + +#[cfg(test)] +mod tests { + use crate::{ + local::services::{ServiceProtocol, AUTOPEERING_SERVICE_NAME}, + peer::Peer, + }; + + use super::*; + + #[derive(Clone)] + struct DummyValidator {} + impl NeighborValidator for DummyValidator { + fn is_valid(&self, peer: &Peer) -> bool { + peer.services().get(AUTOPEERING_SERVICE_NAME).unwrap().port() == 1337 + } + } + + fn setup_scenario1() -> (NeighborFilter, Peer, Peer) { + let local_id = Peer::new_test_peer(0).into_id(); + let filter = NeighborFilter::new(local_id, DummyValidator {}); + + let mut peer1 = Peer::new_test_peer(1); + peer1.add_service(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, 6969); + assert_eq!(1, peer1.num_services()); + + let mut peer2 = Peer::new_test_peer(2); + peer2.add_service(AUTOPEERING_SERVICE_NAME, ServiceProtocol::Udp, 1337); + assert_eq!(1, peer2.num_services()); + + (filter, peer1, peer2) + } + + #[test] + fn filter_apply() { + let (filter, peer1, peer2) = setup_scenario1(); + + assert!(!filter.read().ok(peer1)); + assert!(filter.read().ok(peer2)); + } + + #[test] + fn filter_apply_list() { + let (filter, peer1, peer2) = setup_scenario1(); + + let candidates = [peer1, peer2]; + + let included = filter.write().apply_list(&candidates); + assert_eq!(1, included.len()); + } +} diff --git a/bee-network/bee-autopeering/src/peering/manager.rs b/bee-network/bee-autopeering/src/peering/manager.rs new file mode 100644 index 0000000000..b1c7d9a849 --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/manager.rs @@ -0,0 +1,763 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use super::{ + filter::NeighborFilter, + messages::{DropPeeringRequest, PeeringRequest, PeeringResponse}, + neighbor::{self, Neighborhood, SIZE_INBOUND, SIZE_OUTBOUND}, +}; + +use crate::{ + event::{Event, EventTx}, + hash::message_hash, + local::{ + salt::{self, Salt, SALT_LIFETIME_SECS}, + services::AUTOPEERING_SERVICE_NAME, + Local, + }, + packet::{IncomingPacket, MessageType, OutgoingPacket}, + peer::{self, lists::ActivePeersList, peer_id::PeerId, Peer}, + peering::neighbor::{salt_distance, Neighbor}, + request::{self, RequestManager, RequestValue, ResponseTx, RESPONSE_TIMEOUT}, + server::{ServerSocket, ServerTx}, + task::{Repeat, Runnable, ShutdownRx}, + time::SECOND, + NeighborValidator, +}; + +use std::{net::SocketAddr, time::Duration}; + +/// Salt update interval. +pub(crate) const SALT_UPDATE_SECS: Duration = Duration::from_secs(SALT_LIFETIME_SECS.as_secs() - SECOND); +const INBOUND: bool = true; +const OUTBOUND: bool = false; + +pub(crate) type InboundNeighborhood = Neighborhood; +pub(crate) type OutboundNeighborhood = Neighborhood; + +/// Represents the answer of a `PeeringRequest`. Can be either `true` (peering accepted), or `false` (peering denied). +pub type Status = bool; + +pub(crate) struct PeeringManager { + // The local peer. + local: Local, + // Channel halves for sending/receiving peering related packets. + socket: ServerSocket, + // Handles requests. + request_mngr: RequestManager, + // Publishes peering related events. + event_tx: EventTx, + // The list of managed peers. + active_peers: ActivePeersList, + // Inbound neighborhood. + inbound_nbh: InboundNeighborhood, + // Outbound neighborhood. + outbound_nbh: OutboundNeighborhood, + // The peer rejection filter. 
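+    // Rejects the local peer id, any id added to the reject list, and peers failing the user-provided validator.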
+ nb_filter: NeighborFilter, +} + +impl PeeringManager { + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + local: Local, + socket: ServerSocket, + request_mngr: RequestManager, + active_peers: ActivePeersList, + event_tx: EventTx, + inbound_nbh: InboundNeighborhood, + outbound_nbh: OutboundNeighborhood, + nb_filter: NeighborFilter, + ) -> Self { + Self { + local, + socket, + request_mngr, + event_tx, + active_peers, + inbound_nbh, + outbound_nbh, + nb_filter, + } + } +} + +#[async_trait::async_trait] +impl Runnable for PeeringManager { + const NAME: &'static str = "PeeringManager"; + const SHUTDOWN_PRIORITY: u8 = 1; + + type ShutdownSignal = ShutdownRx; + + async fn run(self, mut shutdown_rx: Self::ShutdownSignal) { + let PeeringManager { + local, + socket, + request_mngr, + event_tx, + active_peers, + inbound_nbh, + outbound_nbh, + nb_filter, + } = self; + + let ServerSocket { + mut server_rx, + server_tx, + } = socket; + + 'recv: loop { + tokio::select! { + _ = &mut shutdown_rx => { + break; + } + p = server_rx.recv() => { + if let Some(IncomingPacket { + msg_type, + msg_bytes, + peer_addr, + peer_id, + }) = p + { + let ctx = RecvContext { + peer_id: &peer_id, + msg_bytes: &msg_bytes, + server_tx: &server_tx, + local: &local, + active_peers: &active_peers, + request_mngr: &request_mngr, + peer_addr, + event_tx: &event_tx, + inbound_nbh: &inbound_nbh, + outbound_nbh: &outbound_nbh, + }; + + match msg_type { + MessageType::PeeringRequest => { + let peer_req = if let Ok(peer_req) = PeeringRequest::from_protobuf(&msg_bytes) { + peer_req + } else { + log::warn!("Error decoding peering request from {}.", &peer_id); + continue 'recv; + }; + + if let Err(e) = validate_peering_request(&peer_req, &ctx) { + log::warn!("Received invalid peering request from {}. Reason: {:?}", &peer_id, e); + continue 'recv; + } else { + log::trace!("Received valid peering request from {}.", &peer_id); + + handle_peering_request(peer_req, ctx, &nb_filter); + } + } + MessageType::PeeringResponse => { + let peer_res = if let Ok(peer_res) = PeeringResponse::from_protobuf(&msg_bytes) { + peer_res + } else { + log::warn!("Error decoding peering response from {}.", &peer_id); + continue 'recv; + }; + + match validate_peering_response(&peer_res, &ctx) { + Ok(peer_reqval) => { + log::trace!("Received valid peering response from {}.", &peer_id); + + handle_peering_response(peer_res, peer_reqval, ctx, &nb_filter); + } + Err(e) => { + log::warn!("Received invalid peering response from {}. Reason: {:?}", &peer_id, e); + continue 'recv; + } + } + } + MessageType::DropRequest => { + let drop_req = if let Ok(drop_req) = DropPeeringRequest::from_protobuf(&msg_bytes) { + drop_req + } else { + log::warn!("Error decoding drop request from {}.", &peer_id); + continue 'recv; + }; + + if let Err(e) = validate_drop_request(&drop_req, &ctx) { + log::warn!("Received invalid drop request from {}. 
Reason: {:?}", &peer_id, e); + continue 'recv; + } else { + log::trace!("Received valid drop request from {}.", &peer_id); + + handle_drop_request(drop_req, ctx, &nb_filter); + } + } + _ => log::warn!("Received unsupported peering message type"), + } + } + } + } + } + } +} + +pub(crate) struct RecvContext<'a> { + peer_id: &'a PeerId, + msg_bytes: &'a [u8], + server_tx: &'a ServerTx, + local: &'a Local, + active_peers: &'a ActivePeersList, + request_mngr: &'a RequestManager, + peer_addr: SocketAddr, + event_tx: &'a EventTx, + inbound_nbh: &'a InboundNeighborhood, + outbound_nbh: &'a OutboundNeighborhood, +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// VALIDATION +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug, Clone, Copy)] +pub(crate) enum ValidationError { + // The request must not be expired. + RequestExpired, + // The response must arrive in time. + NoCorrespondingRequestOrTimeout, + // The hash of the corresponding request must be correct. + IncorrectRequestHash, + // The peer has not been verified yet. + PeerNotVerified, + // The peer's salt is expired. + SaltExpired, +} + +fn validate_peering_request(peer_req: &PeeringRequest, ctx: &RecvContext) -> Result<(), ValidationError> { + use ValidationError::*; + + if request::is_expired(peer_req.timestamp()) { + Err(RequestExpired) + } else if !peer::is_verified(ctx.peer_id, ctx.active_peers) { + Err(PeerNotVerified) + } else if salt::is_expired(peer_req.salt().expiration_time()) { + Err(SaltExpired) + } else { + Ok(()) + } +} + +fn validate_peering_response(peer_res: &PeeringResponse, ctx: &RecvContext) -> Result { + use ValidationError::*; + + if let Some(reqv) = ctx.request_mngr.write().remove::(ctx.peer_id) { + if peer_res.request_hash() != reqv.request_hash { + Err(IncorrectRequestHash) + } else { + Ok(reqv) + } + } else { + Err(NoCorrespondingRequestOrTimeout) + } +} + +fn validate_drop_request(drop_req: &DropPeeringRequest, _: &RecvContext) -> Result<(), ValidationError> { + use ValidationError::*; + + if request::is_expired(drop_req.timestamp()) { + Err(RequestExpired) + } else { + Ok(()) + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// HANDLING +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +fn handle_peering_request( + _peer_req: PeeringRequest, + ctx: RecvContext, + nb_filter: &NeighborFilter, +) { + log::trace!("Handling peering request."); + + let mut status = false; + + if peer::is_verified(ctx.peer_id, ctx.active_peers) { + let active_peer = ctx + .active_peers + .read() + .find(ctx.peer_id) + .cloned() + .expect("inconsistent peer list"); + + if nb_filter.ok(&active_peer.peer()) { + // Calculate the distance between the local peer and the potential neighbor. + let distance = + neighbor::salt_distance(&ctx.local.peer_id(), active_peer.peer_id(), &ctx.local.private_salt()); + + // Create a new neighbor. + let neighbor = Neighbor::new(active_peer.into_peer(), distance); + + // Check if the neighbor would be closer than the currently furthest in the inbound neighborhood. + if ctx.inbound_nbh.is_preferred(&neighbor) { + let peer = neighbor.into_peer(); + + if add_or_replace_neighbor::( + peer.clone(), + ctx.local, + ctx.inbound_nbh, + ctx.outbound_nbh, + ctx.server_tx, + ctx.event_tx, + ) { + // Change peering status to `true`. 
+ status = true; + + // Update the neighbor filter. + nb_filter.add(*peer.peer_id()); + + // Fire `IncomingPeering` event. + publish_peering_event::( + peer, + status, + ctx.local, + ctx.event_tx, + ctx.inbound_nbh, + ctx.outbound_nbh, + ); + } + } else { + log::debug!("Denying peering request: Peer distance too large."); + } + } else { + log::debug!("Denying peering request: Peer filtered."); + } + } else { + log::debug!("Denying peering request: Peer not verified."); + } + + // In any case send a response. + send_peering_response_to_addr(ctx.peer_addr, ctx.peer_id, ctx.msg_bytes, ctx.server_tx, status); +} + +fn handle_peering_response( + peer_res: PeeringResponse, + peer_reqval: RequestValue, + ctx: RecvContext, + nb_filter: &NeighborFilter, +) { + log::trace!("Handling peering response."); + + let mut status = peer_res.status(); + + if status { + log::debug!("Peering accepted by {}.", ctx.peer_id); + + let peer = ctx + .active_peers + .read() + .find(ctx.peer_id) + .cloned() + .expect("inconsistent peer list") + .into_peer(); + + // Hive.go: if the peer is already in inbound, do not add it and remove it from inbound + // TODO: investigate why! + if ctx.inbound_nbh.remove_neighbor(ctx.peer_id).is_some() { + // Change status to `false`. + status = false; + + // Fire `OutgoingPeering` event with status = `false`. + publish_peering_event::( + peer.clone(), + status, + ctx.local, + ctx.event_tx, + ctx.inbound_nbh, + ctx.outbound_nbh, + ); + + // Drop that peer. + send_drop_peering_request_to_peer(peer, ctx.server_tx, ctx.event_tx, ctx.inbound_nbh, ctx.outbound_nbh); + } else if ctx.outbound_nbh.insert_neighbor(peer.clone(), ctx.local) { + // Update the neighbor filter. + nb_filter.add(*peer.peer_id()); + + // Fire `OutgoingPeering` event with status = `true`. + publish_peering_event::(peer, status, ctx.local, ctx.event_tx, ctx.inbound_nbh, ctx.outbound_nbh); + } else { + log::debug!("Failed to add neighbor to outbound neighborhood after successful peering request"); + } + } else { + log::debug!("Peering by {} denied.", ctx.peer_id); + } + + // Send the response notification. + if let Some(tx) = peer_reqval.response_tx { + tx.send(ctx.msg_bytes.to_vec()).expect("error sending response signal"); + } +} + +fn handle_drop_request( + _drop_req: DropPeeringRequest, + ctx: RecvContext, + nb_filter: &NeighborFilter, +) { + log::trace!("Handling drop request."); + + let mut removed_nb = ctx.inbound_nbh.remove_neighbor(ctx.peer_id); + + if let Some(nb) = ctx.outbound_nbh.remove_neighbor(ctx.peer_id) { + removed_nb.replace(nb); + + nb_filter.add(*ctx.peer_id); + + // TODO: trigger immediate outbound neighborhood update; currently we wait for the next interval + } + + if removed_nb.is_some() { + send_drop_peering_request_to_addr( + ctx.peer_addr, + *ctx.peer_id, + ctx.server_tx, + ctx.event_tx, + ctx.inbound_nbh, + ctx.outbound_nbh, + ); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SENDING +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Initiates a peering request. +/// +/// This function is blocking, but at most for `RESPONSE_TIMEOUT` seconds. 
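+///
+/// A minimal call sketch (it assumes that `peer_id`, `active_peers`, `request_mngr`, `server_tx`
+/// and `local` are already in scope; they are not provided by this function):
+///
+/// ```ignore
+/// match begin_peering(&peer_id, &active_peers, &request_mngr, &server_tx, &local).await {
+///     Some(true) => log::debug!("Peering accepted."),
+///     Some(false) => log::debug!("Peering denied."),
+///     None => log::debug!("No valid response within RESPONSE_TIMEOUT."),
+/// }
+/// ```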
+pub(crate) async fn begin_peering( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, + local: &Local, +) -> Option { + let (response_tx, response_rx) = request::response_chan(); + + send_peering_request_to_peer(peer_id, active_peers, request_mngr, server_tx, Some(response_tx), local); + + match tokio::time::timeout(RESPONSE_TIMEOUT, response_rx).await { + Ok(Ok(bytes)) => match PeeringResponse::from_protobuf(&bytes).map(|r| r.status()) { + Ok(status) => Some(status), + Err(e) => { + log::debug!("Peering response decode error: {}", e); + None + } + }, + Ok(Err(e)) => { + log::debug!("Peering response signal error: {}", e); + None + } + Err(e) => { + log::debug!("Peering response timeout: {}", e); + + // The response didn't arrive in time => remove the request. + let _ = request_mngr.write().remove::(peer_id); + + None + } + } +} + +/// Sends a peering request to a peer. +pub(crate) fn send_peering_request_to_peer( + peer_id: &PeerId, + active_peers: &ActivePeersList, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, + local: &Local, +) { + let peer_addr = active_peers + .read() + .find(peer_id) + .map(|p| { + p.peer() + .service_socketaddr(AUTOPEERING_SERVICE_NAME) + .expect("peer doesn't support autopeering") + }) + // Panic: Requests are sent to listed peers only + .expect("peer not in active peers list"); + + send_peering_request_to_addr(peer_addr, peer_id, request_mngr, server_tx, response_tx, local); +} + +/// Sends a peering request to a peer's address. +pub(crate) fn send_peering_request_to_addr( + peer_addr: SocketAddr, + peer_id: &PeerId, + request_mngr: &RequestManager, + server_tx: &ServerTx, + response_tx: Option, + local: &Local, +) { + log::trace!("Sending peering request to: {}", peer_id); + + let peer_req = request_mngr.write().new_peering_request(*peer_id, response_tx, local); + + let msg_bytes = peer_req.to_protobuf().to_vec(); + + server_tx + .send(OutgoingPacket { + msg_type: MessageType::PeeringRequest, + msg_bytes, + peer_addr, + }) + .expect("error sending peering request to server"); +} + +/// Sends a peering response to a peer's address. +pub(crate) fn send_peering_response_to_addr( + peer_addr: SocketAddr, + peer_id: &PeerId, + msg_bytes: &[u8], + tx: &ServerTx, + status: bool, +) { + log::trace!("Sending peering response to: {}", peer_id); + + let request_hash = message_hash(MessageType::PeeringRequest, msg_bytes); + + let peer_res = PeeringResponse::new(request_hash, status); + + let msg_bytes = peer_res.to_protobuf().to_vec(); + + tx.send(OutgoingPacket { + msg_type: MessageType::VerificationResponse, + msg_bytes, + peer_addr, + }) + .expect("error sending peering response to server"); +} + +/// Sends a drop-peering request to a peer. +pub(crate) fn send_drop_peering_request_to_peer( + peer: Peer, + server_tx: &ServerTx, + event_tx: &EventTx, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, +) { + let peer_addr = peer + .service_socketaddr(AUTOPEERING_SERVICE_NAME) + .expect("peer doesn't support autopeering"); + let peer_id = peer.into_id(); + + send_drop_peering_request_to_addr(peer_addr, peer_id, server_tx, event_tx, inbound_nbh, outbound_nbh); +} + +/// Sends a drop-peering request to a peer's address. 
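+///
+/// Besides sending the request, this also publishes a `PeeringDropped` event via `event_tx`.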
+pub(crate) fn send_drop_peering_request_to_addr( + peer_addr: SocketAddr, + peer_id: PeerId, + server_tx: &ServerTx, + event_tx: &EventTx, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, +) { + log::trace!("Sending drop request to: {}", peer_id); + + let msg_bytes = DropPeeringRequest::new().to_protobuf().to_vec(); + + server_tx + .send(OutgoingPacket { + msg_type: MessageType::DropRequest, + msg_bytes, + peer_addr, + }) + .expect("error sending drop-peering request to server"); + + publish_drop_peering_event(peer_id, event_tx, inbound_nbh, outbound_nbh); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// EVENTS +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Publishes the corresponding peering event [`IncomingPeering`], or [`OutgoingPeering`]. +pub(crate) fn publish_peering_event( + peer: Peer, + status: Status, + local: &Local, + event_tx: &EventTx, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, +) { + log::debug!( + "Peering with {}; status: {}, direction: {}, #out_nbh: {}, #in_nbh: {}", + if IS_INBOUND { "in" } else { "out" }, + status, + peer.peer_id(), + outbound_nbh.len(), + inbound_nbh.len(), + ); + + let distance = salt_distance(&local.peer_id(), peer.peer_id(), &{ + if IS_INBOUND { + local.private_salt() + } else { + local.public_salt() + } + }); + + // Panic: We don't allow channel send failures. + event_tx + .send(if IS_INBOUND { + Event::IncomingPeering { peer, distance } + } else { + Event::OutgoingPeering { peer, distance } + }) + .expect("error publishing incoming/outgoing peering event"); +} + +fn publish_drop_peering_event( + peer_id: PeerId, + event_tx: &EventTx, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, +) { + log::debug!( + "Peering dropped with {}; #out_nbh: {} #in_nbh: {}", + peer_id, + outbound_nbh.len(), + inbound_nbh.len(), + ); + + // Panic: We don't allow channel send failures. + event_tx + .send(Event::PeeringDropped { peer_id }) + .expect("error sending peering-dropped event"); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// HELPERS +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Clone)] +pub(crate) struct SaltUpdateContext { + local: Local, + nb_filter: NeighborFilter, + inbound_nbh: InboundNeighborhood, + outbound_nbh: OutboundNeighborhood, + server_tx: ServerTx, + event_tx: EventTx, +} + +impl SaltUpdateContext { + pub(crate) fn new( + local: Local, + nb_filter: NeighborFilter, + inbound_nbh: InboundNeighborhood, + outbound_nbh: OutboundNeighborhood, + server_tx: ServerTx, + event_tx: EventTx, + ) -> Self { + Self { + local, + nb_filter, + inbound_nbh, + outbound_nbh, + server_tx, + event_tx, + } + } +} + +// Regularly update the salts of the local peer. +pub(crate) fn update_salts_fn( + drop_neighbors_on_salt_update: bool, +) -> Repeat> { + Box::new(move |ctx| { + update_salts( + drop_neighbors_on_salt_update, + &ctx.local, + &ctx.nb_filter, + &ctx.inbound_nbh, + &ctx.outbound_nbh, + &ctx.server_tx, + &ctx.event_tx, + ) + }) +} + +fn update_salts( + drop_neighbors_on_salt_update: bool, + local: &Local, + nb_filter: &NeighborFilter, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, + server_tx: &ServerTx, + event_tx: &EventTx, +) { + // Create a new private salt. 
+ let private_salt = Salt::new(SALT_LIFETIME_SECS); + let private_salt_lifetime = private_salt.expiration_time(); + local.set_private_salt(private_salt); + + // Create a new public salt. + let public_salt = Salt::new(SALT_LIFETIME_SECS); + let public_salt_lifetime = public_salt.expiration_time(); + local.set_public_salt(public_salt); + + if drop_neighbors_on_salt_update { + // Drop all neighbors. + for peer in inbound_nbh.peers().into_iter().chain(outbound_nbh.peers().into_iter()) { + send_drop_peering_request_to_peer(peer, server_tx, event_tx, inbound_nbh, outbound_nbh); + } + + // Erase the neighborhoods. + inbound_nbh.clear(); + outbound_nbh.clear(); + + // Reset the neighbor filter. + nb_filter.clear(); + } else { + // Update the distances with the new salts. + inbound_nbh.update_distances(local); + outbound_nbh.update_distances(local); + } + + log::debug!( + "Salts updated; private: {}, public: {}", + private_salt_lifetime, + public_salt_lifetime, + ); + + // Publish 'SaltUpdated' event. + event_tx + .send(Event::SaltUpdated { + public_salt_lifetime, + private_salt_lifetime, + }) + .expect("error publishing salt-updated event"); +} + +/// Adds a neighbor to a neighborhood. Possibly even replaces the so far furthest neighbor. +pub(crate) fn add_or_replace_neighbor( + peer: Peer, + local: &Local, + inbound_nbh: &InboundNeighborhood, + outbound_nbh: &OutboundNeighborhood, + server_tx: &ServerTx, + event_tx: &EventTx, +) -> bool { + // Hive.go: drop furthest neighbor if necessary + if let Some(peer) = if IS_INBOUND { + inbound_nbh.remove_furthest_if_full() + } else { + outbound_nbh.remove_furthest_if_full() + } { + send_drop_peering_request_to_peer(peer, server_tx, event_tx, inbound_nbh, outbound_nbh); + } + + if IS_INBOUND { + inbound_nbh.insert_neighbor(peer, local) + } else { + outbound_nbh.insert_neighbor(peer, local) + } +} diff --git a/bee-network/bee-autopeering/src/peering/messages.rs b/bee-network/bee-autopeering/src/peering/messages.rs new file mode 100644 index 0000000000..fe04a05fa6 --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/messages.rs @@ -0,0 +1,192 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{local::salt::Salt, proto, request::Request}; + +use base64 as bs64; +use crypto::hashes::sha::SHA256_LEN; +use prost::{bytes::BytesMut, DecodeError, EncodeError, Message}; + +use std::fmt; + +#[derive(Clone)] +pub(crate) struct PeeringRequest { + timestamp: u64, + salt: Salt, +} + +impl PeeringRequest { + pub(crate) fn new(salt: Salt) -> Self { + let timestamp = crate::time::unix_now_secs(); + + Self { timestamp, salt } + } + + pub(crate) fn timestamp(&self) -> u64 { + self.timestamp + } + + pub(crate) fn salt(&self) -> &Salt { + &self.salt + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::PeeringRequest { timestamp, salt } = proto::PeeringRequest::decode(bytes)?; + let proto::Salt { bytes, exp_time } = salt.ok_or(Error::MissingSalt)?; + + Ok(Self { + timestamp: timestamp as u64, + salt: Salt { + bytes: bytes.try_into().map_err(|_| Error::InvalidSalt)?, + expiration_time: exp_time, + }, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let peering_req = proto::PeeringRequest { + timestamp: self.timestamp as i64, + salt: Some(proto::Salt { + bytes: self.salt.bytes().to_vec(), + exp_time: self.salt.expiration_time(), + }), + }; + + let mut bytes = BytesMut::with_capacity(peering_req.encoded_len()); + + // Panic: we have allocated a properly sized 
buffer. + peering_req.encode(&mut bytes).expect("encoding peering request failed"); + + bytes + } +} + +impl fmt::Debug for PeeringRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PeeringRequest") + .field("timestamp", &self.timestamp) + .field("salt_bytes", &bs64::encode(self.salt().bytes())) + .field("salt_expiration_time", &self.salt().expiration_time()) + .finish() + } +} + +impl Request for PeeringRequest {} + +pub(crate) struct PeeringResponse { + request_hash: [u8; SHA256_LEN], + status: bool, +} + +impl PeeringResponse { + pub(crate) fn new(request_hash: [u8; SHA256_LEN], status: bool) -> Self { + Self { request_hash, status } + } + + pub(crate) fn request_hash(&self) -> &[u8] { + &self.request_hash + } + + pub(crate) fn status(&self) -> bool { + self.status + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::PeeringResponse { req_hash, status } = proto::PeeringResponse::decode(bytes)?; + + Ok(Self { + request_hash: req_hash.try_into().map_err(|_| Error::RestoreRequestHash)?, + status, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let peering_res = proto::PeeringResponse { + req_hash: self.request_hash.to_vec(), + status: self.status, + }; + + let mut bytes = BytesMut::with_capacity(peering_res.encoded_len()); + + // Panic: we have allocated a properly sized buffer. + peering_res + .encode(&mut bytes) + .expect("encoding peering response failed"); + + bytes + } +} + +impl fmt::Debug for PeeringResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PeeringResponse") + .field("request_hash", &bs58::encode(&self.request_hash).into_string()) + .field("status", &self.status) + .finish() + } +} + +// NOTE: We don't require a response for `DropRequest`, hence it doesn't need to impl `Request`. +pub(crate) struct DropPeeringRequest { + pub(crate) timestamp: u64, +} + +impl DropPeeringRequest { + pub(crate) fn new() -> Self { + let timestamp = crate::time::unix_now_secs(); + + Self { timestamp } + } + + pub(crate) fn timestamp(&self) -> u64 { + self.timestamp + } + + pub(crate) fn from_protobuf(bytes: &[u8]) -> Result { + let proto::PeeringDrop { timestamp } = proto::PeeringDrop::decode(bytes)?; + + Ok(Self { + timestamp: timestamp as u64, + }) + } + + #[allow(clippy::wrong_self_convention)] + pub(crate) fn to_protobuf(&self) -> BytesMut { + let peering_drop = proto::PeeringDrop { + timestamp: self.timestamp as i64, + }; + + let mut bytes = BytesMut::with_capacity(peering_drop.encoded_len()); + + // Panic: we have allocated a properly sized buffer. 
+ peering_drop + .encode(&mut bytes) + .expect("encoding drop-peering request failed"); + + bytes + } +} + +impl fmt::Debug for DropPeeringRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DropPeeringRequest") + .field("timestamp", &self.timestamp) + .finish() + } +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum Error { + #[error("missing salt")] + MissingSalt, + #[error("invalid salt")] + InvalidSalt, + #[error("{0}")] + ProtobufDecode(#[from] DecodeError), + #[error("{0}")] + ProtobufEncode(#[from] EncodeError), + #[error("restore request hash")] + RestoreRequestHash, +} diff --git a/bee-network/bee-autopeering/src/peering/mod.rs b/bee-network/bee-autopeering/src/peering/mod.rs new file mode 100644 index 0000000000..f5a5e94893 --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/mod.rs @@ -0,0 +1,11 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod filter; +pub(crate) mod manager; +pub(crate) mod messages; +pub(crate) mod neighbor; +pub(crate) mod update; + +pub use manager::Status; +pub use neighbor::{Distance, NeighborValidator}; diff --git a/bee-network/bee-autopeering/src/peering/neighbor.rs b/bee-network/bee-autopeering/src/peering/neighbor.rs new file mode 100644 index 0000000000..1c55cedff3 --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/neighbor.rs @@ -0,0 +1,351 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + hash, + local::{salt::Salt, Local}, + peer::{peer_id::PeerId, Peer}, +}; + +use prost::bytes::{Buf, Bytes}; + +use std::{ + cmp, fmt, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, + vec, +}; + +/// The distance between the local entity and a neighbor. +pub type Distance = u32; + +pub(crate) const SIZE_INBOUND: usize = 4; +pub(crate) const SIZE_OUTBOUND: usize = 4; + +/// Decides whether a peer is a suitable neighbor, or not. +pub trait NeighborValidator +where + Self: Send + Sync + Clone, +{ + /// Returns `true` if the given [`Peer`](crate::peer::Peer) is a valid neighbor. + fn is_valid(&self, peer: &Peer) -> bool; +} + +// A neighbor is a peer with a distance metric. +#[derive(Clone, Debug)] +pub(crate) struct Neighbor { + peer: Peer, + distance: Distance, +} + +impl Neighbor { + pub(crate) fn new(peer: Peer, distance: Distance) -> Self { + Self { peer, distance } + } + + pub(crate) fn peer(&self) -> &Peer { + &self.peer + } + + pub(crate) fn distance(&self) -> Distance { + self.distance + } + + pub(crate) fn into_peer(self) -> Peer { + self.peer + } +} + +impl fmt::Display for Neighbor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}:{}", self.peer().peer_id(), self.distance()) + } +} + +impl Eq for Neighbor {} +impl PartialEq for Neighbor { + fn eq(&self, other: &Self) -> bool { + self.distance == other.distance + } +} +impl PartialOrd for Neighbor { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for Neighbor { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.distance.cmp(&other.distance) + } +} + +impl AsRef for Neighbor { + fn as_ref(&self) -> &Peer { + &self.peer + } +} + +#[derive(Clone, Default)] +pub(crate) struct Neighborhood { + inner: Arc>>, +} + +impl Neighborhood { + /// Creates a new empty neighborhood. + pub(crate) fn new() -> Self { + Self::default() + } + + /// Inserts a peer to the neighborhood. 
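+    ///
+    /// Returns `true` if the peer was added, or `false` if the neighborhood was already full.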
+ pub(crate) fn insert_neighbor(&self, peer: Peer, local: &Local) -> bool { + self.write().insert_neighbor(peer, local) + } + + /// Removes a peer from the neighborhood. + pub(crate) fn remove_neighbor(&self, peer_id: &PeerId) -> Option { + self.write().remove_neighbor(peer_id) + } + + /// Checks whether the candidate is a preferred neighbor. + pub(crate) fn is_preferred(&self, candidate: &Neighbor) -> bool { + self.write().is_preferred(candidate) + } + + /// Picks the first candidate that is closer than the currently furthest neighbor. + pub(crate) fn select_from_candidate_list<'a>(&self, candidates: &'a [&'a Neighbor]) -> Option<&'a Peer> { + self.write().select_from_candidate_list(candidates) + } + + /// Removes the furthest neighbor from the neighborhood. + pub(crate) fn remove_furthest_if_full(&self) -> Option { + self.write().remove_furthest_if_full() + } + + /// Updates all distances to the neighbors (e.g. after a salt update). + pub(crate) fn update_distances(&self, local: &Local) { + self.write().update_distances(local); + } + + /// Clears the neighborhood, removing all neighbors. + pub(crate) fn clear(&self) { + self.write().clear(); + } + + /// Returns the number of neighbors within the neighborhood. + pub(crate) fn len(&self) -> usize { + self.read().len() + } + + /// Returns whether the neighborhood is full, i.e. the upper bound is reached. + pub(crate) fn is_full(&self) -> bool { + self.read().is_full() + } + + /// Collect all peers belonging to the neighborhood into a `Vec`. + pub(crate) fn peers(&self) -> Vec { + self.read().neighbors.iter().map(|d| d.peer()).cloned().collect() + } + + fn read(&self) -> RwLockReadGuard> { + self.inner.read().expect("error getting read access") + } + + fn write(&self) -> RwLockWriteGuard> { + self.inner.write().expect("error getting write access") + } +} + +#[derive(Debug)] +pub(crate) struct NeighborhoodInner { + neighbors: Vec, +} + +impl NeighborhoodInner { + fn insert_neighbor(&mut self, peer: Peer, local: &Local) -> bool { + // If the peer already exists remove it. + // NOTE: It's a bit less efficient doing it like this, but the code requires less branching this way. + let _ = self.remove_neighbor(peer.peer_id()); + + if self.neighbors.len() >= N { + return false; + } + + // Calculate the distance to that peer. + let distance = salt_distance(&local.peer_id(), peer.peer_id(), &{ + if INBOUND { + local.private_salt() + } else { + local.public_salt() + } + }); + + self.neighbors.push(Neighbor { distance, peer }); + + true + } + + fn remove_neighbor(&mut self, peer_id: &PeerId) -> Option { + if self.neighbors.is_empty() { + None + } else if let Some(index) = self.neighbors.iter().position(|pd| pd.peer().peer_id() == peer_id) { + let Neighbor { peer, .. } = self.neighbors.remove(index); + Some(peer) + } else { + None + } + } + + fn is_preferred(&mut self, candidate: &Neighbor) -> bool { + if let Some(furthest) = self.find_furthest_if_full() { + candidate < furthest + } else { + true + } + } + + fn select_from_candidate_list<'a>(&mut self, candidates: &'a [&'a Neighbor]) -> Option<&'a Peer> { + if candidates.is_empty() { + None + } else if let Some(furthest) = self.find_furthest_if_full() { + for candidate in candidates { + if *candidate < furthest { + return Some(candidate.peer()); + } + } + None + } else { + // Any candidate can be selected: pick the first. 
+ Some(candidates[0].peer()) + } + } + + fn find_furthest_if_full(&mut self) -> Option<&Neighbor> { + if self.neighbors.len() >= N { + self.neighbors.sort_unstable(); + self.neighbors.last() + } else { + None + } + } + + fn remove_furthest_if_full(&mut self) -> Option { + // Note: Both methods require unique access to `self`, so we need to copy the peer id. + if let Some(peer_id) = self.find_furthest_if_full().map(|d| *d.peer().peer_id()) { + self.remove_neighbor(&peer_id) + } else { + None + } + } + + fn update_distances(&mut self, local: &Local) { + let local_id = local.peer_id(); + let salt = if INBOUND { + local.private_salt() + } else { + local.public_salt() + }; + + self.neighbors.iter_mut().for_each(|pd| { + pd.distance = salt_distance(&local_id, pd.peer().peer_id(), &salt); + }); + } + + fn len(&self) -> usize { + self.neighbors.len() + } + + fn is_full(&self) -> bool { + self.neighbors.len() == N + } + + fn clear(&mut self) { + self.neighbors.clear(); + } +} + +impl Default for NeighborhoodInner { + fn default() -> Self { + Self { + neighbors: Vec::with_capacity(N), + } + } +} + +impl fmt::Display for NeighborhoodInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.neighbors.len(), N) + } +} + +// hive.go: +// returns the distance (uint32) between x and y by xoring the hash of x and (y + salt): +// xor( hash(x), hash(y+salt) )[:4] as little-endian uint32 +pub(crate) fn salt_distance(peer1: &PeerId, peer2: &PeerId, salt: &Salt) -> Distance { + let hash1 = hash::data_hash(peer1.id_bytes()); + let hash2 = hash::data_hash(&concat(peer2.id_bytes(), salt.bytes())); + + let xored = xor(hash1, hash2); + + Bytes::copy_from_slice(&xored[..4]).get_u32_le() +} + +fn concat(bytes1: &[u8; N], bytes2: &[u8; M]) -> Vec { + let l: usize = N + M; + let mut bytes = vec![0u8; l]; + bytes[0..N].copy_from_slice(bytes1); + bytes[N..l].copy_from_slice(bytes2); + bytes +} + +fn xor(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut xored = [0u8; N]; + // TODO: use array_zip when available (rust-lang/rust#80094) + a.iter() + .zip(b.iter()) + .enumerate() + .for_each(|(i, (a, b))| xored[i] = a ^ b); + + xored +} + +#[cfg(test)] +mod tests { + use super::*; + + fn distance(peer1: &PeerId, peer2: &PeerId) -> Distance { + let hash1 = hash::data_hash(peer1.id_bytes()); + let hash2 = hash::data_hash(peer2.id_bytes()); + let xored = xor(hash1, hash2); + Bytes::copy_from_slice(&xored[..4]).get_u32_le() + } + + #[test] + fn neighborhood_size_limit() { + let local = Local::generate(); + let outbound_nh = Neighborhood::<2, false>::new(); + for i in 0u8..5 { + outbound_nh.write().insert_neighbor(Peer::new_test_peer(i), &local); + } + assert_eq!(outbound_nh.read().len(), 2); + } + + #[test] + fn byte_array_concatenation() { + let bytes1 = [1, 2, 3, 4]; + let bytes2 = [5, 6, 7, 8, 9]; + assert_eq!(vec![1, 2, 3, 4, 5, 6, 7, 8, 9], concat(&bytes1, &bytes2)); + } + + #[test] + fn distance_calculation() { + let peer_id1 = PeerId::new_static(); + let peer_id2 = PeerId::new_static(); + assert_eq!(peer_id1, peer_id2); + + let distance = distance(&peer_id1, &peer_id2); + assert_eq!(0, distance); + + let salt = Salt::new_zero_salt(); + let salted_distance = salt_distance(&peer_id1, &peer_id2, &salt); + assert_eq!(1184183819, salted_distance); + } +} diff --git a/bee-network/bee-autopeering/src/peering/update.rs b/bee-network/bee-autopeering/src/peering/update.rs new file mode 100644 index 0000000000..4e4ccd9324 --- /dev/null +++ b/bee-network/bee-autopeering/src/peering/update.rs @@ -0,0 
+1,121 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use super::{ + filter::NeighborFilter, + manager::{self, OutboundNeighborhood}, + neighbor::{salt_distance, Neighbor}, +}; + +use crate::{ + delay::ManualDelayFactory, + discovery::manager::get_verified_peers, + local::Local, + peer::lists::ActivePeersList, + request::RequestManager, + server::ServerTx, + task::Repeat, + time::{self, MINUTE, SECOND}, + NeighborValidator, +}; + +use std::time::Duration; + +/// Outbound neighborhood update interval if there are remaining slots. +#[allow(clippy::identity_op)] +pub(crate) const OPEN_OUTBOUND_NBH_UPDATE_SECS: Duration = Duration::from_secs(1 * SECOND); +/// Outbound neighborhood update interval if there are no remaining slots. +#[allow(clippy::identity_op)] +const FULL_OUTBOUND_NBH_UPDATE_SECS: Duration = Duration::from_secs(1 * MINUTE); + +pub(crate) static OUTBOUND_NBH_UPDATE_INTERVAL: ManualDelayFactory = + ManualDelayFactory::new(OPEN_OUTBOUND_NBH_UPDATE_SECS); + +#[derive(Clone)] +pub(crate) struct UpdateContext { + pub(crate) local: Local, + pub(crate) request_mngr: RequestManager, + pub(crate) active_peers: ActivePeersList, + pub(crate) nb_filter: NeighborFilter, + pub(crate) outbound_nbh: OutboundNeighborhood, + pub(crate) server_tx: ServerTx, +} + +pub(crate) fn update_outbound_neighborhood_fn() -> Repeat> { + Box::new(|ctx| update_outbound(ctx)) +} + +// Hive.go: updateOutbound updates outbound neighbors. +fn update_outbound(ctx: &UpdateContext) { + let local_id = ctx.local.peer_id(); + let local_salt = ctx.local.public_salt(); + + // TODO: write `get_verified_peers_sorted` which collects verified peers into a BTreeSet + let verif_peers = get_verified_peers(&ctx.active_peers) + .into_iter() + .map(|p| { + let peer = p.into_peer(); + let peer_id = *peer.peer_id(); + Neighbor::new(peer, salt_distance(&local_id, &peer_id, &local_salt)) + }) + .collect::>(); + + if verif_peers.is_empty() { + log::trace!("Currently no verified peers."); + return; + } + + // Apply the filter to the verified peers to yield a set of neighbor candidates. + let mut candidates = ctx.nb_filter.apply_list(&verif_peers); + + if candidates.is_empty() { + log::trace!("Currently no suitable candidates."); + return; + } + + // Sort candidats by their distance, so that we start with the closest candidate. + candidates.sort_unstable(); + + // Hive.go: select new candidate + if let Some(candidate) = ctx.outbound_nbh.select_from_candidate_list(&candidates).cloned() { + let ctx_ = ctx.clone(); + + tokio::spawn(async move { + if let Some(status) = manager::begin_peering( + candidate.peer_id(), + &ctx_.active_peers, + &ctx_.request_mngr, + &ctx_.server_tx, + &ctx_.local, + ) + .await + { + if status { + set_outbound_update_interval(&ctx_.outbound_nbh, &ctx_.local); + } else { + ctx_.nb_filter.add(*candidate.peer_id()); + } + } else { + ctx_.nb_filter.add(*candidate.peer_id()); + } + }); + } +} + +fn set_outbound_update_interval(outbound_nbh: &OutboundNeighborhood, local: &Local) { + let mut delay = OPEN_OUTBOUND_NBH_UPDATE_SECS; + + if outbound_nbh.is_full() { + delay = FULL_OUTBOUND_NBH_UPDATE_SECS + }; + + // Panic: We don't allow invalid salts. 
+ let salt_expiration = + Duration::from_secs(time::until(local.public_salt().expiration_time()).expect("time until error")); + + if salt_expiration < delay { + delay = salt_expiration; + } + + OUTBOUND_NBH_UPDATE_INTERVAL.set(delay); +} diff --git a/bee-network/bee-autopeering/src/proto/discovery.proto b/bee-network/bee-autopeering/src/proto/discovery.proto new file mode 100644 index 0000000000..eaed3ee17b --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/discovery.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package proto; + +import "proto/peer.proto"; +import "proto/service.proto"; + +message Ping { + // version number and network ID to classify the protocol + uint32 version = 1; + uint32 network_id = 2; + + // unix time + int64 timestamp = 3; + + // endpoint of the sender; port and string form of the return IP address (e.g. "192.0.2.1", "[2001:db8::1]") + string src_addr = 4; + uint32 src_port = 5; + + // string form of receiver's IP + // This provides a way to discover the external address (after NAT). + string dst_addr = 6; +} + +message Pong { + // hash of the ping packet + bytes req_hash = 1; + + // services supported by the sender + ServiceMap services = 2; + + // string form of receiver's IP + // This should mirror the source IP of the Ping's IP packet. It provides a way to discover the external address (after NAT). + string dst_addr = 3; +} + +message DiscoveryRequest { + // unix time + int64 timestamp = 1; +} + +message DiscoveryResponse { + // hash of the corresponding request + bytes req_hash = 1; + // list of peers + repeated Peer peers = 2; +} diff --git a/bee-network/bee-autopeering/src/proto/packet.proto b/bee-network/bee-autopeering/src/proto/packet.proto new file mode 100644 index 0000000000..02320de49a --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/packet.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package proto; + +message Packet { + uint32 type = 1; + bytes data = 2; + bytes public_key = 3; + bytes signature = 4; +} diff --git a/bee-network/bee-autopeering/src/proto/peer.proto b/bee-network/bee-autopeering/src/proto/peer.proto new file mode 100644 index 0000000000..4e86fbbf80 --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/peer.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package proto; + +import "proto/service.proto"; + +// Minimal encoding of a peer +message Peer { + // public key used for signing + bytes public_key = 1; + // string form of the peers IP + string ip = 2; + // services supported by the peer + ServiceMap services = 3; +} diff --git a/bee-network/bee-autopeering/src/proto/peering.proto b/bee-network/bee-autopeering/src/proto/peering.proto new file mode 100644 index 0000000000..e973b5417e --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/peering.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package proto; + +import "proto/salt.proto"; + +message PeeringRequest { + // unix time + int64 timestamp = 1; + // salt of the requester + Salt salt = 2; +} + +message PeeringResponse { + // hash of the corresponding request + bytes req_hash = 1; + // response of a peering request + bool status = 2; +} + +message PeeringDrop { + // unix time + int64 timestamp = 1; +} diff --git a/bee-network/bee-autopeering/src/proto/salt.proto b/bee-network/bee-autopeering/src/proto/salt.proto new file mode 100644 index 0000000000..73dacbfbb0 --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/salt.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package proto; + +message Salt { + // value of the salt + bytes bytes = 1; + // expiration 
time of the salt + fixed64 exp_time = 2; +} diff --git a/bee-network/bee-autopeering/src/proto/service.proto b/bee-network/bee-autopeering/src/proto/service.proto new file mode 100644 index 0000000000..4eceb5d8e9 --- /dev/null +++ b/bee-network/bee-autopeering/src/proto/service.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package proto; + +// Mapping between a service ID and its tuple network_address +// e.g., map[autopeering:&{tcp, 198.51.100.1:80}] +message ServiceMap { + map map = 1; +} + +// The service type (e.g., tcp, upd) and the address (e.g., 198.51.100.1:80) +message NetworkAddress { + string network = 1; + uint32 port = 2; +} diff --git a/bee-network/bee-autopeering/src/request.rs b/bee-network/bee-autopeering/src/request.rs new file mode 100644 index 0000000000..cffbdc33e7 --- /dev/null +++ b/bee-network/bee-autopeering/src/request.rs @@ -0,0 +1,206 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + discovery::messages::{DiscoveryRequest, VerificationRequest}, + hash, + hash::message_hash, + local::Local, + packet::MessageType, + peer::peer_id::PeerId, + peering::messages::PeeringRequest, + task::Repeat, + time::{self, Timestamp}, +}; + +use tokio::sync::oneshot; + +pub(crate) use oneshot::channel as response_chan; + +use std::{ + any::TypeId, + collections::HashMap, + fmt::Debug, + net::{IpAddr, SocketAddr}, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, + time::Duration, +}; + +type RequestHash = [u8; hash::SHA256_LEN]; +pub(crate) type ResponseTx = oneshot::Sender>; + +// If the request is not answered within that time it gets removed from the manager, and any response +// coming in later will be deemed invalid. +pub(crate) const REQUEST_EXPIRATION: Duration = Duration::from_secs(20); +pub(crate) const EXPIRED_REQUEST_REMOVAL_INTERVAL: Duration = Duration::from_secs(1); +pub(crate) const RESPONSE_TIMEOUT: Duration = Duration::from_millis(500); + +// Marker trait for requests. +pub(crate) trait Request: Debug + Clone {} + +#[derive(PartialEq, Eq, Hash, Debug, Clone)] +pub(crate) struct RequestKey { + pub(crate) peer_id: PeerId, + pub(crate) request_id: TypeId, +} + +pub(crate) struct RequestValue { + pub(crate) request_hash: RequestHash, + pub(crate) issue_time: u64, + pub(crate) response_tx: Option, +} + +#[derive(Clone)] +pub(crate) struct RequestManager { + inner: Arc>, +} + +impl RequestManager { + /// Creates a new request manager. + pub(crate) fn new(version: u32, network_id: u32, source_addr: SocketAddr) -> Self { + Self { + inner: Arc::new(RwLock::new(RequestManagerInner { + version, + network_id, + source_addr, + open_requests: HashMap::default(), + })), + } + } + + pub(crate) fn read(&self) -> RwLockReadGuard { + // Panic: We don't allow poisened locks. + self.inner.read().expect("error getting read access") + } + + pub(crate) fn write(&self) -> RwLockWriteGuard { + // Panic: We don't allow poisened locks. 
+ self.inner.write().expect("error getting write access") + } +} + +pub(crate) struct RequestManagerInner { + version: u32, + network_id: u32, + source_addr: SocketAddr, + open_requests: HashMap, +} + +impl RequestManagerInner { + pub(crate) fn new_verification_request( + &mut self, + peer_id: PeerId, + peer_addr: IpAddr, + response_tx: Option, + ) -> VerificationRequest { + let key = RequestKey { + peer_id, + request_id: TypeId::of::(), + }; + + let verif_req = VerificationRequest::new(self.version, self.network_id, self.source_addr, peer_addr); + let timestamp = verif_req.timestamp(); + + let request_hash = message_hash(MessageType::VerificationRequest, &verif_req.to_protobuf()); + + let value = RequestValue { + request_hash, + issue_time: timestamp, + response_tx, + }; + + let _ = self.open_requests.insert(key, value); + + verif_req + } + + pub(crate) fn new_discovery_request( + &mut self, + peer_id: PeerId, + response_tx: Option, + ) -> DiscoveryRequest { + let key = RequestKey { + peer_id, + request_id: TypeId::of::(), + }; + + let disc_req = DiscoveryRequest::new(); + let timestamp = disc_req.timestamp(); + + let request_hash = message_hash(MessageType::DiscoveryRequest, &disc_req.to_protobuf()); + + let value = RequestValue { + request_hash, + issue_time: timestamp, + response_tx, + }; + + let _ = self.open_requests.insert(key, value); + + disc_req + } + + pub(crate) fn new_peering_request( + &mut self, + peer_id: PeerId, + response_tx: Option, + local: &Local, + ) -> PeeringRequest { + let key = RequestKey { + peer_id, + request_id: TypeId::of::(), + }; + + let peer_req = PeeringRequest::new(local.public_salt()); + + let timestamp = peer_req.timestamp(); + + let request_hash = message_hash(MessageType::PeeringRequest, &peer_req.to_protobuf()); + + let value = RequestValue { + request_hash, + issue_time: timestamp, + response_tx, + }; + + let _ = self.open_requests.insert(key, value); + + peer_req + } + + pub(crate) fn remove(&mut self, peer_id: &PeerId) -> Option { + let key = RequestKey { + peer_id: *peer_id, + request_id: TypeId::of::(), + }; + + self.open_requests.remove(&key) + } +} + +pub(crate) fn is_expired(past_ts: Timestamp) -> bool { + is_expired_internal(past_ts, time::unix_now_secs()) +} + +fn is_expired_internal(past_ts: Timestamp, now_ts: Timestamp) -> bool { + // Note: `time::since` returns `None` for a timestamp that lies in the future, hence it cannot be expired yet, + // and must therefore be mapped to `false` (not expired). + time::delta(past_ts, now_ts).map_or(false, |span| span >= REQUEST_EXPIRATION.as_secs()) +} + +pub(crate) fn remove_expired_requests_fn() -> Repeat { + Box::new(|mngr: &RequestManager| { + let now_ts = time::unix_now_secs(); + + // TODO: measure current time only once and reuse it. + // Retain only those that aren't expired yet, remove all others. 
+ mngr.write() + .open_requests + .retain(|_, v| !is_expired_internal(v.issue_time, now_ts)); + + let num_open_requests = mngr.read().open_requests.len(); + if num_open_requests > 0 { + log::trace!("Open requests: {}", num_open_requests); + } + }) +} diff --git a/bee-network/bee-autopeering/src/server.rs b/bee-network/bee-autopeering/src/server.rs new file mode 100644 index 0000000000..8dfc18ce2b --- /dev/null +++ b/bee-network/bee-autopeering/src/server.rs @@ -0,0 +1,310 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + config::AutopeeringConfig, + local::Local, + packet::{ + IncomingPacket, MessageType, OutgoingPacket, Packet, DISCOVERY_MSG_TYPE_RANGE, MAX_PACKET_SIZE, + PEERING_MSG_TYPE_RANGE, + }, + peer::{peer_id::PeerId, PeerStore}, + task::{Runnable, ShutdownRx, TaskManager}, +}; + +use tokio::{net::UdpSocket, sync::mpsc}; + +use std::{net::SocketAddr, sync::Arc}; + +pub(crate) use tokio::sync::mpsc::unbounded_channel as server_chan; + +const READ_BUFFER_SIZE: usize = crate::packet::MAX_PACKET_SIZE; + +pub(crate) type ServerRx = mpsc::UnboundedReceiver; +pub(crate) type ServerTx = mpsc::UnboundedSender; + +type IncomingPacketTx = mpsc::UnboundedSender; +type OutgoingPacketRx = mpsc::UnboundedReceiver; + +pub(crate) struct ServerConfig { + pub bind_addr: SocketAddr, +} + +impl ServerConfig { + pub(crate) fn new(config: &AutopeeringConfig) -> Self { + Self { + bind_addr: config.bind_addr(), + } + } +} + +pub(crate) struct IncomingPacketSenders { + pub(crate) discovery_tx: IncomingPacketTx, + pub(crate) peering_tx: IncomingPacketTx, +} + +pub(crate) struct Server { + config: ServerConfig, + local: Local, + incoming_senders: IncomingPacketSenders, + outgoing_rx: OutgoingPacketRx, +} + +impl Server { + pub fn new(config: ServerConfig, local: Local, incoming_senders: IncomingPacketSenders) -> (Self, ServerTx) { + let (outgoing_tx, outgoing_rx) = server_chan::(); + + ( + Self { + config, + local, + incoming_senders, + outgoing_rx, + }, + outgoing_tx, + ) + } + + pub async fn init(self, task_mngr: &mut TaskManager) { + let Server { + config, + local, + incoming_senders, + outgoing_rx, + } = self; + + // Bind the UDP socket to the configured address. + // Panic: We don't allow UDP socket binding to fail. + let socket = UdpSocket::bind(&config.bind_addr) + .await + .expect("error binding udp socket"); + + // The Tokio docs explain that there's no split method, and that we have to arc the UdpSocket in order to share + // it. + let incoming_socket = Arc::new(socket); + let outgoing_socket = Arc::clone(&incoming_socket); + + let incoming_packet_handler = IncomingPacketHandler { + incoming_socket, + incoming_senders, + bind_addr: config.bind_addr, + }; + + let outgoing_packet_handler = OutgoingPacketHandler { + outgoing_socket, + outgoing_rx, + local, + bind_addr: config.bind_addr, + }; + + task_mngr.run::(incoming_packet_handler); + task_mngr.run::(outgoing_packet_handler); + } +} + +struct IncomingPacketHandler { + incoming_socket: Arc, + incoming_senders: IncomingPacketSenders, + bind_addr: SocketAddr, +} + +struct OutgoingPacketHandler { + outgoing_socket: Arc, + outgoing_rx: OutgoingPacketRx, + local: Local, + bind_addr: SocketAddr, +} + +// Note: Invalid packets from peers are not logged as warnings because the fault is not on our side. 
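+// Each received datagram is decoded into a `Packet`, its signature is verified against the sender's
+// public key, and the contained message is then forwarded to either the discovery or the peering
+// manager, depending on its message type.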
+#[async_trait::async_trait] +impl Runnable for IncomingPacketHandler { + const NAME: &'static str = "IncomingPacketHandler"; + const SHUTDOWN_PRIORITY: u8 = 2; + + type ShutdownSignal = ShutdownRx; + + async fn run(self, mut shutdown_rx: Self::ShutdownSignal) { + let IncomingPacketHandler { + incoming_socket, + incoming_senders, + bind_addr, + } = self; + + let mut packet_bytes = [0; READ_BUFFER_SIZE]; + + let IncomingPacketSenders { + discovery_tx, + peering_tx, + } = incoming_senders; + + 'recv: loop { + tokio::select! { + _ = &mut shutdown_rx => { + break; + } + r = incoming_socket.recv_from(&mut packet_bytes) => { + match r { + Ok((n, peer_addr)) => { + if peer_addr == bind_addr { + log::trace!("Received bytes from own bind address {}. Ignoring packet.", peer_addr); + continue 'recv; + } + + if n > MAX_PACKET_SIZE { + log::trace!("Received too many bytes from {}. Ignoring packet.", peer_addr); + continue 'recv; + } + + log::trace!("Received {} bytes from {}.", n, peer_addr); + + // Decode the packet. + let packet = match Packet::from_protobuf(&packet_bytes[..n]) { + Ok(packet) => packet, + Err(_) => { + log::trace!("Error decoding incoming packet from {}. Ignoring packet.", peer_addr); + continue 'recv; + } + }; + + // Unmarshal the message. + let (msg_type, msg_bytes) = match unmarshal(packet.msg_bytes()) { + Ok((msg_type, msg_bytes)) => (msg_type, msg_bytes), + Err(_) => { + log::trace!("Error unmarshalling incoming message from {}. Ignoring packet.", peer_addr); + continue 'recv; + } + }; + + // Restore the peer id. + let peer_id = PeerId::from_public_key(*packet.public_key()); + + // Verify the packet. + let message = packet.msg_bytes(); + let signature = packet.signature(); + if !packet.public_key().verify(signature, message) { + log::trace!("Received packet with invalid signature"); + continue 'recv; + } + + let packet = IncomingPacket { + msg_type, + msg_bytes, + peer_addr, + peer_id, + }; + + // Depending on the message type, forward it to the appropriate manager. + match msg_type as u8 { + t if DISCOVERY_MSG_TYPE_RANGE.contains(&t) => { + // Panic: We don't allow channel send failures. + discovery_tx.send(packet).expect("channel send error: discovery"); + } + t if PEERING_MSG_TYPE_RANGE.contains(&t) => { + // Panic: We don't allow channel send failures. + peering_tx.send(packet).expect("channel send error: peering"); + } + _ => log::trace!("Received invalid message type. Ignoring packet."), + } + } + Err(e) => { + log::error!("UDP socket read error; stopping incoming packet handler. Cause: {}", e); + // TODO: initiate graceful shutdown + break 'recv; + } + } + } + } + } + } +} + +#[async_trait::async_trait] +impl Runnable for OutgoingPacketHandler { + const NAME: &'static str = "OutgoingPacketHandler"; + const SHUTDOWN_PRIORITY: u8 = 3; + + type ShutdownSignal = ShutdownRx; + + async fn run(self, mut shutdown_rx: Self::ShutdownSignal) { + let OutgoingPacketHandler { + outgoing_socket, + mut outgoing_rx, + local, + bind_addr, + } = self; + + 'recv: loop { + tokio::select! { + _ = &mut shutdown_rx => { + break; + } + o = outgoing_rx.recv() => { + if let Some(packet) = o { + let OutgoingPacket { + msg_type, + msg_bytes, + peer_addr, + } = packet; + + if peer_addr == bind_addr { + log::warn!("Trying to send to own bind address: {}. 
Ignoring packet.", peer_addr); + continue 'recv; + } + + let marshalled_bytes = marshal(msg_type, &msg_bytes); + + let signature = local.sign(&marshalled_bytes); + let packet = Packet::new(msg_type, &marshalled_bytes, local.public_key(), signature); + + let bytes = packet.to_protobuf(); + + if bytes.len() > MAX_PACKET_SIZE { + log::warn!("Trying to send too many bytes to {}. Ignoring...", peer_addr); + continue 'recv; + } + + // TODO: Make sure this won't occur by introducing IPv4 and IPv6 outgoing packet handler. + let n = outgoing_socket.send_to(&bytes, peer_addr).await.expect("socket send error"); + + log::trace!("Sent {} bytes to {}.", n, peer_addr); + } else { + // All `outgoing_tx` message senders were dropped. + break 'recv; + } + } + } + } + } +} + +// TODO: @pvdrz wants to optimize this. +pub(crate) fn marshal(msg_type: MessageType, msg_bytes: &[u8]) -> Vec { + let mut marshalled_bytes = vec![0u8; msg_bytes.len() + 1]; + marshalled_bytes[0] = msg_type as u8; + marshalled_bytes[1..].copy_from_slice(msg_bytes); + marshalled_bytes +} + +// TODO: @pvdrz wants to optimize this. +pub(crate) fn unmarshal(marshalled_bytes: &[u8]) -> Result<(MessageType, Vec), ()> { + let msg_type = num::FromPrimitive::from_u8(marshalled_bytes[0]).ok_or(())?; + + let mut msg_bytes = vec![0u8; marshalled_bytes.len() - 1]; + msg_bytes[..].copy_from_slice(&marshalled_bytes[1..]); + + Ok((msg_type, msg_bytes)) +} + +pub(crate) struct ServerSocket { + pub(crate) server_rx: ServerRx, + pub(crate) server_tx: ServerTx, +} + +impl ServerSocket { + pub fn new(rx: ServerRx, tx: ServerTx) -> Self { + Self { + server_rx: rx, + server_tx: tx, + } + } +} diff --git a/bee-network/bee-autopeering/src/task.rs b/bee-network/bee-autopeering/src/task.rs new file mode 100644 index 0000000000..6aa0291630 --- /dev/null +++ b/bee-network/bee-autopeering/src/task.rs @@ -0,0 +1,152 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + peer::{ + lists::{ActivePeersList, ReplacementPeersList}, + PeerStore, + }, + time::SECOND, +}; + +use priority_queue::PriorityQueue; +use tokio::{sync::oneshot, task::JoinHandle, time}; + +use std::{collections::HashMap, future::Future, time::Duration}; + +pub(crate) const MAX_SHUTDOWN_PRIORITY: u8 = 255; +const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5 * SECOND); + +pub(crate) type ShutdownRx = oneshot::Receiver<()>; +type ShutdownTx = oneshot::Sender<()>; + +pub(crate) type Repeat = Box Fn(&'a T) + Send>; + +// TODO: @thibault-martinez mentioned that we should consider using `backstage` instead. +/// Represents types driving an event loop. +#[async_trait::async_trait] +pub(crate) trait Runnable { + const NAME: &'static str; + const SHUTDOWN_PRIORITY: u8; + + type ShutdownSignal: Future + Send + Unpin + 'static; + + async fn run(self, shutdown_rx: Self::ShutdownSignal); +} + +pub(crate) struct TaskManager { + shutdown_handles: HashMap>, + shutdown_senders: HashMap, + shutdown_order: PriorityQueue, + peer_store: S, + active_peers: ActivePeersList, + replacements: ReplacementPeersList, +} + +impl TaskManager { + pub(crate) fn new(peer_store: S, active_peers: ActivePeersList, replacements: ReplacementPeersList) -> Self { + Self { + shutdown_handles: HashMap::with_capacity(N), + shutdown_senders: HashMap::with_capacity(N), + shutdown_order: PriorityQueue::with_capacity(N), + peer_store, + active_peers, + replacements, + } + } + + /// Runs a `Runnable`, which is a type that features an event loop. 
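+    ///
+    /// Panics if a runnable with the same `NAME` has already been registered.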
+ pub(crate) fn run<R>(&mut self, runnable: R) + where + R: Runnable + 'static, + { + let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); + self.shutdown_senders.insert(R::NAME.into(), shutdown_tx); + + let handle = tokio::spawn(runnable.run(shutdown_rx)); + log::trace!("`{}` running.", R::NAME); + + assert!(!self.shutdown_handles.contains_key(R::NAME)); + self.shutdown_handles.insert(R::NAME.into(), handle); + + self.shutdown_order.push(R::NAME.into(), R::SHUTDOWN_PRIORITY); + } + + /// Repeats a command at certain intervals, provided a context `T`. It is shut down gracefully together with + /// all other spawned tasks, according to the given `name` and `shutdown_priority`. + pub(crate) fn repeat<T, D>(&mut self, f: Repeat<T>, mut delay: D, ctx: T, name: &str, shutdown_priority: u8) + where + T: Send + Sync + 'static, + D: Iterator<Item = Duration> + Send + 'static, + { + let (shutdown_tx, mut shutdown_rx) = oneshot::channel::<()>(); + self.shutdown_senders.insert(name.into(), shutdown_tx); + + let handle = tokio::spawn(async move { + for duration in &mut delay { + tokio::select! { + _ = &mut shutdown_rx => break, + _ = time::sleep(duration) => f(&ctx), + } + } + }); + log::trace!("`{}` repeating.", name); + + assert!(!self.shutdown_handles.contains_key(name)); + self.shutdown_handles.insert(name.into(), handle); + + self.shutdown_order.push(name.into(), shutdown_priority); + } + + /// Executes the system shutdown. + pub(crate) async fn shutdown(self) { + let TaskManager { + mut shutdown_order, + mut shutdown_handles, + mut shutdown_senders, + peer_store, + active_peers, + replacements, + } = self; + + // Send the shutdown signal to all receivers. + let mut shutdown_order_clone = shutdown_order.clone(); + while let Some((task_name, _)) = shutdown_order_clone.pop() { + // Panic: unwrapping is fine since for every entry in `shutdown_order` there's + // a corresponding entry in `shutdown_senders`. + let shutdown_tx = shutdown_senders.remove(&task_name).unwrap(); + + log::trace!("Shutting down: {}", task_name); + shutdown_tx.send(()).expect("error sending shutdown signal"); + } + + // Wait for all tasks to shut down in a certain order and within a maximum amount of time. + if let Err(e) = time::timeout(SHUTDOWN_TIMEOUT, async { + while let Some((task_name, _)) = shutdown_order.pop() { + // Panic: unwrapping is fine, because we are in control of the data. + let task_handle = shutdown_handles.remove(&task_name).unwrap(); + + match task_handle.await { + Ok(_) => { + log::trace!("`{}` stopped.", task_name); + } + Err(e) => { + log::error!("Error shutting down `{}`. Cause: {}", task_name, e); + } + } + } + }) + .await + { + log::warn!("Not all spawned tasks were shut down in time: {}.", e); + } + + log::info!("Flushing data to peer store..."); + + peer_store.delete_all(); + peer_store.store_all_active(&active_peers); + peer_store.store_all_replacements(&replacements); + + log::info!("Done."); + } +} diff --git a/bee-network/bee-autopeering/src/time.rs b/bee-network/bee-autopeering/src/time.rs new file mode 100644 index 0000000000..86f1e397d5 --- /dev/null +++ b/bee-network/bee-autopeering/src/time.rs @@ -0,0 +1,35 @@ +// Copyright 2021 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::time::{SystemTime, UNIX_EPOCH}; + +pub(crate) type Timestamp = u64; +pub(crate) type Timespan = u64; + +/// Measured in seconds. +pub(crate) const SECOND: u64 = 1; +/// Measured in seconds. +pub(crate) const MINUTE: u64 = 60 * SECOND; +/// Measured in seconds.
+pub(crate) const HOUR: u64 = 60 * MINUTE; + +pub(crate) fn unix_now_secs() -> Timestamp { + unix_time_secs(SystemTime::now()) +} + +pub(crate) fn unix_time_secs(t: SystemTime) -> Timestamp { + // Panic: We don't allow faulty system clocks. + t.duration_since(UNIX_EPOCH).expect("system clock error").as_secs() +} + +pub(crate) fn since(past_ts: Timestamp) -> Option<Timespan> { + delta(past_ts, unix_now_secs()) +} + +pub(crate) fn until(future_ts: Timestamp) -> Option<Timespan> { + delta(unix_now_secs(), future_ts) +} + +pub(crate) fn delta(older_ts: Timestamp, newer_ts: Timestamp) -> Option<Timespan> { + newer_ts.checked_sub(older_ts) +} diff --git a/bee-network/CHANGELOG.md b/bee-network/bee-gossip/CHANGELOG.md similarity index 88% rename from bee-network/CHANGELOG.md rename to bee-network/bee-gossip/CHANGELOG.md index 76ca7dd860..8c0ceecf9f 100644 --- a/bee-network/CHANGELOG.md +++ b/bee-network/bee-gossip/CHANGELOG.md @@ -19,6 +19,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Security --> +## 0.3.0 - 2021-11-25 + +### Added + +- New PeerRelation variant: Discovered; +- Handling of discovered peers; + +### Changed + +- Renamed crate to `bee-gossip` and moved it into `bee-network` parent directory; + ## 0.2.2 - 2021-08-26 ### Changed diff --git a/bee-network/Cargo.toml b/bee-network/bee-gossip/Cargo.toml similarity index 81% rename from bee-network/Cargo.toml rename to bee-network/bee-gossip/Cargo.toml index 3e7f228822..9ebd6a7e41 100644 --- a/bee-network/Cargo.toml +++ b/bee-network/bee-gossip/Cargo.toml @@ -1,15 +1,13 @@ [package] -name = "bee-network" -version = "0.2.2" +name = "bee-gossip" +version = "0.3.0" authors = [ "IOTA Stiftung" ] edition = "2021" -description = """ -Networking functionality and types for nodes and clients participating in the IOTA protocol built on top of `libp2p`. -""" +description = "Allows peers in the same IOTA network to exchange gossip messages with each other."
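The `marshal`/`unmarshal` helpers in `server.rs` above implement a very small framing scheme: one leading byte carries the message type, and the remaining bytes are the protobuf-encoded message body. The standalone sketch below illustrates that round trip; the `MessageType` variants and discriminant values are invented for the example (the crate's real ones are not shown in this diff), and empty input is rejected instead of indexing into it.

```rust
// Standalone sketch of the one-byte type-prefix framing used by `marshal`/`unmarshal`.
// The variants and discriminants below are illustrative only.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
enum MessageType {
    DiscoveryRequest = 10,
    DiscoveryResponse = 11,
}

impl TryFrom<u8> for MessageType {
    type Error = ();

    fn try_from(b: u8) -> Result<Self, Self::Error> {
        match b {
            10 => Ok(Self::DiscoveryRequest),
            11 => Ok(Self::DiscoveryResponse),
            _ => Err(()),
        }
    }
}

fn marshal(msg_type: MessageType, msg_bytes: &[u8]) -> Vec<u8> {
    // Reserve one extra byte for the type tag, then append the payload.
    let mut out = Vec::with_capacity(msg_bytes.len() + 1);
    out.push(msg_type as u8);
    out.extend_from_slice(msg_bytes);
    out
}

fn unmarshal(bytes: &[u8]) -> Result<(MessageType, Vec<u8>), ()> {
    // Reject empty input instead of panicking on `bytes[0]`.
    let (&tag, body) = bytes.split_first().ok_or(())?;
    Ok((MessageType::try_from(tag)?, body.to_vec()))
}

fn main() {
    let framed = marshal(MessageType::DiscoveryRequest, b"payload");
    let (msg_type, body) = unmarshal(&framed).expect("valid frame");
    assert_eq!(msg_type, MessageType::DiscoveryRequest);
    assert_eq!(body, b"payload".to_vec());
    println!("{:?} with {} body bytes", msg_type, body.len());
}
```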
readme = "README.md" repository = "https://github.com/iotaledger/bee" license = "Apache-2.0" -keywords = [ "iota", "bee", "framework", "network", "libp2p" ] +keywords = [ "iota", "bee", "framework", "network", "gossip" ] homepage = "https://www.iota.org" [package.metadata.docs.rs] @@ -40,7 +38,7 @@ full = [ ] [dependencies] -bee-runtime = { version = "0.1.1-alpha", path = "../bee-runtime", default-features = false, optional = true } +bee-runtime = { version = "0.1.1-alpha", path = "../../bee-runtime", default-features = false, optional = true } async-trait = { version = "0.1.51", default-features = false, optional = true } futures = { version = "0.3.17", default-features = false, optional = true } @@ -59,7 +57,7 @@ tokio-stream = { version = "0.1.7", default-features = false, features = [ "time fern = { version = "0.6.0", default-features = false } hex = { version = "0.4.3", default-features = false, features = [ "alloc" ] } serial_test = { version = "0.5.1", default-features = false } -tokio = { version = "1.12.0", default-features = false, features = [ "rt", "rt-multi-thread", "macros", "signal", "time", "io-std", "io-util" ] } +tokio = { version = "1.12.0", default-features = false, features = [ "io-std", "io-util", "macros", "rt", "rt-multi-thread", "signal", "time" ] } [[example]] name = "chat" diff --git a/bee-network/bee-gossip/LICENSE b/bee-network/bee-gossip/LICENSE new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/bee-network/bee-gossip/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/bee-network/bee-gossip/README.md b/bee-network/bee-gossip/README.md new file mode 100644 index 0000000000..1a809791c8 --- /dev/null +++ b/bee-network/bee-gossip/README.md @@ -0,0 +1,3 @@ +# bee-gossip + +Allows peers in the same IOTA network to exchange gossip messages with each other. \ No newline at end of file diff --git a/bee-network/examples/chat.rs b/bee-network/bee-gossip/examples/chat.rs similarity index 97% rename from bee-network/examples/chat.rs rename to bee-network/bee-gossip/examples/chat.rs index 92db36f13e..5be2c02fb4 100644 --- a/bee-network/examples/chat.rs +++ b/bee-network/bee-gossip/examples/chat.rs @@ -25,7 +25,7 @@ mod common; #[tokio::main] async fn main() { - use bee_network::{alias, standalone::init, Event, Multiaddr, NetworkConfig, Protocol}; + use bee_gossip::{alias, standalone::init, Event, Multiaddr, NetworkConfig, Protocol}; use common::keys_and_ids::{gen_constant_net_id, gen_deterministic_keys, gen_deterministic_peer_id}; use std::{ env, diff --git a/bee-network/examples/common/keys_and_ids.rs b/bee-network/bee-gossip/examples/common/keys_and_ids.rs similarity index 100% rename from bee-network/examples/common/keys_and_ids.rs rename to bee-network/bee-gossip/examples/common/keys_and_ids.rs diff --git a/bee-network/examples/common/mod.rs b/bee-network/bee-gossip/examples/common/mod.rs similarity index 100% rename from bee-network/examples/common/mod.rs rename to bee-network/bee-gossip/examples/common/mod.rs diff --git a/bee-network/src/alias.rs b/bee-network/bee-gossip/src/alias.rs similarity index 100% rename from bee-network/src/alias.rs rename to bee-network/bee-gossip/src/alias.rs diff --git a/bee-network/src/config.rs b/bee-network/bee-gossip/src/config.rs similarity index 92% rename from bee-network/src/config.rs rename to bee-network/bee-gossip/src/config.rs index fceca303f8..2036ddb3f4 100644 --- a/bee-network/src/config.rs +++ b/bee-network/bee-gossip/src/config.rs @@ -15,7 +15,8 @@ const DEFAULT_BIND_MULTIADDR: &str = "/ip4/0.0.0.0/tcp/15600"; pub const DEFAULT_RECONNECT_INTERVAL_SECS: u64 = 30; const MIN_RECONNECT_INTERVAL_SECS: u64 = 1; -pub const 
DEFAULT_MAX_UNKOWN_PEERS: usize = 4; +pub const DEFAULT_MAX_UNKNOWN_PEERS: usize = 4; +pub const DEFAULT_MAX_DISCOVERED_PEERS: usize = 4; /// [`NetworkConfigBuilder`] errors. #[derive(Debug, thiserror::Error)] @@ -63,6 +64,7 @@ pub struct NetworkConfig { pub(crate) bind_multiaddr: Multiaddr, pub(crate) reconnect_interval_secs: u64, pub(crate) max_unknown_peers: usize, + pub(crate) max_discovered_peers: usize, pub(crate) static_peers: HashSet<Peer>, } @@ -154,6 +156,11 @@ impl NetworkConfig { self.max_unknown_peers } + /// Returns the maximum number of discovered peers that are allowed to connect. + pub fn max_discovered_peers(&self) -> usize { + self.max_discovered_peers + } + /// Returns the statically configured peers. pub fn static_peers(&self) -> &HashSet<Peer> { &self.static_peers @@ -182,7 +189,8 @@ impl Default for NetworkConfig { // Unwrapping is fine, because we made sure that the default is parsable. bind_multiaddr: DEFAULT_BIND_MULTIADDR.parse().unwrap(), reconnect_interval_secs: DEFAULT_RECONNECT_INTERVAL_SECS, - max_unknown_peers: DEFAULT_MAX_UNKOWN_PEERS, + max_unknown_peers: DEFAULT_MAX_UNKNOWN_PEERS, + max_discovered_peers: DEFAULT_MAX_DISCOVERED_PEERS, static_peers: Default::default(), } } @@ -195,7 +203,8 @@ pub struct NetworkConfigBuilder { bind_multiaddr: Option<Multiaddr>, reconnect_interval_secs: Option<u64>, max_unknown_peers: Option<usize>, - peering: PeeringConfigBuilder, + max_discovered_peers: Option<usize>, + peering: ManualPeeringConfigBuilder, } impl NetworkConfigBuilder { @@ -279,6 +288,12 @@ impl NetworkConfigBuilder { self } + /// Specifies the maximum number of gossip connections with discovered peers. + pub fn with_max_discovered_peers(mut self, n: usize) -> Self { + self.max_discovered_peers.replace(n); + self + } + /// Builds the network config. pub fn finish(self) -> Result<NetworkConfig, Error> { Ok(NetworkConfig { @@ -288,7 +303,8 @@ impl NetworkConfigBuilder { // We made sure that the default is parsable.
.unwrap_or_else(|| DEFAULT_BIND_MULTIADDR.parse().unwrap()), reconnect_interval_secs: self.reconnect_interval_secs.unwrap_or(DEFAULT_RECONNECT_INTERVAL_SECS), - max_unknown_peers: self.max_unknown_peers.unwrap_or(DEFAULT_MAX_UNKOWN_PEERS), + max_unknown_peers: self.max_unknown_peers.unwrap_or(DEFAULT_MAX_UNKNOWN_PEERS), + max_discovered_peers: self.max_discovered_peers.unwrap_or(DEFAULT_MAX_DISCOVERED_PEERS), static_peers: self.peering.finish()?.peers, }) } @@ -333,14 +349,15 @@ impl InMemoryNetworkConfigBuilder { .bind_multiaddr .unwrap_or_else(|| DEFAULT_BIND_MULTIADDR_MEM.parse().unwrap()), reconnect_interval_secs: DEFAULT_RECONNECT_INTERVAL_SECS, - max_unknown_peers: DEFAULT_MAX_UNKOWN_PEERS, + max_unknown_peers: DEFAULT_MAX_UNKNOWN_PEERS, + max_discovered_peers: DEFAULT_MAX_DISCOVERED_PEERS, static_peers: Default::default(), } } } #[derive(Clone)] -pub struct PeeringConfig { +pub struct ManualPeeringConfig { pub peers: HashSet<Peer>, } @@ -364,12 +381,12 @@ impl std::hash::Hash for Peer { } #[derive(Default, Deserialize)] -pub struct PeeringConfigBuilder { +pub struct ManualPeeringConfigBuilder { pub peers: Option<Vec<PeerBuilder>>, } -impl PeeringConfigBuilder { - pub fn finish(self) -> Result<PeeringConfig, Error> { +impl ManualPeeringConfigBuilder { + pub fn finish(self) -> Result<ManualPeeringConfig, Error> { let peers = match self.peers { None => Default::default(), Some(peer_builders) => { @@ -392,7 +409,7 @@ impl PeeringConfigBuilder { } }; - Ok(PeeringConfig { peers }) + Ok(ManualPeeringConfig { peers }) } } diff --git a/bee-network/src/error.rs b/bee-network/bee-gossip/src/error.rs similarity index 100% rename from bee-network/src/error.rs rename to bee-network/bee-gossip/src/error.rs diff --git a/bee-network/src/init.rs b/bee-network/bee-gossip/src/init.rs similarity index 92% rename from bee-network/src/init.rs rename to bee-network/bee-gossip/src/init.rs index f85266bffe..970a32c761 100644 --- a/bee-network/src/init.rs +++ b/bee-network/bee-gossip/src/init.rs @@ -34,6 +34,7 @@ pub mod global { static RECONNECT_INTERVAL_SECS: OnceCell<u64> = OnceCell::new(); static NETWORK_ID: OnceCell<u64> = OnceCell::new(); static MAX_UNKNOWN_PEERS: OnceCell<usize> = OnceCell::new(); + static MAX_DISCOVERED_PEERS: OnceCell<usize> = OnceCell::new(); pub fn set_reconnect_interval_secs(reconnect_interval_secs: u64) { if cfg!(test) { @@ -44,6 +45,7 @@ pub mod global { .expect("oncecell set"); } } + pub fn reconnect_interval_secs() -> u64 { *RECONNECT_INTERVAL_SECS.get().expect("oncecell get") } @@ -55,6 +57,7 @@ pub mod global { NETWORK_ID.set(network_id).expect("oncecell set"); } } + pub fn network_id() -> u64 { *NETWORK_ID.get().expect("oncecell get") } @@ -66,9 +69,22 @@ pub mod global { MAX_UNKNOWN_PEERS.set(max_unknown_peers).expect("oncecell set"); } } + pub fn max_unknown_peers() -> usize { *MAX_UNKNOWN_PEERS.get().expect("oncecell get") } + + pub fn set_max_discovered_peers(max_discovered_peers: usize) { + if cfg!(test) { + let _ = MAX_DISCOVERED_PEERS.set(max_discovered_peers); + } else { + MAX_DISCOVERED_PEERS.set(max_discovered_peers).expect("oncecell set"); + } + } + + pub fn max_discovered_peers() -> usize { + *MAX_DISCOVERED_PEERS.get().expect("oncecell get") + } } /// Initializes a "standalone" version of the network layer.
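The new `max_discovered_peers` setting flows from the builder into the `MAX_DISCOVERED_PEERS` once-cell above and falls back to `DEFAULT_MAX_DISCOVERED_PEERS` (4) when left unset. A minimal usage sketch, relying only on the public items visible in this diff (`NetworkConfig`, its `Default` impl, and the `max_discovered_peers()` getter):

```rust
// Reads the discovered-peer limit from a default `NetworkConfig`.
use bee_gossip::NetworkConfig;

fn main() {
    let config = NetworkConfig::default();

    // DEFAULT_MAX_DISCOVERED_PEERS is 4 in this diff; a builder would override it
    // via `with_max_discovered_peers(n)` before calling `finish()`.
    assert_eq!(config.max_discovered_peers(), 4);
}
```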
@@ -150,12 +166,14 @@ fn init( bind_multiaddr, reconnect_interval_secs, max_unknown_peers, + max_discovered_peers, static_peers: peers, } = config; global::set_reconnect_interval_secs(reconnect_interval_secs); global::set_network_id(network_id); global::set_max_unknown_peers(max_unknown_peers); + global::set_max_discovered_peers(max_discovered_peers); let (command_sender, command_receiver) = command_channel(); let (internal_command_sender, internal_command_receiver) = command_channel(); diff --git a/bee-network/src/lib.rs b/bee-network/bee-gossip/src/lib.rs similarity index 91% rename from bee-network/src/lib.rs rename to bee-network/bee-gossip/src/lib.rs index 3b355d6acd..f1a0b3caa2 100644 --- a/bee-network/src/lib.rs +++ b/bee-network/bee-gossip/src/lib.rs @@ -1,7 +1,7 @@ // Copyright 2020-2021 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -//! Networking layer for the Bee framework. +//! Allows peers in the same IOTA network to exchange gossip messages with each other. #![warn(missing_docs)] diff --git a/bee-network/src/network/error.rs b/bee-network/bee-gossip/src/network/error.rs similarity index 100% rename from bee-network/src/network/error.rs rename to bee-network/bee-gossip/src/network/error.rs diff --git a/bee-network/src/network/host.rs b/bee-network/bee-gossip/src/network/host.rs similarity index 94% rename from bee-network/src/network/host.rs rename to bee-network/bee-gossip/src/network/host.rs index 995941c643..f1abc996d0 100644 --- a/bee-network/src/network/host.rs +++ b/bee-network/bee-gossip/src/network/host.rs @@ -173,12 +173,19 @@ async fn process_internal_command(internal_command: Command, swarm: &mut Swarm { if let Err(e) = dial_addr(swarm, address.clone(), peerlist).await { - warn!("{:?}", e); + warn!("Dialing address {} failed. Cause: {}", address, e); } } Command::DialPeer { peer_id } => { if let Err(e) = dial_peer(swarm, peer_id, peerlist).await { - warn!("{:?}", e); + warn!("Dialing peer {} failed. Cause: {}", alias!(peer_id), e); + + // Remove discovered peer if dialing it failed. + let _ = peerlist + .0 + .write() + .await + .filter_remove(&peer_id, |info, _| info.relation.is_discovered()); } } _ => {} diff --git a/bee-network/src/network/meta.rs b/bee-network/bee-gossip/src/network/meta.rs similarity index 100% rename from bee-network/src/network/meta.rs rename to bee-network/bee-gossip/src/network/meta.rs diff --git a/bee-network/src/network/mod.rs b/bee-network/bee-gossip/src/network/mod.rs similarity index 100% rename from bee-network/src/network/mod.rs rename to bee-network/bee-gossip/src/network/mod.rs diff --git a/bee-network/src/network/origin.rs b/bee-network/bee-gossip/src/network/origin.rs similarity index 100% rename from bee-network/src/network/origin.rs rename to bee-network/bee-gossip/src/network/origin.rs diff --git a/bee-network/src/peer/error.rs b/bee-network/bee-gossip/src/peer/error.rs similarity index 90% rename from bee-network/src/peer/error.rs rename to bee-network/bee-gossip/src/peer/error.rs index 5c08ecd4c7..4ba2cde56c 100644 --- a/bee-network/src/peer/error.rs +++ b/bee-network/bee-gossip/src/peer/error.rs @@ -55,4 +55,8 @@ pub enum Error { /// A failure due to hitting the maximum number of allowed unknown peers. #[error("Tried to add more unknown peers than defined in the config ({0}).")] ExceedsUnknownPeerLimit(usize), + + /// A failure due to hitting the maximum number of allowed discovered peers. 
+ #[error("Tried to add more discovered peers than defined in the config ({0}).")] + ExceedsDiscoveredPeerLimit(usize), } diff --git a/bee-network/src/peer/info.rs b/bee-network/bee-gossip/src/peer/info.rs similarity index 60% rename from bee-network/src/peer/info.rs rename to bee-network/bee-gossip/src/peer/info.rs index 2fe792b896..f968b974af 100644 --- a/bee-network/src/peer/info.rs +++ b/bee-network/bee-gossip/src/peer/info.rs @@ -17,10 +17,18 @@ pub struct PeerInfo { /// Describes the relation with a peer. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum PeerRelation { - /// Represents a persistent peer. If the connection to such a peer drops, the network will try to reconnect. + /// Represents a known peer. + /// + /// If the connection to such a peer drops, the network will try to reconnect. Known, - /// Represents an ephemeral peer. If the connection to such a peer drops, the network won't try to reconnect. + /// Represents an unknown peer. + /// + /// If the connection to such a peer drops, the network won't try to reconnect. Unknown, + /// Represents a discovered peer. + /// + /// If the connection to such a peer drops, the network won't try to reconnect. + Discovered, } impl PeerRelation { @@ -34,15 +42,25 @@ impl PeerRelation { matches!(self, Self::Unknown) } - /// Sets the relation to `PeerRelation::Known`. + /// Returns whether the peer is discovered. + pub fn is_discovered(&self) -> bool { + matches!(self, Self::Discovered) + } + + /// Sets the relation to "known". pub fn set_known(&mut self) { *self = Self::Known; } - /// Sets the relation to `PeerRelation::Unknown`. + /// Sets the relation to "unknown". pub fn set_unknown(&mut self) { *self = Self::Unknown; } + + /// Sets the relation to "discovered". + pub fn set_discovered(&mut self) { + *self = Self::Discovered; + } } #[cfg(test)] @@ -59,5 +77,8 @@ mod tests { pr.set_unknown(); assert!(pr.is_unknown()); + + pr.set_discovered(); + assert!(pr.is_discovered()) } } diff --git a/bee-network/src/peer/list.rs b/bee-network/bee-gossip/src/peer/list.rs similarity index 96% rename from bee-network/src/peer/list.rs rename to bee-network/bee-gossip/src/peer/list.rs index 940a460e24..ccc68062af 100644 --- a/bee-network/src/peer/list.rs +++ b/bee-network/bee-gossip/src/peer/list.rs @@ -251,6 +251,7 @@ impl PeerList { // - Deny banned addresses. // - Deny already connected peers. // - Deny more than the configured unknown peers. + // - Deny more than the configured discovered peers. if peer_id == &self.local_id { Err(Error::PeerIsLocal(*peer_id)) } else if self.local_addrs.contains(peer_addr) { @@ -268,6 +269,10 @@ impl PeerList { && self.filter_count(|info, _| info.relation.is_unknown()) >= global::max_unknown_peers() { Err(Error::ExceedsUnknownPeerLimit(global::max_unknown_peers())) + } else if !self.contains(peer_id) + && self.filter_count(|info, _| info.relation.is_discovered()) >= global::max_discovered_peers() + { + Err(Error::ExceedsDiscoveredPeerLimit(global::max_discovered_peers())) } else { // All checks passed! Accept that peer. Ok(()) @@ -283,6 +288,7 @@ impl PeerList { // - Deny dialing a local address. // - Deny dialing a banned address. // - Deny dialing more than configured unkown peers. + // - Deny dialing more than configured discovered peers. 
if peer_id == &self.local_id { Err(Error::PeerIsLocal(*peer_id)) } else if !self.contains(peer_id) { @@ -305,6 +311,10 @@ impl PeerList { && self.filter_count(|info, _| info.relation.is_unknown()) >= global::max_unknown_peers() { Err(Error::ExceedsUnknownPeerLimit(global::max_unknown_peers())) + } else if peer_info.relation.is_discovered() + && self.filter_count(|info, _| info.relation.is_discovered()) >= global::max_discovered_peers() + { + Err(Error::ExceedsDiscoveredPeerLimit(global::max_discovered_peers())) } else { // All checks passed! Allow dialing that peer. Ok(()) diff --git a/bee-network/src/peer/meta.rs b/bee-network/bee-gossip/src/peer/meta.rs similarity index 100% rename from bee-network/src/peer/meta.rs rename to bee-network/bee-gossip/src/peer/meta.rs diff --git a/bee-network/src/peer/mod.rs b/bee-network/bee-gossip/src/peer/mod.rs similarity index 100% rename from bee-network/src/peer/mod.rs rename to bee-network/bee-gossip/src/peer/mod.rs diff --git a/bee-network/src/service/command.rs b/bee-network/bee-gossip/src/service/command.rs similarity index 100% rename from bee-network/src/service/command.rs rename to bee-network/bee-gossip/src/service/command.rs diff --git a/bee-network/src/service/error.rs b/bee-network/bee-gossip/src/service/error.rs similarity index 100% rename from bee-network/src/service/error.rs rename to bee-network/bee-gossip/src/service/error.rs diff --git a/bee-network/src/service/event.rs b/bee-network/bee-gossip/src/service/event.rs similarity index 100% rename from bee-network/src/service/event.rs rename to bee-network/bee-gossip/src/service/event.rs diff --git a/bee-network/src/service/host.rs b/bee-network/bee-gossip/src/service/host.rs similarity index 96% rename from bee-network/src/service/host.rs rename to bee-network/bee-gossip/src/service/host.rs index 0be427d473..2e3f63fbf5 100644 --- a/bee-network/src/service/host.rs +++ b/bee-network/bee-gossip/src/service/host.rs @@ -9,7 +9,7 @@ use super::{ use crate::{ alias, - init::global::reconnect_interval_secs, + init::global::{self, reconnect_interval_secs}, peer::{ error::Error as PeerError, info::{PeerInfo, PeerRelation}, @@ -221,10 +221,17 @@ async fn peerstate_checker(shutdown: Shutdown, senders: Senders, peerlist: PeerL let num_connected_known = peerlist.filter_count(|info, state| info.relation.is_known() && state.is_connected()); let num_connected_unknown = peerlist.filter_count(|info, state| info.relation.is_unknown() && state.is_connected()); + let num_connected_discovered = + peerlist.filter_count(|info, state| info.relation.is_discovered() && state.is_connected()); info!( - "Connected peers: known {}/{} unknown {}.", - num_connected_known, num_known, num_connected_unknown, + "Connected peers: known {}/{} unknown {}/{} discovered {}/{}.", + num_connected_known, + num_known, + num_connected_unknown, + global::max_unknown_peers(), + num_connected_discovered, + global::max_discovered_peers() ); for (peer_id, info) in peerlist.filter_info(|info, state| info.relation.is_known() && state.is_disconnected()) { @@ -342,8 +349,10 @@ async fn process_internal_event( // Try to disconnect, but ignore errors in-case the peer was disconnected already. let _ = peerlist.update_state(&peer_id, |state| state.set_disconnected()); - // Try to remove unknown peers. - let _ = peerlist.filter_remove(&peer_id, |peer_info, _| peer_info.relation.is_unknown()); + // Try to remove unknown and discovered peers. 
+ let _ = peerlist.filter_remove(&peer_id, |peer_info, _| { + peer_info.relation.is_unknown() || peer_info.relation.is_discovered() + }); // We no longer need to hold the lock. drop(peerlist); diff --git a/bee-network/src/service/mod.rs b/bee-network/bee-gossip/src/service/mod.rs similarity index 100% rename from bee-network/src/service/mod.rs rename to bee-network/bee-gossip/src/service/mod.rs diff --git a/bee-network/src/swarm/behavior.rs b/bee-network/bee-gossip/src/swarm/behavior.rs similarity index 100% rename from bee-network/src/swarm/behavior.rs rename to bee-network/bee-gossip/src/swarm/behavior.rs diff --git a/bee-network/src/swarm/builder.rs b/bee-network/bee-gossip/src/swarm/builder.rs similarity index 100% rename from bee-network/src/swarm/builder.rs rename to bee-network/bee-gossip/src/swarm/builder.rs diff --git a/bee-network/src/swarm/error.rs b/bee-network/bee-gossip/src/swarm/error.rs similarity index 100% rename from bee-network/src/swarm/error.rs rename to bee-network/bee-gossip/src/swarm/error.rs diff --git a/bee-network/src/swarm/mod.rs b/bee-network/bee-gossip/src/swarm/mod.rs similarity index 100% rename from bee-network/src/swarm/mod.rs rename to bee-network/bee-gossip/src/swarm/mod.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/event.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/event.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/event.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/event.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/handler.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/handler.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/handler.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/handler.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/id.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/id.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/id.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/id.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/io.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/io.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/io.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/io.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/mod.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/mod.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/mod.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/mod.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/protocol.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/protocol.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/protocol.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/protocol.rs diff --git a/bee-network/src/swarm/protocols/iota_gossip/upgrade.rs b/bee-network/bee-gossip/src/swarm/protocols/iota_gossip/upgrade.rs similarity index 100% rename from bee-network/src/swarm/protocols/iota_gossip/upgrade.rs rename to bee-network/bee-gossip/src/swarm/protocols/iota_gossip/upgrade.rs diff --git a/bee-network/src/swarm/protocols/mod.rs b/bee-network/bee-gossip/src/swarm/protocols/mod.rs similarity index 100% rename from bee-network/src/swarm/protocols/mod.rs rename to bee-network/bee-gossip/src/swarm/protocols/mod.rs diff --git 
a/bee-network/src/tests/add_peer.rs b/bee-network/bee-gossip/src/tests/add_peer.rs similarity index 100% rename from bee-network/src/tests/add_peer.rs rename to bee-network/bee-gossip/src/tests/add_peer.rs diff --git a/bee-network/src/tests/alias.rs b/bee-network/bee-gossip/src/tests/alias.rs similarity index 100% rename from bee-network/src/tests/alias.rs rename to bee-network/bee-gossip/src/tests/alias.rs diff --git a/bee-network/src/tests/common/await_events.rs b/bee-network/bee-gossip/src/tests/common/await_events.rs similarity index 100% rename from bee-network/src/tests/common/await_events.rs rename to bee-network/bee-gossip/src/tests/common/await_events.rs diff --git a/bee-network/src/tests/common/keys_and_ids.rs b/bee-network/bee-gossip/src/tests/common/keys_and_ids.rs similarity index 100% rename from bee-network/src/tests/common/keys_and_ids.rs rename to bee-network/bee-gossip/src/tests/common/keys_and_ids.rs diff --git a/bee-network/src/tests/common/mod.rs b/bee-network/bee-gossip/src/tests/common/mod.rs similarity index 100% rename from bee-network/src/tests/common/mod.rs rename to bee-network/bee-gossip/src/tests/common/mod.rs diff --git a/bee-network/src/tests/common/network_config.rs b/bee-network/bee-gossip/src/tests/common/network_config.rs similarity index 100% rename from bee-network/src/tests/common/network_config.rs rename to bee-network/bee-gossip/src/tests/common/network_config.rs diff --git a/bee-network/src/tests/common/shutdown.rs b/bee-network/bee-gossip/src/tests/common/shutdown.rs similarity index 100% rename from bee-network/src/tests/common/shutdown.rs rename to bee-network/bee-gossip/src/tests/common/shutdown.rs diff --git a/bee-network/src/tests/connect_peer.rs b/bee-network/bee-gossip/src/tests/connect_peer.rs similarity index 100% rename from bee-network/src/tests/connect_peer.rs rename to bee-network/bee-gossip/src/tests/connect_peer.rs diff --git a/bee-network/src/tests/initialize.rs b/bee-network/bee-gossip/src/tests/initialize.rs similarity index 100% rename from bee-network/src/tests/initialize.rs rename to bee-network/bee-gossip/src/tests/initialize.rs diff --git a/bee-network/src/tests/mod.rs b/bee-network/bee-gossip/src/tests/mod.rs similarity index 100% rename from bee-network/src/tests/mod.rs rename to bee-network/bee-gossip/src/tests/mod.rs diff --git a/bee-network/src/tests/send_recv.rs b/bee-network/bee-gossip/src/tests/send_recv.rs similarity index 100% rename from bee-network/src/tests/send_recv.rs rename to bee-network/bee-gossip/src/tests/send_recv.rs diff --git a/bee-protocol/Cargo.toml b/bee-protocol/Cargo.toml index 235c5b68ca..38bebf7066 100644 --- a/bee-protocol/Cargo.toml +++ b/bee-protocol/Cargo.toml @@ -11,5 +11,5 @@ keywords = [ "iota", "tangle", "bee", "framework", "protocol" ] homepage = "https://www.iota.org" [dependencies] +bee-gossip = { version = "0.3.0", path = "../bee-network/bee-gossip", default-features = false } bee-message = { version = "0.1.6", path = "../bee-message", default-features = false, features = [ "serde" ] } -bee-network = { version = "0.2.2", path = "../bee-network", default-features = false } diff --git a/bee-protocol/src/types/peer.rs b/bee-protocol/src/types/peer.rs index f9aad34dfd..e26738e075 100644 --- a/bee-protocol/src/types/peer.rs +++ b/bee-protocol/src/types/peer.rs @@ -5,8 +5,8 @@ use crate::types::metrics::PeerMetrics; +use bee_gossip::{Multiaddr, PeerId, PeerInfo, PeerRelation}; use bee_message::milestone::MilestoneIndex; -use bee_network::{Multiaddr, PeerId, PeerInfo, PeerRelation}; use 
std::{ sync::atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicU8, Ordering}, diff --git a/documentation/docs/crate_overview.md b/documentation/docs/crate_overview.md index da05d97c13..86e56063c0 100644 --- a/documentation/docs/crate_overview.md +++ b/documentation/docs/crate_overview.md @@ -37,7 +37,7 @@ All types and features required to compute and maintain the ledger state. Implementation of [RFC: Message](https://github.com/GalRogozinski/protocol-rfcs/blob/message/text/0017-message/0017-message.md). -## bee-network +## bee-gossip Networking functionality and types for nodes and clients participating in the IOTA protocol built on top of `libp2p`.
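For downstream crates the rename is mostly mechanical, as the `bee-protocol` changes above show: the Cargo.toml dependency switches from `bee-network` 0.2.2 to `bee-gossip` 0.3.0 (path `../bee-network/bee-gossip`), and imports move to the `bee_gossip` module path while the re-exported type names stay the same. A hedged migration sketch that also covers the new `PeerRelation::Discovered` variant follows; public access to `PeerInfo::relation` is an assumption here, based on its in-crate usage.

```rust
// Before the rename (bee-network 0.2.x):
// use bee_network::{Multiaddr, PeerId, PeerInfo, PeerRelation};

// After the rename (bee-gossip 0.3.0):
use bee_gossip::{PeerId, PeerInfo, PeerRelation};

// Assumption: `PeerInfo::relation` is a public field, as its in-crate usage suggests.
fn describe(peer_id: &PeerId, info: &PeerInfo) -> String {
    let relation = match info.relation {
        PeerRelation::Known => "known",
        PeerRelation::Unknown => "unknown",
        // New in 0.3.0: peers added through autopeering discovery.
        PeerRelation::Discovered => "discovered",
    };
    format!("{} ({})", peer_id, relation)
}
```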