diff --git a/CHANGELOG.md b/CHANGELOG.md index 596fd0d01..0af607296 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Next release +- fix(db): fix number of files in db, startup hang, ram issues and flushing issues - feat(confg): added chain config template and fgw example - feat(v0.8.0-rc0): starknet_subscribeNewHeads - fix(rocksdb): update max open files opt diff --git a/Cargo.lock b/Cargo.lock index 0d44db49c..15ae02d38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,16 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.8.4" @@ -28,6 +38,20 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + [[package]] name = "ahash" version = "0.8.11" @@ -120,7 +144,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -226,7 +250,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "tracing", ] @@ -248,7 +272,7 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -276,7 +300,7 @@ dependencies = [ "rand", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.65", "tracing", "url", ] @@ -340,7 +364,7 @@ dependencies = [ "reqwest 0.12.8", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "tokio", 
"tracing", "url", @@ -365,7 +389,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -454,7 +478,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -470,7 +494,7 @@ dependencies = [ "async-trait", "k256", "rand", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -484,7 +508,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -501,7 +525,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", "syn-solidity", "tiny-keccak", ] @@ -519,7 +543,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.85", + "syn 2.0.89", "syn-solidity", ] @@ -558,7 +582,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "tokio", "tower 0.5.1", "tracing", @@ -838,6 +862,12 @@ dependencies = [ "rand", ] +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.7.6" @@ -853,6 +883,45 @@ dependencies = [ "term", ] +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits 0.2.19", + "rusticata-macros", + "thiserror 1.0.65", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", +] + 
+[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -910,11 +979,22 @@ checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand", - "futures-lite", + "fastrand 2.1.1", + "futures-lite 2.3.0", "slab", ] +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock 3.4.0", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "async-global-executor" version = "2.4.1" @@ -923,32 +1003,61 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.3.0", "once_cell", ] +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock", + "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "parking", - "polling", - "rustix", + "polling 
3.7.3", + "rustix 0.38.38", "slab", "tracing", "windows-sys 0.59.0", ] +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + [[package]] name = "async-lock" version = "3.4.0" @@ -960,6 +1069,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io 2.3.4", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "async-object-pool" version = "0.1.5" @@ -976,15 +1096,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "async-signal", "async-task", "blocking", "cfg-if", "event-listener 5.3.1", - "futures-lite", - "rustix", + "futures-lite 2.3.0", + "rustix 0.38.38", "tracing", ] @@ -994,13 +1114,13 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix", + "rustix 0.38.38", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -1015,14 +1135,14 @@ dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "gloo-timers 0.3.0", "kv-log-macro", "log", @@ -1053,7 +1173,7 @@ 
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -1070,7 +1190,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -1086,12 +1206,36 @@ dependencies = [ "tungstenite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.12", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -1111,7 +1255,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -1182,6 +1326,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.2.0" @@ -1267,7 +1417,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -1309,6 +1459,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "block-buffer" version = "0.9.0" @@ -1366,7 +1525,7 @@ dependencies = [ "starknet_api", "strum 0.25.0", "strum_macros 0.25.3", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1378,7 +1537,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "piper", ] @@ -1410,6 +1569,15 @@ dependencies = [ "starknet-types-core 0.1.5", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + [[package]] name = "bstr" version = "1.10.0" @@ -1496,7 +1664,7 @@ dependencies = [ "hashbrown 0.13.2", "instant", "once_cell", - "thiserror", + "thiserror 1.0.65", "tokio", ] @@ -1555,7 +1723,7 @@ dependencies = [ "num-bigint", "num-traits 0.2.19", "serde", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1568,7 +1736,7 @@ dependencies = [ "num-bigint", "num-traits 0.2.19", "serde", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1582,7 +1750,7 @@ dependencies = [ "num-bigint", "num-traits 0.2.19", "serde", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1620,7 +1788,7 @@ dependencies = [ "clap", "log", "salsa", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1645,7 +1813,7 @@ dependencies = [ "log", "salsa", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1671,7 +1839,7 @@ dependencies = [ "log", "salsa", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1695,7 +1863,7 @@ dependencies = [ "indoc 2.0.5", "salsa", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -1953,7 +2121,7 @@ dependencies = [ "salsa", "serde", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", 
] [[package]] @@ -2246,7 +2414,7 @@ checksum = "3d55dcf98a6e1a03e0b36129fad4253f9e6666a1746ab9c075d212ba68a4e9c1" dependencies = [ "cairo-lang-debug 2.7.0", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -2257,7 +2425,7 @@ dependencies = [ "cairo-lang-filesystem 1.0.0-alpha.6", "serde", "smol_str 0.1.24", - "thiserror", + "thiserror 1.0.65", "toml 0.4.10", ] @@ -2269,7 +2437,7 @@ dependencies = [ "cairo-lang-filesystem 1.0.0-rc0", "serde", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", "toml 0.4.10", ] @@ -2282,7 +2450,7 @@ dependencies = [ "cairo-lang-filesystem 1.1.1", "serde", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", "toml 0.4.10", ] @@ -2296,7 +2464,7 @@ dependencies = [ "cairo-lang-utils 2.7.0", "serde", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", "toml 0.8.19", ] @@ -2328,7 +2496,7 @@ dependencies = [ "sha2", "smol_str 0.2.2", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2446,7 +2614,7 @@ dependencies = [ "serde", "sha3", "smol_str 0.1.24", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2468,7 +2636,7 @@ dependencies = [ "serde", "sha3", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2491,7 +2659,7 @@ dependencies = [ "serde", "sha3", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2519,7 +2687,7 @@ dependencies = [ "sha3", "smol_str 0.2.2", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2531,7 +2699,7 @@ dependencies = [ "cairo-lang-sierra 1.0.0-alpha.6", "cairo-lang-utils 1.0.0-alpha.6", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2543,7 +2711,7 @@ dependencies = [ "cairo-lang-sierra 1.0.0-rc0", "cairo-lang-utils 1.0.0-rc0", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2556,7 +2724,7 @@ dependencies = [ "cairo-lang-sierra 1.1.1", "cairo-lang-utils 1.1.1", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] 
[[package]] @@ -2572,7 +2740,7 @@ dependencies = [ "itertools 0.12.1", "num-bigint", "num-traits 0.2.19", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2584,7 +2752,7 @@ dependencies = [ "cairo-lang-sierra 1.0.0-alpha.6", "cairo-lang-utils 1.0.0-alpha.6", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2596,7 +2764,7 @@ dependencies = [ "cairo-lang-sierra 1.0.0-rc0", "cairo-lang-utils 1.0.0-rc0", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2609,7 +2777,7 @@ dependencies = [ "cairo-lang-sierra 1.1.1", "cairo-lang-utils 1.1.1", "itertools 0.10.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2625,7 +2793,7 @@ dependencies = [ "itertools 0.12.1", "num-bigint", "num-traits 0.2.19", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2748,7 +2916,7 @@ dependencies = [ "log", "num-bigint", "num-traits 0.2.19", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2770,7 +2938,7 @@ dependencies = [ "log", "num-bigint", "num-traits 0.2.19", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2793,7 +2961,7 @@ dependencies = [ "log", "num-bigint", "num-traits 0.2.19", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2814,7 +2982,7 @@ dependencies = [ "num-bigint", "num-traits 0.2.19", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2863,7 +3031,7 @@ dependencies = [ "serde_json", "sha3", "smol_str 0.1.24", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2903,7 +3071,7 @@ dependencies = [ "serde_json", "sha3", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2944,7 +3112,7 @@ dependencies = [ "serde_json", "sha3", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2975,7 +3143,7 @@ dependencies = [ "serde_json", "smol_str 0.2.2", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -2999,7 +3167,7 @@ dependencies = [ "sha3", "smol_str 0.2.2", "starknet-types-core 
0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -3026,7 +3194,7 @@ dependencies = [ "num-traits 0.2.19", "salsa", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", "unescaper", ] @@ -3043,7 +3211,7 @@ dependencies = [ "num-traits 0.2.19", "salsa", "smol_str 0.2.2", - "thiserror", + "thiserror 1.0.65", "unescaper", ] @@ -3242,6 +3410,36 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.38" @@ -3265,6 +3463,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -3309,7 +3508,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -3438,6 +3637,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" 
+dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.14" @@ -3506,6 +3714,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core", "typenum", ] @@ -3518,6 +3727,33 @@ dependencies = [ "cipher", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version 0.4.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "darling" version = "0.14.4" @@ -3563,7 +3799,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -3585,7 +3821,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -3621,6 +3857,26 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "data-encoding-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "der" version = "0.7.9" @@ -3631,6 +3887,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits 0.2.19", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.11" @@ -3662,7 +3932,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -3682,7 +3952,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", "unicode-xid", ] @@ -3743,6 +4013,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "dotenv" version = "0.15.0" @@ -3755,6 +4036,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "dunce" version = "1.0.5" @@ -3781,6 +4068,31 @@ dependencies = [ "spki", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + 
"signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.13.0" @@ -3830,6 +4142,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "env_filter" version = "0.1.2" @@ -3910,7 +4234,7 @@ dependencies = [ "serde_json", "sha2", "sha3", - "thiserror", + "thiserror 1.0.65", "uuid", ] @@ -3968,6 +4292,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.1.1" @@ -3992,7 +4325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -4005,6 +4338,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -4096,6 +4435,16 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +dependencies = [ + "futures-timer", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -4130,13 +4479,28 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-lite" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -4151,7 +4515,18 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", +] + +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.16", + "rustls-pki-types", ] [[package]] @@ -4166,6 +4541,17 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.3" @@ -4219,7 +4605,7 @@ 
checksum = "553630feadf7b76442b0849fd25fdf89b860d933623aec9693fed19af0400c78" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -4246,6 +4632,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + [[package]] name = "gimli" version = "0.31.1" @@ -4286,7 +4682,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4504,6 +4900,67 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hickory-proto" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "socket2 0.5.7", + "thiserror 1.0.65", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.3", + "rand", + "resolv-conf", + "smallvec", + "thiserror 1.0.65", + "tokio", + "tracing", +] + +[[package]] +name 
= "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -4513,6 +4970,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + [[package]] name = "http" version = "0.2.12" @@ -4638,7 +5106,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -4741,7 +5209,7 @@ dependencies = [ "http-body 1.0.1", "hyper 1.5.0", "pin-project-lite", - "socket2", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -4782,6 +5250,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "idna" version = "0.5.0" @@ -4792,6 +5270,59 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "if-addrs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "if-watch" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +dependencies = [ + "async-io 2.3.4", + "core-foundation", + "fnv", + 
"futures", + "if-addrs", + "ipnet", + "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", + "rtnetlink", + "smol", + "system-configuration 0.6.1", + "tokio", + "windows", +] + +[[package]] +name = "igd-next" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 0.2.12", + "hyper 0.14.31", + "log", + "rand", + "tokio", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.23" @@ -4904,6 +5435,29 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.7", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + [[package]] name = "ipnet" version = "2.10.1" @@ -5020,7 +5574,7 @@ dependencies = [ "rustls-native-certs 0.7.3", "rustls-pki-types", "soketto", - "thiserror", + "thiserror 1.0.65", "tokio", "tokio-rustls 0.25.0", "tokio-util", @@ -5048,7 +5602,7 @@ dependencies = [ "rustc-hash 1.1.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "tokio", "tokio-stream", "tracing", @@ -5068,7 +5622,7 @@ dependencies = [ "jsonrpsee-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", "tokio", "tower 0.4.13", "tracing", @@ -5085,7 +5639,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -5104,7 +5658,7 @@ dependencies = [ "serde", "serde_json", "soketto", - 
"thiserror", + "thiserror 1.0.65", "tokio", "tokio-stream", "tokio-util", @@ -5122,7 +5676,7 @@ dependencies = [ "beef", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -5302,7 +5856,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -5340,103 +5894,676 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00419de735aac21d53b0de5ce2c03bd3627277cf471300f27ebc89f7d828047" [[package]] -name = "libredox" -version = "0.1.3" +name = "libp2p" +version = "0.54.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" dependencies = [ - "bitflags 2.6.0", - "libc", + "bytes", + "either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list", + "libp2p-autonat", + "libp2p-connection-limits", + "libp2p-core", + "libp2p-dcutr", + "libp2p-dns", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-mdns", + "libp2p-metrics", + "libp2p-noise", + "libp2p-ping", + "libp2p-quic", + "libp2p-relay", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-tls", + "libp2p-upnp", + "libp2p-yamux", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror 1.0.65", ] [[package]] -name = "librocksdb-sys" -version = "0.16.0+8.10.0" +name = "libp2p-allow-block-list" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", + "libp2p-core", + 
"libp2p-identity", + "libp2p-swarm", + "void", ] [[package]] -name = "libz-sys" -version = "1.1.20" +name = "libp2p-autonat" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" dependencies = [ - "cc", - "pkg-config", - "vcpkg", + "async-trait", + "asynchronous-codec", + "bytes", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "rand_core", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", ] [[package]] -name = "linux-raw-sys" -version = "0.4.14" +name = "libp2p-connection-limits" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "void", +] [[package]] -name = "lock_api" -version = "0.4.12" +name = "libp2p-core" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" dependencies = [ - "autocfg", - "scopeguard", + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror 1.0.65", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", ] [[package]] -name = "log" -version = "0.4.22" +name = "libp2p-dcutr" +version = "0.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "3236a2e24cbcf2d05b398b003ed920e1e8cedede13784d90fa3961b109647ce0" dependencies = [ - "value-bag", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", ] [[package]] -name = "lru" -version = "0.12.5" +name = "libp2p-dns" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ - "hashbrown 0.15.0", + "async-trait", + "futures", + "hickory-resolver", + "libp2p-core", + "libp2p-identity", + "parking_lot 0.12.3", + "smallvec", + "tracing", ] [[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" +name = "libp2p-gossipsub" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" dependencies = [ - "cc", - "libc", + "asynchronous-codec", + "base64 0.22.1", + "byteorder", + "bytes", + "either", + "fnv", + "futures", + "futures-ticker", + "getrandom", + "hex_fmt", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "prometheus-client", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "regex", + "sha2", + "smallvec", + "tracing", + "void", + "web-time", ] [[package]] -name = "m-cairo-test-contracts" -version = "0.7.0" - -[[package]] -name = "m-proc-macros" -version = "0.7.0" +name = "libp2p-identify" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" dependencies = [ - "indoc 2.0.5", - "proc-macro2", - "quote", - "syn 2.0.85", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror 1.0.65", + "tracing", + "void", ] [[package]] -name = "madara" -version = "0.7.0" +name = "libp2p-identity" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" +dependencies = [ + "bs58", + "ed25519-dalek", + "hkdf", + "multihash", + "quick-protobuf", + "rand", + "sha2", + "thiserror 1.0.65", + "tracing", + "zeroize", +] + +[[package]] +name = "libp2p-kad" +version = "0.46.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +dependencies = [ + "arrayvec", + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "sha2", + "smallvec", + "thiserror 1.0.65", + "tracing", + "uint", + "void", + "web-time", +] + +[[package]] +name = "libp2p-mdns" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +dependencies = [ + "data-encoding", + "futures", + "hickory-proto", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "smallvec", + "socket2 0.5.7", + "tokio", + "tracing", + "void", +] + +[[package]] +name = "libp2p-metrics" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +dependencies = 
[ + "futures", + "libp2p-core", + "libp2p-dcutr", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-ping", + "libp2p-relay", + "libp2p-swarm", + "pin-project", + "prometheus-client", + "web-time", +] + +[[package]] +name = "libp2p-noise" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +dependencies = [ + "asynchronous-codec", + "bytes", + "curve25519-dalek", + "futures", + "libp2p-core", + "libp2p-identity", + "multiaddr", + "multihash", + "once_cell", + "quick-protobuf", + "rand", + "sha2", + "snow", + "static_assertions", + "thiserror 1.0.65", + "tracing", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-ping" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" +dependencies = [ + "either", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63d926c6be56a2489e0e7316b17fe95a70bc5c4f3e85740bb3e67c0f3c6a44" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "libp2p-identity", + "quick-protobuf", + "quick-protobuf-codec", + "tracing", +] + +[[package]] +name = "libp2p-quic" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "parking_lot 0.12.3", + "quinn", + "rand", + "ring 0.17.8", + "rustls 0.23.16", + "socket2 0.5.7", + "thiserror 1.0.65", + "tokio", + "tracing", +] + +[[package]] 
+name = "libp2p-relay" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10df23d7f5b5adcc129f4a69d6fbd05209e356ccf9e8f4eb10b2692b79c77247" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "static_assertions", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-request-response" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "smallvec", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "async-std", + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm-derive", + "lru", + "multistream-select", + "once_cell", + "rand", + "smallvec", + "tokio", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "libp2p-swarm-test" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea4e1d1d92421dc4c90cad42e3cd24f50fd210191c9f126d41bd483a09567f67" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "libp2p-core", + 
"libp2p-identity", + "libp2p-plaintext", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-yamux", + "rand", + "tracing", +] + +[[package]] +name = "libp2p-tcp" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +dependencies = [ + "async-io 2.3.4", + "futures", + "futures-timer", + "if-watch", + "libc", + "libp2p-core", + "libp2p-identity", + "socket2 0.5.7", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring 0.17.8", + "rustls 0.23.16", + "rustls-webpki 0.101.7", + "thiserror 1.0.65", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-upnp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "tokio", + "tracing", + "void", +] + +[[package]] +name = "libp2p-yamux" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +dependencies = [ + "either", + "futures", + "libp2p-core", + "thiserror 1.0.65", + "tracing", + "yamux 0.12.1", + "yamux 0.13.4", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.16.0+8.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +dependencies = [ + "value-bag", +] + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.0", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + 
"linked-hash-map", +] + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "m-cairo-test-contracts" +version = "0.7.0" + +[[package]] +name = "m-proc-macros" +version = "0.7.0" +dependencies = [ + "indoc 2.0.5", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "madara" +version = "0.7.0" dependencies = [ "alloy", "anyhow", @@ -5450,7 +6577,6 @@ dependencies = [ "governor", "hyper 0.14.31", "jsonrpsee", - "log", "mc-analytics", "mc-block-import", "mc-db", @@ -5458,6 +6584,7 @@ dependencies = [ "mc-eth", "mc-gateway", "mc-mempool", + "mc-p2p", "mc-rpc", "mc-sync", "mc-telemetry", @@ -5465,6 +6592,7 @@ dependencies = [ "mp-chain-config", "mp-convert", "mp-utils", + "multiaddr", "opentelemetry", "opentelemetry-appender-tracing", "opentelemetry-otlp", @@ -5479,7 +6607,7 @@ dependencies = [ "serde_yaml", "starknet-providers", "starknet_api", - "thiserror", + "thiserror 1.0.65", "tokio", "tower 0.4.13", "tower-http", @@ -5490,6 +6618,12 @@ dependencies = [ "url", ] +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.1.0" @@ -5570,7 +6704,7 @@ dependencies = [ "starknet-types-core 0.1.5", "starknet_api", "tempfile", - "thiserror", + "thiserror 1.0.65", "tokio", "tracing", "tracing-core", @@ -5585,6 +6719,7 @@ dependencies = [ "anyhow", "bincode 1.3.3", "bonsai-trie", + "futures", "lazy_static", "log", "mc-analytics", @@ -5603,12 +6738,13 @@ dependencies = [ "opentelemetry_sdk", "rayon", "rocksdb", + "rstest 0.18.2", "serde", "starknet-core 0.11.0", "starknet-types-core 0.1.5", "starknet_api", "tempfile", - "thiserror", + "thiserror 1.0.65", "tokio", 
"tracing", "tracing-core", @@ -5710,7 +6846,7 @@ dependencies = [ "starknet-types-core 0.1.5", "starknet_api", "tempfile", - "thiserror", + "thiserror 1.0.65", "time", "tokio", "tracing", @@ -5742,7 +6878,7 @@ dependencies = [ "starknet-core 0.11.0", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", "tokio", "tracing", "tracing-core", @@ -5775,7 +6911,7 @@ dependencies = [ "serde_json", "starknet-core 0.11.0", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", "tokio", "tower 0.4.13", "tracing", @@ -5820,12 +6956,39 @@ dependencies = [ "starknet-core 0.11.0", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", "tokio", "tracing", - "tracing-core", - "tracing-opentelemetry", - "tracing-subscriber", + "tracing-core", + "tracing-opentelemetry", + "tracing-subscriber", +] + +[[package]] +name = "mc-p2p" +version = "0.7.0" +dependencies = [ + "anyhow", + "async-trait", + "futures", + "libp2p", + "mc-db", + "mc-rpc", + "mp-block", + "mp-chain-config", + "mp-convert", + "mp-utils", + "p2p_stream", + "prost", + "prost-build", + "starknet-core 0.11.0", + "starknet-types-core 0.1.5", + "thiserror 1.0.65", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] @@ -5856,7 +7019,7 @@ dependencies = [ "starknet-types-core 0.1.5", "starknet-types-rpc", "starknet_api", - "thiserror", + "thiserror 1.0.65", "tokio", "tracing", ] @@ -5896,7 +7059,7 @@ dependencies = [ "starknet-types-core 0.1.5", "starknet_api", "tempfile", - "thiserror", + "thiserror 1.0.65", "tokio", "tracing", "tracing-core", @@ -5994,7 +7157,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -6016,7 +7179,7 @@ dependencies = [ "starknet-core 0.11.0", "starknet-types-core 0.1.5", "starknet-types-rpc", - "thiserror", + "thiserror 1.0.65", "tracing", "tracing-core", "tracing-opentelemetry", @@ -6030,8 +7193,8 @@ dependencies = [ "anyhow", 
"blockifier", "lazy_static", - "log", "mp-utils", + "multiaddr", "primitive-types", "rstest 0.18.2", "serde", @@ -6039,7 +7202,8 @@ dependencies = [ "serde_yaml", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", + "tracing", "url", ] @@ -6066,7 +7230,7 @@ dependencies = [ "starknet-core 0.11.0", "starknet-providers", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", "tokio", ] @@ -6081,7 +7245,7 @@ dependencies = [ "starknet-core 0.11.0", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -6119,7 +7283,7 @@ dependencies = [ "starknet-providers", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -6132,7 +7296,7 @@ dependencies = [ "starknet-core 0.11.0", "starknet-providers", "starknet-types-core 0.1.5", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -6156,7 +7320,7 @@ dependencies = [ "starknet-providers", "starknet-types-core 0.1.5", "starknet_api", - "thiserror", + "thiserror 1.0.65", "tracing", ] @@ -6189,6 +7353,66 @@ dependencies = [ "url", ] +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.8.0", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +dependencies = [ + "core2", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "multistream-select" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.2", +] + [[package]] name = "native-tls" version = "0.2.12" @@ -6219,18 +7443,101 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "netlink-packet-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +dependencies = [ + "anyhow", + "byteorder", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror 1.0.65", +] + +[[package]] +name = "netlink-proto" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror 1.0.65", + "tokio", 
+] + +[[package]] +name = "netlink-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +dependencies = [ + "async-io 1.13.0", + "bytes", + "futures", + "libc", + "log", + "tokio", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + [[package]] name = "no-std-compat" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + [[package]] name = "nom" version = "7.1.3" @@ -6388,7 +7695,7 @@ checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -6409,6 +7716,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.20.2" @@ -6450,7 +7766,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -6482,7 +7798,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", 
- "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -6510,7 +7826,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror", + "thiserror 1.0.65", "tokio", "tonic", ] @@ -6547,7 +7863,7 @@ dependencies = [ "ordered-float", "serde", "serde_json", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -6566,7 +7882,7 @@ dependencies = [ "percent-encoding", "rand", "serde_json", - "thiserror", + "thiserror 1.0.65", "tokio", "tokio-stream", ] @@ -6586,6 +7902,23 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p2p_stream" +version = "0.7.0" +dependencies = [ + "anyhow", + "async-trait", + "futures", + "futures-bounded", + "libp2p", + "libp2p-plaintext", + "libp2p-swarm-test", + "rstest 0.18.2", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -6707,6 +8040,16 @@ dependencies = [ "sha2", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -6720,7 +8063,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.65", "ucd-trie", ] @@ -6764,7 +8107,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -6808,7 +8151,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -6830,7 +8173,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand", + "fastrand 2.1.1", "futures-io", ] @@ -6850,6 +8193,22 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + [[package]] name = "polling" version = "3.7.3" @@ -6860,11 +8219,34 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix", + "rustix 0.38.38", "tracing", "windows-sys 0.59.0", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "portable-atomic" version = "1.9.0" @@ -6928,6 +8310,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.89", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -6969,18 +8361,41 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 
2.0.89", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot 0.12.3", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "proptest" version = "1.5.0" @@ -7009,53 +8424,158 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] name = "prost" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +dependencies = [ + "bytes", + "heck 0.5.0", + "itertools 0.13.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.89", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "prost-types" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +dependencies = [ + "prost", +] + +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror 1.0.65", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", - "prost-derive", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "socket2 0.5.7", + "thiserror 2.0.3", + "tokio", + "tracing", ] [[package]] -name = 
"prost-derive" -version = "0.13.3" +name = "quinn-proto" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ - "anyhow", - "itertools 0.13.0", - "proc-macro2", - "quote", - "syn 2.0.85", + "bytes", + "getrandom", + "rand", + "ring 0.17.8", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "rustls-pki-types", + "slab", + "thiserror 2.0.3", + "tinyvec", + "tracing", + "web-time", ] [[package]] -name = "quanta" -version = "0.12.3" +name = "quinn-udp" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ - "crossbeam-utils", + "cfg_aliases", "libc", "once_cell", - "raw-cpuid", - "wasi", - "web-sys", - "winapi", + "socket2 0.5.7", + "tracing", + "windows-sys 0.59.0", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.37" @@ -7146,6 +8666,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rcgen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +dependencies = [ + "pem", + "ring 0.16.20", + "time", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -7172,7 +8704,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -7319,7 +8851,7 @@ dependencies = [ "async-tungstenite", "futures-util", "reqwest 0.12.8", - 
"thiserror", + "thiserror 1.0.65", "tokio", "tokio-util", "tracing", @@ -7327,6 +8859,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -7337,6 +8879,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + [[package]] name = "ring" version = "0.17.8" @@ -7347,8 +8904,8 @@ dependencies = [ "cfg-if", "getrandom", "libc", - "spin", - "untrusted", + "spin 0.9.8", + "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -7429,10 +8986,29 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.85", + "syn 2.0.89", "unicode-ident", ] +[[package]] +name = "rtnetlink" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +dependencies = [ + "async-global-executor", + "futures", + "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-packet-utils", + "netlink-proto", + "netlink-sys", + "nix", + "thiserror 1.0.65", + "tokio", +] + [[package]] name = "ruint" version = "1.12.3" @@ -7515,6 +9091,29 @@ dependencies = [ "semver 1.0.23", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = "rustix" +version = "0.37.27" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "0.38.38" @@ -7524,7 +9123,7 @@ dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -7535,7 +9134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-webpki 0.101.7", "sct", ] @@ -7547,7 +9146,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7561,6 +9160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "once_cell", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7615,6 +9215,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -7622,8 +9225,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -7632,9 +9235,9 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" 
dependencies = [ - "ring", + "ring 0.17.8", "rustls-pki-types", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -7655,6 +9258,17 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "rw-stream-sink" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +dependencies = [ + "futures", + "pin-project", + "static_assertions", +] + [[package]] name = "ryu" version = "1.0.18" @@ -7748,7 +9362,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -7775,8 +9389,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -7869,7 +9483,7 @@ checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -7880,7 +9494,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -7989,7 +9603,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8001,7 +9615,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8039,7 +9653,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8161,6 +9775,23 @@ dependencies = [ "serde", ] +[[package]] +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" +dependencies = [ + 
"async-channel 2.3.1", + "async-executor", + "async-fs", + "async-io 2.3.4", + "async-lock 3.4.0", + "async-net", + "async-process", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "smol_str" version = "0.1.24" @@ -8179,6 +9810,33 @@ dependencies = [ "serde", ] +[[package]] +name = "snow" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" +dependencies = [ + "aes-gcm", + "blake2", + "chacha20poly1305", + "curve25519-dalek", + "rand_core", + "ring 0.17.8", + "rustc_version 0.4.1", + "sha2", + "subtle", +] + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.7" @@ -8205,6 +9863,12 @@ dependencies = [ "sha-1", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "spin" version = "0.9.8" @@ -8268,7 +9932,7 @@ dependencies = [ "starknet-crypto 0.7.2", "starknet-providers", "starknet-signers", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -8283,7 +9947,7 @@ dependencies = [ "starknet-accounts", "starknet-core 0.11.0", "starknet-providers", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -8409,7 +10073,7 @@ checksum = "bbc159a1934c7be9761c237333a57febe060ace2bc9e3b337a59a37af206d19f" dependencies = [ "starknet-curve 0.4.2", "starknet-ff", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8419,7 +10083,7 @@ source = "git+https://github.com/kasarlabs/starknet-rs.git?branch=fork#1a0428e28 dependencies = [ "starknet-curve 0.5.0", "starknet-types-core 0.1.5", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8476,7 +10140,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8986a940af916fc0a034f4e42c6ba76d94f1e97216d75447693dfd7aefaf3ef2" dependencies = [ "starknet-core 0.12.0", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8495,7 +10159,7 @@ dependencies = [ "serde_json", "serde_with 2.3.3", "starknet-core 0.11.0", - "thiserror", + "thiserror 1.0.65", "url", ] @@ -8513,7 +10177,7 @@ dependencies = [ "rand", "starknet-core 0.11.0", "starknet-crypto 0.7.2", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -8574,7 +10238,7 @@ dependencies = [ "starknet-types-core 0.1.5", "strum 0.24.1", "strum_macros 0.24.3", - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -8652,7 +10316,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8665,7 +10329,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8765,9 +10429,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -8783,7 +10447,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -8801,6 +10465,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "sysinfo" version = "0.30.13" @@ -8871,9 +10546,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", - "fastrand", + 
"fastrand 2.1.1", "once_cell", - "rustix", + "rustix 0.38.38", "windows-sys 0.59.0", ] @@ -8909,7 +10584,16 @@ version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.65", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -8920,7 +10604,18 @@ checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -9033,7 +10728,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9046,7 +10741,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -9181,7 +10876,7 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "socket2", + "socket2 0.5.7", "tokio", "tokio-stream", "tower 0.4.13", @@ -9274,7 +10969,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -9352,7 +11047,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.85", + "syn 2.0.89", ] 
[[package]] @@ -9375,7 +11070,7 @@ dependencies = [ "log", "rand", "sha1", - "thiserror", + "thiserror 1.0.65", "url", "utf-8", ] @@ -9422,7 +11117,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c878a167baa8afd137494101a688ef8c67125089ff2249284bd2b5f9bfedb815" dependencies = [ - "thiserror", + "thiserror 1.0.65", ] [[package]] @@ -9464,12 +11159,44 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsafe-libyaml" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +dependencies = [ + "futures-io", + "futures-util", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "untrusted" version = "0.9.0" @@ -9483,7 +11210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", "serde", ] @@ 
-9564,6 +11291,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = "wait-timeout" version = "0.2.0" @@ -9573,6 +11306,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + [[package]] name = "walkdir" version = "2.5.0" @@ -9620,7 +11359,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -9654,7 +11393,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9700,6 +11439,12 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "widestring" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" + [[package]] name = "winapi" version = "0.3.9" @@ -9956,6 +11701,50 @@ dependencies = [ "tap", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core", + "serde", + "zeroize", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + 
"lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 1.0.65", + "time", +] + +[[package]] +name = "xml-rs" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + [[package]] name = "xshell" version = "0.2.6" @@ -9971,12 +11760,52 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d422e8e38ec76e2f06ee439ccc765e9c6a9638b9e7c9f2e8255e4d41e8bd852" +[[package]] +name = "yamux" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", +] + +[[package]] +name = "yamux" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", + "web-time", +] + [[package]] name = "yansi" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -9995,7 +11824,7 @@ checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] @@ -10015,7 +11844,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.89", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 092eaae22..3227bd8ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,8 @@ members = [ "crates/client/devnet", "crates/client/mempool", "crates/client/block_import", + "crates/client/p2p", + "crates/client/p2p_stream", "crates/node", "crates/primitives/block", "crates/primitives/convert", @@ -39,6 +41,8 @@ default-members = [ "crates/client/mempool", "crates/client/block_import", "crates/client/analytics", + "crates/client/p2p", + "crates/client/p2p_stream", "crates/node", "crates/primitives/block", "crates/primitives/convert", @@ -117,12 +121,14 @@ mc-rpc = { path = "crates/client/rpc" } mc-gateway = { path = "crates/client/gateway" } mc-sync = { path = "crates/client/sync" } mc-eth = { path = "crates/client/eth" } +mc-p2p = { path = "crates/client/p2p" } mc-mempool = { path = "crates/client/mempool" } mc-block-import = { path = "crates/client/block_import" } mc-devnet = { path = "crates/client/devnet" } # Madara misc m-cairo-test-contracts = { path = "crates/cairo-test-contracts" } +p2p_stream = { path = "crates/client/p2p_stream" } # Starknet dependencies cairo-vm = "=1.0.1" @@ -209,6 +215,28 @@ itertools = "0.13.0" regex = "1.10.5" bytes = "1.6.0" crypto-bigint = "0.5.5" +libp2p-plaintext = "0.42.0" +libp2p-swarm-test = "0.4.0" +libp2p = { version = "0.54.1", features = [ + "tokio", + # "quic", + "tcp", + "tls", + "noise", + "yamux", + "ping", + "kad", + "gossipsub", + "autonat", + "dcutr", + "relay", + "kad", + "identify", + "macros", +] } +multiaddr = "0.18" +tokio-util = "0.7.12" +futures-bounded = "0.2.1" # Instrumentation opentelemetry = { version = "0.25.0", features = 
["metrics", "logs"] } @@ -229,6 +257,10 @@ tracing-subscriber = { version = "0.3.18", features = [ "std", ] } tracing-opentelemetry = "0.26.0" +prost = "0.13.3" +prost-build = "0.13.3" +unsigned-varint = { version = "0.8.0", features = ["futures"] } +tokio-stream = "0.1" [patch.crates-io] starknet-core = { git = "https://github.com/kasarlabs/starknet-rs.git", branch = "fork" } diff --git a/crates/client/db/Cargo.toml b/crates/client/db/Cargo.toml index 0440e2103..82281462b 100644 --- a/crates/client/db/Cargo.toml +++ b/crates/client/db/Cargo.toml @@ -35,6 +35,7 @@ starknet_api = { workspace = true } # Other anyhow.workspace = true bincode = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } rayon = { workspace = true } rocksdb.workspace = true @@ -69,7 +70,7 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } tempfile = "3.10" lazy_static = { workspace = true } mp-transactions = { workspace = true } - +rstest = { workspace = true } [features] default = [] diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 1a592ddf7..b9cb81df9 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -8,10 +8,8 @@ use db_metrics::DbMetrics; use mp_chain_config::ChainConfig; use mp_utils::service::Service; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; -use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBCompressionType, DBWithThreadMode, Env, FlushOptions, MultiThreaded, - Options, SliceTransform, -}; +use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Env, FlushOptions, MultiThreaded}; +use rocksdb_options::rocksdb_global_options; use starknet_types_core::hash::{Pedersen, Poseidon, StarkHash}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; @@ -28,7 +26,9 @@ pub mod db_metrics; pub mod devnet_db; mod error; pub mod l1_db; +mod rocksdb_options; pub mod storage_updates; +pub mod stream; pub mod tests; pub use 
error::{MadaraStorageError, TrieType}; @@ -38,39 +38,8 @@ pub type WriteBatchWithTransaction = rocksdb::WriteBatchWithTransaction; const DB_UPDATES_BATCH_SIZE: usize = 1024; -#[allow(clippy::identity_op)] // allow 1 * MiB -#[allow(non_upper_case_globals)] // allow KiB/MiB/GiB names -pub fn open_rocksdb(path: &Path, create: bool) -> Result> { - const KiB: usize = 1024; - const MiB: usize = 1024 * KiB; - const GiB: usize = 1024 * MiB; - - let mut opts = Options::default(); - opts.set_report_bg_io_stats(true); - opts.set_use_fsync(false); - opts.create_if_missing(create); - opts.create_missing_column_families(true); - opts.set_keep_log_file_num(1); - opts.optimize_level_style_compaction(4 * GiB); - opts.set_compression_type(DBCompressionType::Zstd); - let cores = std::thread::available_parallelism().map(|e| e.get() as i32).unwrap_or(1); - opts.increase_parallelism(cores); - - opts.set_atomic_flush(true); - opts.set_manual_wal_flush(true); - opts.set_max_subcompactions(cores as _); - - opts.set_max_log_file_size(1 * MiB); - opts.set_max_open_files(512); // 512 is the value used by substrate for reference - opts.set_keep_log_file_num(3); - opts.set_log_level(rocksdb::LogLevel::Warn); - - let mut env = Env::new().context("Creating rocksdb env")?; - // env.set_high_priority_background_threads(cores); // flushes - env.set_low_priority_background_threads(cores); // compaction - - opts.set_env(&env); - +pub fn open_rocksdb(path: &Path) -> Result> { + let opts = rocksdb_global_options()?; tracing::debug!("opening db at {:?}", path.display()); let db = DB::open_cf_descriptors( &opts, @@ -265,31 +234,6 @@ impl Column { Devnet => "devnet", } } - - /// Per column rocksdb options, like memory budget, compaction profiles, block sizes for hdd/sdd - /// etc. 
TODO: add basic sensible defaults - pub(crate) fn rocksdb_options(&self) -> Options { - let mut opts = Options::default(); - match self { - Column::ContractStorage => { - opts.set_prefix_extractor(SliceTransform::create_fixed_prefix( - contract_db::CONTRACT_STORAGE_PREFIX_EXTRACTOR, - )); - } - Column::ContractToClassHashes => { - opts.set_prefix_extractor(SliceTransform::create_fixed_prefix( - contract_db::CONTRACT_CLASS_HASH_PREFIX_EXTRACTOR, - )); - } - Column::ContractToNonces => { - opts.set_prefix_extractor(SliceTransform::create_fixed_prefix( - contract_db::CONTRACT_NONCES_PREFIX_EXTRACTOR, - )); - } - _ => {} - } - opts - } } pub trait DatabaseExt { @@ -315,7 +259,7 @@ pub struct MadaraBackend { chain_config: Arc, db_metrics: DbMetrics, sender_block_info: tokio::sync::broadcast::Sender, - #[cfg(feature = "testing")] + #[cfg(any(test, feature = "testing"))] _temp_dir: Option, } @@ -381,12 +325,12 @@ impl MadaraBackend { &self.chain_config } - #[cfg(feature = "testing")] + #[cfg(any(test, feature = "testing"))] pub fn open_for_testing(chain_config: Arc) -> Arc { let temp_dir = tempfile::TempDir::with_prefix("madara-test").unwrap(); Arc::new(Self { backup_handle: None, - db: open_rocksdb(temp_dir.as_ref(), true).unwrap(), + db: open_rocksdb(temp_dir.as_ref()).unwrap(), last_flush_time: Default::default(), chain_config, db_metrics: DbMetrics::register().unwrap(), @@ -425,7 +369,7 @@ impl MadaraBackend { None }; - let db = open_rocksdb(&db_path, true)?; + let db = open_rocksdb(&db_path)?; let backend = Arc::new(Self { db_metrics: DbMetrics::register().context("Registering db metrics")?, @@ -434,7 +378,7 @@ impl MadaraBackend { last_flush_time: Default::default(), chain_config: Arc::clone(&chain_config), sender_block_info: tokio::sync::broadcast::channel(100).0, - #[cfg(feature = "testing")] + #[cfg(any(test, feature = "testing"))] _temp_dir: None, }); backend.check_configuration()?; diff --git a/crates/client/db/src/rocksdb_options.rs 
b/crates/client/db/src/rocksdb_options.rs new file mode 100644 index 000000000..37aebb516 --- /dev/null +++ b/crates/client/db/src/rocksdb_options.rs @@ -0,0 +1,73 @@ +#![allow(clippy::identity_op)] // allow 1 * MiB +#![allow(non_upper_case_globals)] // allow KiB/MiB/GiB names + +use crate::{contract_db, Column}; +use anyhow::{Context, Result}; +use rocksdb::{DBCompressionType, Env, Options, SliceTransform}; + +const KiB: usize = 1024; +const MiB: usize = 1024 * KiB; +const GiB: usize = 1024 * MiB; + +pub fn rocksdb_global_options() -> Result { + let mut options = Options::default(); + options.create_if_missing(true); + options.create_missing_column_families(true); + let cores = std::thread::available_parallelism().map(|e| e.get() as i32).unwrap_or(1); + options.increase_parallelism(cores); + options.set_max_background_jobs(cores); + + options.set_atomic_flush(true); + options.set_max_subcompactions(cores as _); + + options.set_max_log_file_size(10 * MiB); + options.set_max_open_files(2048); + options.set_keep_log_file_num(3); + options.set_log_level(rocksdb::LogLevel::Warn); + + let mut env = Env::new().context("Creating rocksdb env")?; + // env.set_high_priority_background_threads(cores); // flushes + env.set_low_priority_background_threads(cores); // compaction + + options.set_env(&env); + + Ok(options) +} + +impl Column { + /// Per column rocksdb options, like memory budget, compaction profiles, block sizes for hdd/sdd + /// etc. 
+ pub(crate) fn rocksdb_options(&self) -> Options { + let mut options = Options::default(); + + match self { + Column::ContractStorage => { + options.set_prefix_extractor(SliceTransform::create_fixed_prefix( + contract_db::CONTRACT_STORAGE_PREFIX_EXTRACTOR, + )); + } + Column::ContractToClassHashes => { + options.set_prefix_extractor(SliceTransform::create_fixed_prefix( + contract_db::CONTRACT_CLASS_HASH_PREFIX_EXTRACTOR, + )); + } + Column::ContractToNonces => { + options.set_prefix_extractor(SliceTransform::create_fixed_prefix( + contract_db::CONTRACT_NONCES_PREFIX_EXTRACTOR, + )); + } + _ => {} + } + + options.set_compression_type(DBCompressionType::Zstd); + match self { + Column::BlockNToBlockInfo | Column::BlockNToBlockInner => { + options.optimize_universal_style_compaction(1 * GiB); + } + _ => { + options.optimize_universal_style_compaction(100 * MiB); + } + } + options + } +} diff --git a/crates/client/db/src/stream.rs b/crates/client/db/src/stream.rs new file mode 100644 index 000000000..67763605b --- /dev/null +++ b/crates/client/db/src/stream.rs @@ -0,0 +1,472 @@ +use std::{num::NonZeroU64, sync::Arc}; + +use futures::{stream, Stream}; +use mp_block::MadaraBlockInfo; +use tokio::sync::broadcast::{error::RecvError, Receiver}; + +use crate::{db_block_id::DbBlockId, MadaraBackend, MadaraStorageError}; + +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] +pub enum Direction { + #[default] + Forward, + Backward, +} +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct BlockStreamConfig { + pub direction: Direction, + /// Block number from which to start (inclusive). + /// In the case of reverse iteration, if the block does not exist yet, iteration will start from the latest block in db. 
+ pub start: u64, + pub step: NonZeroU64, + pub limit: Option, +} + +impl Default for BlockStreamConfig { + fn default() -> Self { + Self { direction: Direction::Forward, start: 0, step: NonZeroU64::MIN, limit: None } + } +} + +impl MadaraBackend { + pub fn block_info_stream( + self: &Arc, + iteration: BlockStreamConfig, + ) -> impl Stream> { + // So, this is a somewhat funny problem: by the time we return the blocks until the current latest_block in db, + // the database may actually have new blocks now! + // Remember that we're returning a stream here, which means that the time between polls varies with the caller - and, + // in the use cases we're interested in (websocket/p2p) the time between polls varies depending on the speed of the + // connection with the client/peer that's calling the endpoint. + // So! it may very well be the case that once we caught up with the latest block_n in db as we saw at the beginning + // of the call, and once we sent all the blocks that have been added within the time we sent all of those, we might + // still not have caught up with the latest block in db - because new blocks could have come by then. + // This implementation solves this problem by checking the latest block in db in a loop and only once it really looks + // like we caught up with the db, we subscribe to the new blocks channel. But hold on a minute, this subscribe is + // done after getting the latest block number! There's a split second where it could have been possible to miss a + // block. Because of this rare case, there are two supplementary things to note: we *also* get the latest block_n + // *after* subscribing, so that we can check that we did not miss anything during subscription - and just in case, + // we also handle the case when the subscription returns a block that's futher into the future than the one we + // would expect. + // All in all, this implementation tries its *very best* not to subscribe to the channel when it does not have to. 
+ // In addition, because rust does not have `yield` syntax (yet? I'm losing hope..) - this is implemented as a + // funky looking state machine. yay! + + // TODO: use db iterators to fill a VecDeque buffer (we don't want to hold a db iterator across an await point!) + // TODO: what should we do about reorgs?! i would assume we go back and rereturn the new blocks..? + + struct State { + iteration: BlockStreamConfig, + backend: Arc, + /// `None` here means we reached the end of iteration. + next_to_return: Option, + num_blocks_returned: u64, + /// This is `+ 1` because we want to handle returning genesis. If the chain is empty (does not even have a genesis + /// block), this field will be 0. + latest_plus_one: Option, + subscription: Option>, + } + + impl State { + /// Get the `latest_plus_one` variable in `self`, populating it if it is empty. + fn get_latest_plus_one(&mut self) -> Result { + let latest_plus_one = match self.latest_plus_one { + Some(n) => n, + None => { + self.backend.get_latest_block_n()?.map(|n| n.saturating_add(1)).unwrap_or(/* genesis */ 0) + } + }; + self.latest_plus_one = Some(latest_plus_one); + Ok(latest_plus_one) + } + + async fn next_forward(&mut self) -> Result, MadaraStorageError> { + 'retry: loop { + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + + // If we have a subscription, return blocks from it. + if let Some(subscription) = &mut self.subscription { + match subscription.recv().await { + // return this block + Ok(info) if info.header.block_number == next_to_return => { + self.next_to_return = next_to_return.checked_add(self.iteration.step.get()); + return Ok(Some(info)); + } + // skip this block + Ok(info) if info.header.block_number < next_to_return => continue 'retry, + // the channel returned a block number that we didn't expect. Treat that as if it lagged..? 
+ Ok(_info) => self.subscription = None, + // If it lagged (buffer full), continue using db and we'll eventually resubscribe again once caught up :) + Err(RecvError::Lagged(_n_skipped_messages)) => self.subscription = None, + Err(RecvError::Closed) => return Ok(None), + } + } + + // Or else, return blocks from the db. + + if self.latest_plus_one.is_some_and(|latest_plus_one| latest_plus_one <= next_to_return) { + // new blocks may have arrived, get latest_block_n again + self.latest_plus_one = None + } + + let latest_plus_one = self.get_latest_plus_one()?; + + if latest_plus_one <= next_to_return { + // caught up with the db :) + self.subscription = Some(self.backend.subscribe_block_info()); + // get latest_block_n again after subscribing, because it could have changed during subscribing + self.latest_plus_one = None; + self.get_latest_plus_one()?; + continue 'retry; + } + + let block_info = &self.backend.get_block_info(&DbBlockId::Number(next_to_return))?.ok_or( + MadaraStorageError::InconsistentStorage("latest_block_n points to a non existent block".into()), + )?; + let block_info = block_info + .as_nonpending() + .ok_or(MadaraStorageError::InconsistentStorage("Closed block should not be pending".into()))?; + + self.next_to_return = next_to_return.checked_add(self.iteration.step.get()); + return Ok(Some(block_info.clone())); + } + } + + // Implement backward mode in another function. + async fn next_backward(&mut self) -> Result, MadaraStorageError> { + // This makes sure we're starting from a block that actually exists. It bounds the `next_to_return` variable. + if self.latest_plus_one.is_none() { + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + let latest_block = self.get_latest_plus_one()?.checked_sub(1); + // If there are no blocks in db, this will set `next_to_return` to None. 
+ self.next_to_return = latest_block.map(|latest_block| u64::min(latest_block, next_to_return)) + } + + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + + let block_info = &self.backend.get_block_info(&DbBlockId::Number(next_to_return))?.ok_or( + MadaraStorageError::InconsistentStorage("latest_block_n points to a non existent block".into()), + )?; + let block_info = block_info + .as_nonpending() + .ok_or(MadaraStorageError::InconsistentStorage("Closed block should not be pending".into()))?; + + // The None here will stop the iteration once we passed genesis. + self.next_to_return = next_to_return.checked_sub(self.iteration.step.get()); + Ok(Some(block_info.clone())) + } + + async fn next(&mut self) -> Result, MadaraStorageError> { + if self.iteration.limit.is_some_and(|limit| self.num_blocks_returned >= limit) { + return Ok(None); + } + + let ret = match self.iteration.direction { + Direction::Forward => self.next_forward().await?, + Direction::Backward => self.next_backward().await?, + }; + + if ret.is_some() { + self.num_blocks_returned = self.num_blocks_returned.saturating_add(1); + } + + Ok(ret) + } + } + + stream::unfold( + State { + next_to_return: Some(iteration.start), + iteration, + num_blocks_returned: 0, + latest_plus_one: None, + backend: Arc::clone(self), + subscription: None, + }, + |mut s| async { s.next().await.transpose().map(|el| (el, s)) }, + ) + } +} + +#[cfg(test)] +mod tests { + //! To test: + //! - [x] Simple iteration, everything in db. + //! - [x] Simple iteration, db is empty. + //! - [x] Simple iteration, everything in db. Start from a specific block. + //! - [x] Simple iteration, everything in db. Start from a block that doesnt exist yet. + //! - [x] More complex cases where blocks are added during iteration. + //! - [x] Reverse iteration. + //! - [x] Reverse iteration, db is empty. + //! - [x] Reverse iteration: start from a specific block. + //! - [x] Reverse: Start from a block that doesnt exist yet. + //! 
- [x] Step iteration, forward. + //! - [x] Step iteration, backward. + //! - [x] Limit field. + //! - [x] Limit field wait on channel. + //! - [x] Limit field reverse iteration. + + use super::*; + use mp_block::{Header, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo}; + use mp_chain_config::ChainConfig; + use starknet_core::types::Felt; + use std::time::Duration; + use stream::StreamExt; + use tokio::{pin, time::timeout}; + + fn block_info(block_number: u64) -> MadaraBlockInfo { + MadaraBlockInfo { + header: Header { block_number, ..Default::default() }, + block_hash: Felt::from(block_number), + tx_hashes: Default::default(), + } + } + + fn store_block(backend: &MadaraBackend, block_number: u64) { + backend + .store_block( + MadaraMaybePendingBlock { + inner: Default::default(), + info: MadaraMaybePendingBlockInfo::NotPending(block_info(block_number)), + }, + Default::default(), + Default::default(), + ) + .unwrap(); + } + + #[rstest::fixture] + fn empty_chain() -> Arc { + MadaraBackend::open_for_testing(ChainConfig::madara_test().into()) + } + + #[rstest::fixture] + fn test_chain() -> Arc { + let backend = MadaraBackend::open_for_testing(ChainConfig::madara_test().into()); + for block_number in 0..5 { + store_block(&backend, block_number) + } + backend + } + + #[rstest::rstest] + #[tokio::test] + async fn test_simple(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(1))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + 
+ store_block(&test_chain, 5); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_empty_chain(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&empty_chain, 0); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_start_from_block(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { start: 3, ..Default::default() }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 5); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_start_from_not_yet_created(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig { start: 3, ..Default::default() }); + pin!(stream); + + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 0); + store_block(&empty_chain, 1); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 2); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 3); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); 
+ assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 4); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_concurrent(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 0); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 1); + store_block(&empty_chain, 2); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(1))); + store_block(&empty_chain, 3); + store_block(&empty_chain, 4); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 5); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 3, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(1))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + 
assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward_empty(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 0, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward_start_from_not_yet_created(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 10, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(1))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_step(test_chain: Arc) { + let stream = + test_chain.block_info_stream(BlockStreamConfig { step: 2.try_into().unwrap(), ..Default::default() }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 5); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 6); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(6))); + } + + #[rstest::rstest] + #[tokio::test] + async fn 
test_step_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + step: 2.try_into().unwrap(), + start: 4, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { limit: Some(3), ..Default::default() }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(0))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(1))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit2(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { limit: Some(3), start: 4, ..Default::default() }); + pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&test_chain, 5); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&test_chain, 6); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(6))); + assert_eq!(stream.next().await.transpose().unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + limit: Some(3), + start: 5, + ..Default::default() + }); + 
pin!(stream); + + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(4))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(3))); + assert_eq!(stream.next().await.transpose().unwrap(), Some(block_info(2))); + assert_eq!(stream.next().await.transpose().unwrap(), None); + } +} diff --git a/crates/client/db/src/tests/common/mod.rs b/crates/client/db/src/tests/common/mod.rs index 26e76eef1..d81d38a15 100644 --- a/crates/client/db/src/tests/common/mod.rs +++ b/crates/client/db/src/tests/common/mod.rs @@ -13,7 +13,7 @@ use mp_transactions::{ use starknet_api::felt; use starknet_types_core::felt::Felt; -#[cfg(feature = "testing")] +#[cfg(any(test, feature = "testing"))] pub mod temp_db { use crate::DatabaseService; use mp_chain_config::ChainConfig; diff --git a/crates/client/p2p/Cargo.toml b/crates/client/p2p/Cargo.toml new file mode 100644 index 000000000..3de534bdb --- /dev/null +++ b/crates/client/p2p/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "mc-p2p" +authors.workspace = true +homepage.workspace = true +edition.workspace = true +repository.workspace = true +version.workspace = true +license.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +futures.workspace = true +libp2p.workspace = true +prost.workspace = true +thiserror.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true +tokio.workspace = true +tracing.workspace = true +unsigned-varint.workspace = true + +mc-db.workspace = true +mc-rpc.workspace = true +mp-block.workspace = true +mp-chain-config.workspace = true +mp-convert.workspace = true +mp-utils.workspace = true +p2p_stream.workspace = true + +starknet-core.workspace = true +starknet-types-core.workspace = true + +[lints] +workspace = true + +[build-dependencies] +prost-build.workspace = true diff --git a/crates/client/p2p/build.rs b/crates/client/p2p/build.rs new file mode 100644 index 000000000..ced7e620c --- /dev/null +++ 
b/crates/client/p2p/build.rs @@ -0,0 +1,6 @@ +use std::io::Result; +fn main() -> Result<()> { + let files: Vec<_> = std::fs::read_dir("proto")?.map(|entry| entry.map(|e| e.path())).collect::>()?; + prost_build::compile_protos(&files, &["proto/"])?; + Ok(()) +} diff --git a/crates/client/p2p/proto/capabilities.proto b/crates/client/p2p/proto/capabilities.proto new file mode 100644 index 000000000..422553134 --- /dev/null +++ b/crates/client/p2p/proto/capabilities.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; +import "common.proto"; + +// A capability for one of the following protocols: +// 1. /starknet/headers/ +// 2. /starknet/state_diffs/ +// 3. /starknet/classes/ +// 4. /starknet/transactions/ +// 5. /starknet/events/ +// The capability defines which blocks does the node store +message SyncCapability { + message ArchiveStrategy {} // Keep all data from genesis for the given protocol. + message L1PruneStrategy {} // Keep all data not accepted on L1 for the given protocol. + message ConstSizePruneStrategy { // Keep data for the top n blocks of the chain for the given protocol. + uint64 num_blocks = 1; + } + message StaticPruneStrategy { // Keep all data from a hardcoded block for the given protocol. + uint64 first_block_number = 1; + } + + oneof prune_strategy { + ArchiveStrategy archive_strategy = 1; + L1PruneStrategy l1_prune_strategy = 2; + ConstSizePruneStrategy const_size_prune_strategy = 3; + StaticPruneStrategy static_prune_strategy = 4; + } +} diff --git a/crates/client/p2p/proto/class.proto b/crates/client/p2p/proto/class.proto new file mode 100644 index 000000000..9aa087f83 --- /dev/null +++ b/crates/client/p2p/proto/class.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; +import "common.proto"; + +message EntryPoint { + Felt252 selector = 1; + uint64 offset = 2; +} + +message Cairo0Class { + string abi = 1; + repeated EntryPoint externals = 2; + repeated EntryPoint l1_handlers = 3; + repeated EntryPoint constructors = 4; + // Compressed in base64 representation. 
+ string program = 5; +} + +message SierraEntryPoint { + uint64 index = 1; + Felt252 selector = 2; +} + +message Cairo1EntryPoints { + repeated SierraEntryPoint externals = 1; + repeated SierraEntryPoint l1_handlers = 2; + repeated SierraEntryPoint constructors = 3; +} + +message Cairo1Class { + string abi = 1; + Cairo1EntryPoints entry_points = 2; + repeated Felt252 program = 3; + string contract_class_version = 4; +} + +message Class { + oneof class { + Cairo0Class cairo0 = 1; + Cairo1Class cairo1 = 2; + } + uint32 domain = 3; + Hash class_hash = 4; +} + +message ClassesRequest { + Iteration iteration = 1; +} + +// Responses are sent ordered by the order given in the request. +message ClassesResponse { + oneof class_message { + Class class = 1; + Fin fin = 2; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its classes. + } +} diff --git a/crates/client/p2p/proto/common.proto b/crates/client/p2p/proto/common.proto new file mode 100644 index 000000000..1ab04a1d8 --- /dev/null +++ b/crates/client/p2p/proto/common.proto @@ -0,0 +1,83 @@ +syntax = "proto3"; + +message Felt252 { + bytes elements = 1; +} + +// A hash value representable as a Felt252 +message Hash { + bytes elements = 1; +} + +// A 256 bit hash value (like Keccak256) +message Hash256 { + // Required to be 32 bytes long + bytes elements = 1; +} + +message Hashes { + repeated Hash items = 1; +} + +message Address { + bytes elements = 1; +} + +message PeerID { + bytes id = 1; +} + +message Uint128 { + uint64 low = 1; + uint64 high = 2; +} + +message ConsensusSignature { + Felt252 r = 1; + Felt252 s = 2; +} + +message Patricia { + uint64 n_leaves = 1; // needed to know the height, so as to how many nodes to expect in a proof. 
+ // and also when receiving all leaves, how many to expect + Hash root = 2; +} + +message StateDiffCommitment { + uint64 state_diff_length = 1; + Hash root = 2; +} + +message BlockID { + uint64 number = 1; + Hash header = 2; +} + +enum L1DataAvailabilityMode { + Calldata = 0; + Blob = 1; +} + +enum VolitionDomain { + L1 = 0; + L2 = 1; +} + +message Iteration { + enum Direction { + Forward = 0; + Backward = 1; + } + oneof start { + uint64 block_number = 1; + Hash header = 2; + } + Direction direction = 3; + uint64 limit = 4; + uint64 step = 5; // to allow interleaving from several nodes + // bool interleave = 6; // return results in any order of blocks, per block the messages should still be in the order specified +} + +// mark the end of a stream of messages +// TBD: may not be required if we open a stream per request. +message Fin {} diff --git a/crates/client/p2p/proto/consensus.proto b/crates/client/p2p/proto/consensus.proto new file mode 100644 index 000000000..b3a41c618 --- /dev/null +++ b/crates/client/p2p/proto/consensus.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +import "common.proto"; +import "header.proto"; +import "transaction.proto"; + +// WIP - will change + +message Vote { + enum VoteType { + Prevote = 0; + Precommit = 1; + }; + + // We use a type field to distinguish between prevotes and precommits instead of different + // messages, to make sure the data, and therefore the signatures, are unambiguous between + // Prevote and Precommit. + VoteType vote_type = 1; + uint64 block_number = 3; + uint64 fork_id = 4; + uint32 round = 5; + // This is optional since a vote can be NIL. + optional Hash block_hash = 6; + // Identifies the voter. + Address voter = 7; +} + +message ProposalInit { + uint64 block_number = 2; + uint64 fork_id = 3; + uint32 proposal_round = 4; +} + +// Finalize the Tendermint Proposal. When a validator receives this message it will presume that no +// more content for the proposal should be sent. 
The signature supplied with ProposalFin should be +// for the full Tendermint proposal: +// 1. height +// 2. fork_id +// 3. proposal_round +// 4. valid_round +// 5. block_hash - the validator calculates the block_hash on its own from the content stream and +// confirms the signature with that value. +message ProposalFin { + optional uint32 valid_round = 1; +} + +// The timestamp of a proposal can impact consensus, specifically the lower bound applied. If nodes +// apply a lower bound validation based on their local time, then we risk a scenario where in round +// `R` proposal P is locked. Then in a later round the timestamp in P has gone stale. Therefore the +// lower bound should be "greater than the previous timestamp". Upper bounds don't suffer from this +// problem. +message Proposal { + oneof messages { + ProposalInit init = 1; + ProposalFin fin = 2; + // Once block `H` is decided there remains a question; which set of validators receive a + // reward? More specifically, what is the canonical set of precommits for block `H`? Our + // current plan is for the proposer to set the first transaction in `H+1` to be writing the + // list of precommits for `H` to the staking contract in startknet. + Transactions transactions = 3; + BlockProof proof = 4; + } +} + +message ConsensusMessage { + oneof messages { + Vote vote = 1; + Proposal proposal = 2; + } + // Signature by the initial sender (e.g. proposer, voter) of the message. 
+ ConsensusSignature signature = 3; +} \ No newline at end of file diff --git a/crates/client/p2p/proto/discovery.proto b/crates/client/p2p/proto/discovery.proto new file mode 100644 index 000000000..74cc2cede --- /dev/null +++ b/crates/client/p2p/proto/discovery.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +import "common.proto"; +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.MessageOptions { + optional bytes powDifficulty = 1001; +} + +message MultiAddress { + bytes value = 1; +} + +// advertise what queries a peer can reply to (it can always query others for whatever it wants) +message Capability { + string protocol = 1; + bytes capability = 2; // The content of the capability. + // It should be the encoding of a protobuf message without length prefix. + // The type of message is deduced from the protocol. +} + +message Pow { + // ~10 seconds on a single CPU. 2^240 out of 2^256, so 16 bits of difficulty. + option (powDifficulty) = "0x200000000000000000000000000000000000000000000000000000000000"; + + bytes blockHash = 1; // one of the accepted block hashes in the underlying layer (ethereum in starknet). + // accepted is currently the current last or one before it. + bytes salt = 2; // a salt such that keccak(salt||blockHash||id) is below posDifficulty +} + +// send when joining and periodically (period TBD) +message Node +{ + PeerID id = 1; + repeated MultiAddress addresses = 2; + repeated Capability capabilities = 3; + + Pow pow = 4; +} + +// when a node joins it can ask peers for the nodes they know +message NodesRequest { + // this can be used to request for peer information when only its id is known. The number of ids is limited (TBD) + // we might know only of an id when getting a message through a relayer from a new peer. + repeated PeerID ids = 1; +} + +message NodesResponse +{ + // a selection of random nodes the peer knows. Limited (TBD exact number). 
+ repeated Node nodes = 1; +} + diff --git a/crates/client/p2p/proto/event.proto b/crates/client/p2p/proto/event.proto new file mode 100644 index 000000000..44d21678c --- /dev/null +++ b/crates/client/p2p/proto/event.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +import "common.proto"; + +message Event { + Hash transaction_hash = 1; + Felt252 from_address = 3; + repeated Felt252 keys = 4; + repeated Felt252 data = 5; +} + +message EventsRequest { + Iteration iteration = 1; +} + +// Responses are sent ordered by the order given in the request. +message EventsResponse { + oneof event_message { + Event event = 1; + Fin fin = 2; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its events. + } +} diff --git a/crates/client/p2p/proto/header.proto b/crates/client/p2p/proto/header.proto new file mode 100644 index 000000000..7f4ba1f35 --- /dev/null +++ b/crates/client/p2p/proto/header.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; +import "common.proto"; + +// Note: commitments may change to be for the previous blocks like comet/tendermint +// hash of block header sent to L1 +message SignedBlockHeader { + Hash block_hash = 1; // For the structure of the block hash, see https://docs.starknet.io/documentation/architecture_and_concepts/Network_Architecture/header/#block_hash + Hash parent_hash = 2; + uint64 number = 3; // This can be deduced from context. We can consider removing this field. + uint64 time = 4; // Encoded in Unix time. + Address sequencer_address = 5; + Hash state_root = 6; // Patricia root of contract and class patricia tries. Each of those tries are of height 251. Same as in L1. 
Later more trees will be included + StateDiffCommitment state_diff_commitment = 7; // The state diff commitment returned by the Starknet Feeder Gateway + // For more info, see https://community.starknet.io/t/introducing-p2p-authentication-and-mismatch-resolution-in-v0-12-2/97993 + // The leaves contain a hash of the transaction hash and transaction signature. + Patricia transactions = 8; // By order of execution. TBD: required? the client can execute (powerful machine) and match state diff + Patricia events = 9; // By order of issuance. TBD: in receipts? + Hash receipts = 10; // By order of issuance. This is a patricia root. No need for length because it's the same length as transactions. + string protocol_version = 11; // Starknet version + Uint128 gas_price_fri = 12; + Uint128 gas_price_wei = 13; + Uint128 data_gas_price_fri = 14; + Uint128 data_gas_price_wei = 15; + L1DataAvailabilityMode l1_data_availability_mode = 16; + // for now, we assume a small consensus, so this fits in 1M. Else, these will be repeated and extracted from this message. + repeated ConsensusSignature signatures = 17; + // can be more explicit here about the signature structure as this is not part of account abstraction +} + +// sent to all peers (except the ones this was received from, if any). +// for a fraction of peers, also send the GetBlockHeaders response (as if they asked for it for this block) +message NewBlock { + oneof maybe_full { + BlockID id = 1; + BlockHeadersResponse header = 2; + } +} + + +message BlockHeadersRequest { + Iteration iteration = 1; +} + +// Responses are sent ordered by the order given in the request. +message BlockHeadersResponse { + oneof header_message { + SignedBlockHeader header = 1; + Fin fin = 2; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its header. 
+ } +} + +message BlockProof { + repeated bytes proof = 1; +} \ No newline at end of file diff --git a/crates/client/p2p/proto/mempool.proto b/crates/client/p2p/proto/mempool.proto new file mode 100644 index 000000000..a172a5e87 --- /dev/null +++ b/crates/client/p2p/proto/mempool.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +import "common.proto"; +import "transaction.proto"; + +// Support also non-validating node that wants to know of the mempool (e.g. to estimate fee in case of first price) +// Result is PooledTransactions+ +message PooledTransactionsRequest +{ + message Known { + oneof known { + Hashes txs = 1; // for mempool of 2000 txs, this will be 64K. Can use Hash32 instead (8K)... + uint64 marker = 2; // since last returned marker. + } + } + optional Known known = 1; +} + +// Can be also a push, similar to NewBlock. So a full node that accepts a new transaction from a wallet +// can propagate it without being pulled +// nodes should track state diffs to know when txs have been included (the contract nonce increases) +message PolledTransactionsResponse { + optional uint64 marker = 1; // optional, if the peer supports that. 
+ bool baseline = 2; // means treat all data as baseline, not diff (may be if 'known' was sent but the mempool was reset/reorged + + oneof responses { + Transactions pending = 3; // if 'known' is given, they will be only txs added after the known + Fin fin = 4; + } +} diff --git a/crates/client/p2p/proto/receipt.proto b/crates/client/p2p/proto/receipt.proto new file mode 100644 index 000000000..378ec7dd7 --- /dev/null +++ b/crates/client/p2p/proto/receipt.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; +import "common.proto"; + +message MessageToL1 { + Felt252 from_address = 2; + repeated Felt252 payload = 3; + EthereumAddress to_address = 4; +} + +enum PriceUnit { + Wei = 0; + Fri = 1; +} + +message EthereumAddress { + bytes elements = 1; +} + +message Receipt { + message ExecutionResources { + message BuiltinCounter { + uint32 bitwise = 1; + uint32 ecdsa = 2; + uint32 ec_op = 3; + uint32 pedersen = 4; + uint32 range_check = 5; + uint32 poseidon = 6; + uint32 keccak = 7; + uint32 output = 8; + uint32 add_mod = 9; + uint32 mul_mod = 10; + uint32 range_check96 = 11; + } + + BuiltinCounter builtins = 1; + uint32 steps = 2; + uint32 memory_holes = 3; + Felt252 l1_gas = 4; + Felt252 l1_data_gas = 5; + Felt252 total_l1_gas = 6; + } + + message Common { + Felt252 actual_fee = 2; + PriceUnit price_unit = 3; + repeated MessageToL1 messages_sent = 4; + ExecutionResources execution_resources = 5; + optional string revert_reason = 6; + } + + + message Invoke { + Common common = 1; + } + + message L1Handler { + Common common = 1; + Hash256 msg_hash = 2; + } + + message Declare { + Common common = 1; + } + + message Deploy { + Common common = 1; + Felt252 contract_address = 2; + } + + message DeployAccount { + Common common = 1; + Felt252 contract_address = 2; + } + + oneof type { + Invoke invoke = 1; + L1Handler l1_handler = 2; + Declare declare = 3; + Deploy deprecated_deploy = 4; + DeployAccount deploy_account = 5; + } +} diff --git a/crates/client/p2p/proto/state.proto 
b/crates/client/p2p/proto/state.proto new file mode 100644 index 000000000..3349e02f2 --- /dev/null +++ b/crates/client/p2p/proto/state.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +import "common.proto"; + + +// optimized for flat storage, not through a trie (not sharing key prefixes) +message ContractStoredValue { + Felt252 key = 1; + Felt252 value = 2; +} + +message ContractDiff { + Address address = 1; + optional Felt252 nonce = 2; // Present only if the nonce was updated + optional Hash class_hash = 3; // Present only if the contract was deployed or replaced in this block. + repeated ContractStoredValue values = 4; + VolitionDomain domain = 5; +} + +message DeclaredClass { + Hash class_hash = 1; + optional Hash compiled_class_hash = 2; // Present only if the class is Cairo1 +} + +message StateDiffsRequest { + Iteration iteration = 1; +} + +// Responses are sent ordered by the order given in the request. +message StateDiffsResponse { + // All of the messages related to a block need to be sent before a message from the next block is sent. + oneof state_diff_message { + ContractDiff contract_diff = 1; // Multiple contract diffs for the same contract may appear continuously if the diff is too large or if it's more convenient. + DeclaredClass declared_class = 2; + Fin fin = 3; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its state diff. 
+ } +} diff --git a/crates/client/p2p/proto/transaction.proto b/crates/client/p2p/proto/transaction.proto new file mode 100644 index 000000000..b3d86cbfa --- /dev/null +++ b/crates/client/p2p/proto/transaction.proto @@ -0,0 +1,167 @@ +syntax = "proto3"; +import "common.proto"; +import "receipt.proto"; + +message ResourceLimits { + Felt252 max_amount = 1; + Felt252 max_price_per_unit = 2; +} + +message ResourceBounds { + ResourceLimits l1_gas = 1; + ResourceLimits l2_gas = 2; +} + +message AccountSignature { + repeated Felt252 parts = 1; +} + +// This is a transaction that is already accepted in a block. Once we have a mempool, we will define +// a separate message for BroadcastedTransaction. +message Transaction +{ + message DeclareV0 { + Address sender = 1; + Felt252 max_fee = 2; + AccountSignature signature = 3; + Hash class_hash = 4; + } + + message DeclareV1 { + Address sender = 1; + Felt252 max_fee = 2; + AccountSignature signature = 3; + Hash class_hash = 4; + Felt252 nonce = 5; + } + + message DeclareV2 { + Address sender = 1; + Felt252 max_fee = 2; + AccountSignature signature = 3; + Hash class_hash = 4; + Felt252 nonce = 5; + Hash compiled_class_hash = 6; + } + + // see https://external.integration.starknet.io/feeder_gateway/get_transaction?transactionHash=0x41d1f5206ef58a443e7d3d1ca073171ec25fa75313394318fc83a074a6631c3 + message DeclareV3 { + Address sender = 1; + AccountSignature signature = 2; + Hash class_hash = 3; + Felt252 nonce = 4; + Hash compiled_class_hash = 5; + ResourceBounds resource_bounds = 6; + uint64 tip = 7; + repeated Felt252 paymaster_data = 8; + repeated Felt252 account_deployment_data = 9; + VolitionDomain nonce_data_availability_mode = 10; + VolitionDomain fee_data_availability_mode = 11; + } + + message Deploy { + Hash class_hash = 1; + Felt252 address_salt = 2; + repeated Felt252 calldata = 3; + uint32 version = 4; + } + + message DeployAccountV1 { + Felt252 max_fee = 1; + AccountSignature signature = 2; + Hash class_hash = 3; + 
Felt252 nonce = 4; + Felt252 address_salt = 5; + repeated Felt252 calldata = 6; + } + + // see https://external.integration.starknet.io/feeder_gateway/get_transaction?transactionHash=0x29fd7881f14380842414cdfdd8d6c0b1f2174f8916edcfeb1ede1eb26ac3ef0 + message DeployAccountV3 { + AccountSignature signature = 1; + Hash class_hash = 2; + Felt252 nonce = 3; + Felt252 address_salt = 4; + repeated Felt252 calldata = 5; + ResourceBounds resource_bounds = 6; + uint64 tip = 7; + repeated Felt252 paymaster_data = 8; + VolitionDomain nonce_data_availability_mode = 9; + VolitionDomain fee_data_availability_mode = 10; + } + + message InvokeV0 { + Felt252 max_fee = 1; + AccountSignature signature = 2; + Address address = 3; + Felt252 entry_point_selector = 4; + repeated Felt252 calldata = 5; + } + + message InvokeV1 { + Address sender = 1; + Felt252 max_fee = 2; + AccountSignature signature = 3; + repeated Felt252 calldata = 4; + Felt252 nonce = 5; + } + + // see https://external.integration.starknet.io/feeder_gateway/get_transaction?transactionHash=0x41906f1c314cca5f43170ea75d3b1904196a10101190d2b12a41cc61cfd17c + message InvokeV3 { + Address sender = 1; + AccountSignature signature = 2; + repeated Felt252 calldata = 3; + ResourceBounds resource_bounds = 4; + uint64 tip = 5; + repeated Felt252 paymaster_data = 6; + repeated Felt252 account_deployment_data = 7; + VolitionDomain nonce_data_availability_mode = 8; + VolitionDomain fee_data_availability_mode = 9; + Felt252 nonce = 10; + } + + message L1HandlerV0 { + Felt252 nonce = 1; + Address address = 2; + Felt252 entry_point_selector = 3; + repeated Felt252 calldata = 4; + } + + oneof txn { + DeclareV0 declare_v0 = 1; + DeclareV1 declare_v1 = 2; + DeclareV2 declare_v2 = 3; + DeclareV3 declare_v3 = 4; + Deploy deploy = 5; + DeployAccountV1 deploy_account_v1 = 6; + DeployAccountV3 deploy_account_v3 = 7; + InvokeV0 invoke_v0 = 8; + InvokeV1 invoke_v1 = 9; + InvokeV3 invoke_v3 = 10; + L1HandlerV0 l1_handler = 11; + } + Hash 
transaction_hash = 12; +} + +message TransactionWithReceipt { + Transaction transaction = 1; + Receipt receipt = 2; +} + +// TBD: can support a flag to return tx hashes only, good for standalone mempool to remove them, +// or any node that keeps track of transaction streaming in the consensus. +message TransactionsRequest { + Iteration iteration = 1; +} + +// Responses are sent ordered by the order given in the request. The order inside each block is +// according to the execution order. +message TransactionsResponse { + oneof transaction_message { + TransactionWithReceipt transaction_with_receipt = 1; + Fin fin = 2; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its transactions. + } +} + +message Transactions { + repeated Transaction transactions = 1; +} diff --git a/crates/client/p2p/src/behaviour.rs b/crates/client/p2p/src/behaviour.rs new file mode 100644 index 000000000..e4ae0354a --- /dev/null +++ b/crates/client/p2p/src/behaviour.rs @@ -0,0 +1,91 @@ +use libp2p::{ + autonat, dcutr, + gossipsub::{self, MessageAuthenticity}, + identify, + identity::Keypair, + kad::{self, store::MemoryStore}, + ping, + relay::{self}, + swarm::NetworkBehaviour, + StreamProtocol, +}; +use mp_chain_config::ChainConfig; +use std::time::Duration; + +use crate::sync_codec::codecs; + +pub type Event = ::ToSwarm; + +#[derive(NetworkBehaviour)] +pub struct MadaraP2pBehaviour { + /// Ping protocol. + pub ping: ping::Behaviour, + /// Kademlia is used for node discovery only. + pub kad: kad::Behaviour, + /// Identify as starknet node. + pub identify: identify::Behaviour, + + /// Automatically make NAT configuration. + pub autonat: autonat::Behaviour, + /// DCUTR: Direct Connection Upgrade using Relay: this allows nodes behind a NAT to receive incoming connections through a relay node. + pub dcutr: dcutr::Behaviour, + /// If we're behind a NAT, we want to have a relay client to advertise a public address. 
It'll then be upgraded using DCUTR to a direct connection. + pub relay: relay::client::Behaviour, + + /// Pubsub. + pub gossipsub: gossipsub::Behaviour, + + // Single Req - Multiple Responses Streams + pub headers_sync: p2p_stream::Behaviour, + pub classes_sync: p2p_stream::Behaviour, + pub state_diffs_sync: p2p_stream::Behaviour, + pub transactions_sync: p2p_stream::Behaviour, + pub events_sync: p2p_stream::Behaviour, +} + +impl MadaraP2pBehaviour { + // The return error type can't be anyhow::Error unfortunately because the SwarmBuilder won't let us + pub fn new( + chain_config: &ChainConfig, + identity: &Keypair, + relay_behaviour: libp2p::relay::client::Behaviour, + ) -> Result> { + let pubkey = identity.public(); + let local_peer_id = pubkey.to_peer_id(); + + let p2p_stream_config = p2p_stream::Config::default(); + Ok(Self { + identify: identify::Behaviour::new( + identify::Config::new(identify::PROTOCOL_NAME.to_string(), pubkey) + .with_agent_version(format!("madara/{}", env!("CARGO_PKG_VERSION"))), + ), + ping: Default::default(), + kad: { + let protocol = StreamProtocol::try_from_owned(format!("/starknet/kad/{}/1.0.0", chain_config.chain_id)) + .expect("Invalid kad stream protocol"); + let mut cfg = kad::Config::new(protocol); + const PROVIDER_PUBLICATION_INTERVAL: Duration = Duration::from_secs(600); + cfg.set_record_ttl(Some(Duration::from_secs(0))); + cfg.set_provider_record_ttl(Some(PROVIDER_PUBLICATION_INTERVAL * 3)); + cfg.set_provider_publication_interval(Some(PROVIDER_PUBLICATION_INTERVAL)); + cfg.set_periodic_bootstrap_interval(Some(Duration::from_millis(500))); + cfg.set_query_timeout(Duration::from_secs(5 * 60)); + kad::Behaviour::with_config(local_peer_id, MemoryStore::new(local_peer_id), cfg) + }, + autonat: autonat::Behaviour::new(local_peer_id, autonat::Config::default()), + dcutr: dcutr::Behaviour::new(local_peer_id), + relay: relay_behaviour, + gossipsub: { + let privacy = MessageAuthenticity::Signed(identity.clone()); + 
gossipsub::Behaviour::new(privacy, gossipsub::Config::default()) + .map_err(|err| anyhow::anyhow!("Error making gossipsub config: {err}"))? + }, + + headers_sync: p2p_stream::Behaviour::with_codec(codecs::headers(), p2p_stream_config), + classes_sync: p2p_stream::Behaviour::with_codec(codecs::classes(), p2p_stream_config), + state_diffs_sync: p2p_stream::Behaviour::with_codec(codecs::state_diffs(), p2p_stream_config), + transactions_sync: p2p_stream::Behaviour::with_codec(codecs::transactions(), p2p_stream_config), + events_sync: p2p_stream::Behaviour::with_codec(codecs::events(), p2p_stream_config), + }) + } +} diff --git a/crates/client/p2p/src/events.rs b/crates/client/p2p/src/events.rs new file mode 100644 index 000000000..6acf077ab --- /dev/null +++ b/crates/client/p2p/src/events.rs @@ -0,0 +1,53 @@ +//! Handle incomming p2p events +use crate::{ + behaviour::{self}, + MadaraP2p, +}; +use libp2p::swarm::SwarmEvent; + +impl MadaraP2p { + pub fn handle_event(&mut self, event: SwarmEvent) -> anyhow::Result<()> { + tracing::info!("event: {event:?}"); + match event { + SwarmEvent::NewListenAddr { address, .. } => { + let listen_address = address.with_p2p(*self.swarm.local_peer_id()).expect("Making multiaddr"); + tracing::info!("📡 Peer-to-peer listening on address {listen_address:?}"); + } + SwarmEvent::Behaviour(behaviour::Event::Identify(libp2p::identify::Event::Received { + peer_id, + info, + connection_id: _, + })) => { + tracing::info!("identify: {info:?}"); + // TODO: we may want to tell the local node about the info.observed_addr - but we probably need to check that address first + // maybe we do want to trust the address if it comes from the relay..? 
+ // https://github.com/libp2p/rust-libp2p/blob/master/protocols/identify/CHANGELOG.md#0430 + // https://github.com/search?q=repo%3Alibp2p%2Frust-libp2p%20add_external_address&type=code + self.swarm.add_external_address(info.observed_addr); // removing this will mean that the node won't switch to kad server mode and will stay client + + // check that we're supposed to be in the same network - we check that they have at least the kademlia protocol for our chain + let local_kad_protocols = self.swarm.behaviour().kad.protocol_names(); + // local_kad_protocols=[/starknet/kad/SN_SEPOLIA/1.0.0] + + if !info.protocols.iter().any(|p| local_kad_protocols.contains(p)) { + // TODO: should we be more restrictive about this? + tracing::debug!( + "Got an Identify response from a peer ({peer_id}) that is not running any of our protocols" + ); + return Ok(()); + } + + // Make kademlia aware of the identity of the peer we connected to. + for addr in info.listen_addrs { + self.swarm.behaviour_mut().kad.add_address(&peer_id, addr); + } + } + + SwarmEvent::Behaviour(behaviour::Event::HeadersSync(event)) => { + self.headers_sync_handler.handle_event(event); + } + _event => {} + } + Ok(()) + } +} diff --git a/crates/client/p2p/src/handlers_impl/error.rs b/crates/client/p2p/src/handlers_impl/error.rs new file mode 100644 index 000000000..8e6ff846d --- /dev/null +++ b/crates/client/p2p/src/handlers_impl/error.rs @@ -0,0 +1,106 @@ +#![allow(unused)] + +use crate::sync_handlers; +use std::fmt; + +#[macro_export] +macro_rules! bail_internal_server_error { + ($msg:literal $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($msg))) + }; + ($err:expr $(,)?) 
=> { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($err))) + }; + ($fmt:expr, $($arg:tt)*) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($fmt, $($arg)*))) + }; +} + +#[macro_export] +macro_rules! bail_bad_request { + ($msg:literal $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($msg))) + }; + ($err:expr $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($err))) + }; + ($fmt:expr, $($arg:tt)*) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($fmt, $($arg)*))) + }; +} + +pub trait ResultExt { + fn or_internal_server_error(self, context: C) -> Result; + fn or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result; + fn or_bad_request(self, context: C) -> Result; + fn or_else_bad_request C>(self, context_fn: F) -> Result; +} + +impl> ResultExt for Result { + fn or_internal_server_error(self, context: C) -> Result { + self.map_err(|err| sync_handlers::Error::Internal(anyhow::anyhow!("{}: {:#}", context, E::into(err)))) + } + fn or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result { + self.map_err(|err| sync_handlers::Error::Internal(anyhow::anyhow!("{}: {:#}", context_fn(), E::into(err)))) + } + + fn or_bad_request(self, context: C) -> Result { + self.map_err(|err| sync_handlers::Error::BadRequest(format!("{}: {:#}", context, E::into(err)).into())) + } + fn or_else_bad_request C>(self, context_fn: F) -> Result { + self.map_err(|err| sync_handlers::Error::BadRequest(format!("{}: {:#}", context_fn(), E::into(err)).into())) + } +} + +pub trait OptionExt { + fn ok_or_internal_server_error( + self, + context: C, + ) -> Result; + fn ok_or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result; + fn ok_or_bad_request( + self, + context: C, + ) -> Result; + fn ok_or_else_bad_request C>( + self, 
context_fn: F, + ) -> Result; +} + +impl OptionExt for Option { + fn ok_or_internal_server_error( + self, + context: C, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::Internal(anyhow::anyhow!("{}", context))) + } + fn ok_or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::Internal(anyhow::anyhow!("{}", context_fn()))) + } + + fn ok_or_bad_request( + self, + context: C, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::BadRequest(format!("{}", context).into())) + } + fn ok_or_else_bad_request C>( + self, + context_fn: F, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::BadRequest(format!("{}", context_fn()).into())) + } +} diff --git a/crates/client/p2p/src/handlers_impl/headers.rs b/crates/client/p2p/src/handlers_impl/headers.rs new file mode 100644 index 000000000..545eb9e1a --- /dev/null +++ b/crates/client/p2p/src/handlers_impl/headers.rs @@ -0,0 +1,81 @@ +use super::{block_stream_config, error::ResultExt}; +use crate::{ + model::{self}, + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, stream, SinkExt, StreamExt}; +use mp_block::{header::L1DataAvailabilityMode, MadaraBlockInfo}; +use starknet_core::types::Felt; +use tokio::pin; + +impl From for model::BlockHeadersResponse { + fn from(val: MadaraBlockInfo) -> Self { + model::BlockHeadersResponse { + header_message: Some(model::block_headers_response::HeaderMessage::Header(model::SignedBlockHeader { + block_hash: Some(val.block_hash.into()), + parent_hash: Some(val.header.parent_block_hash.into()), + number: val.header.block_number, + time: val.header.block_timestamp, + sequencer_address: Some(val.header.sequencer_address.into()), + state_root: Some(val.header.global_state_root.into()), + state_diff_commitment: val.header.state_diff_commitment.zip(val.header.state_diff_length).map( + |(commitment, state_diff_length)| model::StateDiffCommitment { + state_diff_length, + 
root: Some(commitment.into()), + }, + ), + transactions: Some(model::Patricia { + n_leaves: val.header.transaction_count, + root: Some(val.header.transaction_commitment.into()), + }), + events: Some(model::Patricia { + n_leaves: val.header.event_count, + root: Some(val.header.event_commitment.into()), + }), + receipts: val.header.receipt_commitment.map(Into::into), + protocol_version: val.header.protocol_version.to_string(), + gas_price_fri: Some(val.header.l1_gas_price.strk_l1_gas_price.into()), + gas_price_wei: Some(val.header.l1_gas_price.eth_l1_gas_price.into()), + data_gas_price_fri: Some(val.header.l1_gas_price.strk_l1_data_gas_price.into()), + data_gas_price_wei: Some(val.header.l1_gas_price.eth_l1_data_gas_price.into()), + l1_data_availability_mode: match val.header.l1_da_mode { + L1DataAvailabilityMode::Calldata => model::L1DataAvailabilityMode::Calldata, + L1DataAvailabilityMode::Blob => model::L1DataAvailabilityMode::Blob, + } + .into(), + signatures: vec![model::ConsensusSignature { r: Some(Felt::ONE.into()), s: Some(Felt::ONE.into()) }], + })), + } + } +} + +pub async fn headers_sync( + ctx: ReqContext, + req: model::BlockHeadersRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let stream = ctx + .app_ctx + .backend + .block_info_stream(block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?) 
+ .map(|res| res.map(Into::into)) + // Add the Fin message + .chain(stream::once(async { + Ok(model::BlockHeadersResponse { + header_message: Some(model::block_headers_response::HeaderMessage::Fin(model::Fin {})), + }) + })); + + tracing::debug!("headers sync!"); + + pin!(stream); + while let Some(res) = stream.next().await { + tracing::debug!("new res: {res:?}!"); + if let Err(_closed) = out.send(res.or_internal_server_error("Error while reading from block stream")?).await { + break; + } + } + + Ok(()) +} diff --git a/crates/client/p2p/src/handlers_impl/mod.rs b/crates/client/p2p/src/handlers_impl/mod.rs new file mode 100644 index 000000000..c813ebb8d --- /dev/null +++ b/crates/client/p2p/src/handlers_impl/mod.rs @@ -0,0 +1,82 @@ +mod error; +mod headers; + +use crate::{model, sync_handlers}; +use error::{OptionExt, ResultExt}; +pub use headers::*; +use mc_db::{ + stream::{BlockStreamConfig, Direction}, + MadaraBackend, +}; +use mp_block::BlockId; +use mp_convert::FeltExt; +use starknet_core::types::Felt; +use std::num::NonZeroU64; + +impl TryFrom for Felt { + type Error = sync_handlers::Error; + fn try_from(value: model::Felt252) -> Result { + Self::from_slice_be_checked(&value.elements).or_bad_request("Malformated felt") + } +} +impl From for model::Felt252 { + fn from(value: Felt) -> Self { + Self { elements: value.to_bytes_be().into() } + } +} + +impl TryFrom for Felt { + type Error = sync_handlers::Error; + fn try_from(value: model::Hash) -> Result { + Self::from_slice_be_checked(&value.elements).or_bad_request("Malformated felt") + } +} +impl From for model::Hash { + fn from(value: Felt) -> Self { + Self { elements: value.to_bytes_be().into() } + } +} + +impl From for model::Address { + fn from(value: Felt) -> Self { + Self { elements: value.to_bytes_be().into() } + } +} + +impl From for model::Uint128 { + fn from(value: u128) -> Self { + let b = value.to_be_bytes(); + let low = u64::from_be_bytes([b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]); + let high 
= u64::from_be_bytes([b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]]); + Self { low, high } + } +} + +pub fn block_stream_config( + db: &MadaraBackend, + value: model::Iteration, +) -> Result { + let direction = match value.direction() { + model::iteration::Direction::Forward => Direction::Forward, + model::iteration::Direction::Backward => Direction::Backward, + }; + + let start = match (value.start, &direction) { + (Some(model::iteration::Start::BlockNumber(n)), _) => n, + (Some(model::iteration::Start::Header(hash)), _) => db + .get_block_n(&BlockId::Hash(hash.try_into()?)) + .or_internal_server_error("Getting block_n from hash")? + .ok_or_bad_request("Block not found")?, + (None, Direction::Forward) => 0, + (None, Direction::Backward) => { + db.get_latest_block_n().or_internal_server_error("Getting latest block_n")?.unwrap_or(0) + } + }; + Ok(BlockStreamConfig { + direction, + start, + // in protobuf fields default to 0 - we should not return any error in these cases. + step: value.step.try_into().unwrap_or(NonZeroU64::MIN), + limit: if value.limit == 0 { None } else { Some(value.limit) }, + }) +} diff --git a/crates/client/p2p/src/lib.rs b/crates/client/p2p/src/lib.rs new file mode 100644 index 000000000..1a2a4831d --- /dev/null +++ b/crates/client/p2p/src/lib.rs @@ -0,0 +1,137 @@ +use anyhow::Context; +use behaviour::MadaraP2pBehaviour; +use futures::FutureExt; +use libp2p::{futures::StreamExt, multiaddr::Protocol, Multiaddr, Swarm}; +use mc_db::MadaraBackend; +use mc_rpc::providers::AddTransactionProvider; +use mp_utils::graceful_shutdown; +use std::{sync::Arc, time::Duration}; +use sync_handlers::DynSyncHandler; + +mod behaviour; +mod events; +mod handlers_impl; +mod sync_codec; +mod sync_handlers; + +/// Protobuf messages. +#[allow(clippy::all)] +pub mod model { + include!(concat!(env!("OUT_DIR"), "/_.rs")); +} + +pub struct P2pConfig { + /// None to get an OS-assigned port. 
+ pub port: Option, + pub bootstrap_nodes: Vec, + pub status_interval: Duration, +} + +#[derive(Clone)] +struct MadaraP2pContext { + backend: Arc, +} + +pub struct MadaraP2p { + config: P2pConfig, + #[allow(unused)] + db: Arc, + #[allow(unused)] + add_transaction_provider: Arc, + + swarm: Swarm, + + headers_sync_handler: DynSyncHandler, +} + +impl MadaraP2p { + pub fn new( + config: P2pConfig, + db: Arc, + add_transaction_provider: Arc, + ) -> anyhow::Result { + // we do not need to provide a stable identity except for bootstrap nodes + let swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + // support tls and noise + (libp2p::tls::Config::new, libp2p::noise::Config::new), + // multiplexing protocol (yamux) + libp2p::yamux::Config::default, + ) + .context("Configuring libp2p tcp transport")? + .with_relay_client(libp2p::noise::Config::new, libp2p::yamux::Config::default) + .context("Configuring relay transport")? + .with_behaviour(|identity, relay_client| MadaraP2pBehaviour::new(db.chain_config(), identity, relay_client)) + .context("Configuring libp2p behaviour")? + .build(); + + let app_ctx = MadaraP2pContext { backend: Arc::clone(&db) }; + + Ok(Self { + config, + db, + add_transaction_provider, + swarm, + headers_sync_handler: DynSyncHandler::new("headers", app_ctx.clone(), |ctx, req, out| { + handlers_impl::headers_sync(ctx, req, out).boxed() + }), + }) + } + + /// Main loop of the p2p service. + pub async fn run(&mut self) -> anyhow::Result<()> { + let multi_addr = "/ip4/0.0.0.0".parse::()?.with(Protocol::Tcp(self.config.port.unwrap_or(0))); + self.swarm.listen_on(multi_addr).context("Binding port")?; + + for node in &self.config.bootstrap_nodes { + if let Err(err) = self.swarm.dial(node.clone()) { + tracing::debug!("Could not dial bootstrap node {node}: {err:#}"); + } + } + + let mut status_interval = tokio::time::interval(self.config.status_interval); + + loop { + tokio::select! 
{ + // Stop condition + _ = graceful_shutdown() => break, + + // Show node status regularly + _ = status_interval.tick() => { + let network_info = self.swarm.network_info(); + let connections_info = network_info.connection_counters(); + + let peers = network_info.num_peers(); + let connections_in = connections_info.num_established_incoming(); + let connections_out = connections_info.num_established_outgoing(); + let pending_connections = connections_info.num_pending(); + let dht = self.swarm.behaviour_mut().kad + .kbuckets() + // Cannot .into_iter() a KBucketRef, hence the inner collect followed by flat_map + .map(|kbucket_ref| { + kbucket_ref + .iter() + .map(|entry_ref| *entry_ref.node.key.preimage()) + .collect::>() + }) + .flat_map(|peers_in_bucket| peers_in_bucket.into_iter()) + .collect::>(); + tracing::info!("P2P {peers} peers IN: {connections_in} OUT: {connections_out} Pending: {pending_connections}"); + tracing::info!("DHT {dht:?}"); + } + + // Handle incoming service commands + // _ = + + // Make progress on the swarm and handle the events it yields + event = self.swarm.next() => match event { + Some(event) => self.handle_event(event).context("Handling p2p event")?, + None => break, + } + } + } + Ok(()) + } +} diff --git a/crates/client/p2p/src/sync_codec.rs b/crates/client/p2p/src/sync_codec.rs new file mode 100644 index 000000000..f5df443dc --- /dev/null +++ b/crates/client/p2p/src/sync_codec.rs @@ -0,0 +1,155 @@ +//! Part of this file is inspired by the wonderful pathfinder implementation + +use async_trait::async_trait; +use futures::io::{AsyncReadExt, AsyncWriteExt}; +use libp2p::futures::{AsyncRead, AsyncWrite}; +use std::{io, marker::PhantomData}; + +use crate::model; + +pub mod protocols { + //! This only handles 1 protocol version for now. In the future this file would need + //! to be rewritten so that it handles returning responses for older protocol versions. + + macro_rules! 
define_protocols { + { $( struct $type_name:ident = $name:literal ; )* } => { + $( + #[derive(Debug, Clone, Copy, Default)] + pub struct $type_name; + impl AsRef for $type_name { + fn as_ref(&self) -> &str { + $name + } + } + )* + } + } + + define_protocols! { + struct Headers = "/starknet/headers/0.1.0-rc.0"; + struct StateDiffs = "/starknet/state_diffs/0.1.0-rc.0"; + struct Classes = "/starknet/classes/0.1.0-rc.0"; + struct Transactions = "/starknet/transactions/0.1.0-rc.0"; + struct Events = "/starknet/events/0.1.0-rc.0"; + } +} + +pub mod codecs { + #![allow(clippy::identity_op)] // allow 1 * MiB + #![allow(non_upper_case_globals)] // allow MiB name + use super::*; + + const MiB: u64 = 1024 * 1024; + + pub type Headers = SyncCodec; + pub fn headers() -> Headers { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type StateDiffs = SyncCodec; + pub fn state_diffs() -> StateDiffs { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type Classes = SyncCodec; + pub fn classes() -> Classes { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 4 * MiB }) + } + pub type Transactions = SyncCodec; + pub fn transactions() -> Transactions { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type Events = SyncCodec; + pub fn events() -> Events { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } +} + +#[derive(Debug, Clone)] +pub struct SyncCodecConfig { + pub req_size_limit_bytes: u64, + pub res_size_limit_bytes: u64, +} + +#[derive(Debug, Clone)] +pub struct SyncCodec { + config: SyncCodecConfig, + /// buffer reuse + buf: Vec, + _boo: PhantomData<(Protocol, Req, Res)>, +} + +impl SyncCodec { + pub fn new(config: SyncCodecConfig) -> Self { + Self { buf: Vec::new(), config, _boo: PhantomData } + } +} + 
+#[async_trait] +impl< + Protocol: AsRef + Send + Clone, + Req: prost::Message + Default + Send, + Res: prost::Message + Default + Send, + > p2p_stream::Codec for SyncCodec +{ + type Protocol = Protocol; + type Request = Req; + type Response = Res; + + async fn read_request(&mut self, _protocol: &Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + self.buf.clear(); + io.take(self.config.req_size_limit_bytes).read_to_end(&mut self.buf).await?; + Ok(Req::decode(self.buf.as_ref())?) + } + + async fn read_response(&mut self, _protocol: &Protocol, mut io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Response is prepended with the message length + // We do not directly use [`prost::Message::decode_length_delimited`] because we want to reject the message before reading it + // if it's too long + + // unsigned_varint's error type implements Into and not From io::Error, so we have to map the error by hand + let encoded_len = unsigned_varint::aio::read_usize(&mut io).await.map_err(Into::::into)?; + if encoded_len > self.config.res_size_limit_bytes as _ { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Response has length {} which exceeds the spec-defined limit of {}", + encoded_len, self.config.res_size_limit_bytes + ), + )); + } + + self.buf.clear(); + self.buf.reserve(encoded_len); + io.take(encoded_len as _).read_to_end(&mut self.buf).await?; + if self.buf.len() != encoded_len { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + Ok(Res::decode(self.buf.as_ref())?) 
+ } + + async fn write_request(&mut self, _protocol: &Protocol, io: &mut T, req: Req) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + self.buf.clear(); + req.encode(&mut self.buf)?; + io.write_all(&self.buf).await + } + + async fn write_response(&mut self, _protocol: &Protocol, io: &mut T, res: Res) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // we don't have to use unsigned_varint::aio::write_usize here we can just use prost's length delimited messages impl + + self.buf.clear(); + res.encode_length_delimited(&mut self.buf)?; + io.write_all(&self.buf).await + } +} diff --git a/crates/client/p2p/src/sync_handlers.rs b/crates/client/p2p/src/sync_handlers.rs new file mode 100644 index 000000000..fd19b2967 --- /dev/null +++ b/crates/client/p2p/src/sync_handlers.rs @@ -0,0 +1,106 @@ +use futures::{channel::mpsc::Sender, future::BoxFuture, pin_mut, Future}; +use p2p_stream::InboundRequestId; +use std::borrow::Cow; +use std::{collections::HashMap, fmt, marker::PhantomData}; +use tokio::task::{AbortHandle, JoinSet}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Error is internal and will be reported with error level. + #[error("Internal server error: {0:#}")] + Internal(anyhow::Error), + /// Error is the peer's fault, will only be reported with debug level. 
+ #[error("Bad request: {0}")] + BadRequest(Cow<'static, str>), +} + +pub struct ReqContext { + pub app_ctx: AppCtx, +} + +pub type DynSyncHandler = StreamHandler< + AppCtx, + Req, + Res, + fn(ReqContext, Req, Sender) -> BoxFuture<'static, Result<(), Error>>, + BoxFuture<'static, Result<(), Error>>, +>; + +pub struct StreamHandler { + debug_name: &'static str, + app_ctx: AppCtx, + handler: F, + join_set: JoinSet<()>, + current_inbound: HashMap, + _boo: PhantomData<(Req, Res, Fut)>, +} + +impl StreamHandler +where + F: Fn(ReqContext, Req, Sender) -> Fut, + Fut: Future> + Send + 'static, +{ + pub fn new(debug_name: &'static str, app_ctx: AppCtx, handler: F) -> Self { + Self { + debug_name, + handler, + app_ctx, + join_set: Default::default(), + current_inbound: Default::default(), + _boo: PhantomData, + } + } + + pub fn handle_event(&mut self, ev: p2p_stream::Event) { + match ev { + /* === OTHER PEER => US === */ + p2p_stream::Event::InboundRequest { request_id, request, peer, channel } => { + tracing::debug!("New inbounds request in stream {} [peer_id {}]", self.debug_name, peer); + let ctx = ReqContext { app_ctx: self.app_ctx.clone() }; + // Spawn the task that responds to the request. 
+ + let fut = (self.handler)(ctx, request, channel); + + let abort_handle = self.join_set.spawn(async move { + let fut = fut; + pin_mut!(fut); + + if let Err(err) = fut.await { + match err { + Error::Internal(err) => { + tracing::error!(target: "p2p_errors", "Internal Server Error: {:#}", err); + } + Error::BadRequest(err) => { + tracing::debug!(target: "p2p_errors", "Bad request: {:#}", err); + } + } + } + }); + + self.current_inbound.insert(request_id, abort_handle); + } + p2p_stream::Event::InboundFailure { peer, request_id, error } => { + tracing::debug!("Inbounds failure in stream {} [peer_id {}]: {:#}", self.debug_name, peer, error); + if let Some(v) = self.current_inbound.remove(&request_id) { + v.abort(); + } + } + p2p_stream::Event::OutboundResponseStreamClosed { peer, request_id } => { + tracing::debug!("End of stream {} [peer_id {}]", self.debug_name, peer); + if let Some(v) = self.current_inbound.remove(&request_id) { + v.abort(); // abort if not yet aborted + } + } + /* === US => OTHER PEER === */ + p2p_stream::Event::OutboundRequestSentAwaitingResponses { .. } => todo!(), + p2p_stream::Event::OutboundFailure { .. } => todo!(), + p2p_stream::Event::InboundResponseStreamClosed { .. } => todo!(), + } + } +} + +impl fmt::Debug for StreamHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "StreamHandler[{}] <{} inbounds tasks>", self.debug_name, self.current_inbound.len()) + } +} diff --git a/crates/client/p2p_stream/Cargo.toml b/crates/client/p2p_stream/Cargo.toml new file mode 100644 index 000000000..1c7a10b2f --- /dev/null +++ b/crates/client/p2p_stream/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "p2p_stream" +description = "Extension of libp2p-request-response that allows for streaming responses to a single request." 
+authors = [ + "Parity Technologies ", + "Equilibrium Labs ", +] +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +futures-bounded = { workspace = true } +libp2p = { workspace = true, features = ["identify", "noise", "tcp", "tokio"] } +tracing = { workspace = true } + +[dev-dependencies] +anyhow = { workspace = true } +libp2p = { workspace = true, features = [ + "identify", + "noise", + "tcp", + "tokio", + "yamux", +] } +libp2p-plaintext = { workspace = true } +libp2p-swarm-test = { workspace = true } +rstest = { workspace = true } +tokio = { workspace = true, features = ["macros", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/client/p2p_stream/README.md b/crates/client/p2p_stream/README.md new file mode 100644 index 000000000..8f19b754a --- /dev/null +++ b/crates/client/p2p_stream/README.md @@ -0,0 +1,41 @@ +# pathfinder's `p2p_stream` crate + +This crate is a copy of [pathfinder's `p2p_stream` crate](https://github.com/eqlabs/pathfinder/blob/main/crates/p2p_stream) which itself is a derivative +of the [`libp2p request/response`](https://docs.rs/libp2p-request-response/latest/libp2p_request_response/) crate with a few changes to allow streaming responses. + +Original readme follows. + +------------------- + +# Introduction + +This crate is a derivative of Parity Technologies' [`libp2p request/response`](https://docs.rs/libp2p-request-response/latest/libp2p_request_response/) crate, which provides a generic **"single request - stream of responses"** protocol, similar to [gRPC's server streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#server-streaming-rpc).
+ +# Feature comparison with request/response + +| | p2p-stream | libp2p-request-response | +| ----------- | ----------- | ----------- | +| libp2p compatibility | [≥ libp2p-v0.53.2](https://github.com/libp2p/rust-libp2p/releases/tag/libp2p-v0.53.2) | ✔ | +| sending request opens new libp2p stream | ✔ | ✔ | +| sending request | `Behavior::send_request` | `Behavior::send_request` | +| receiving request | `InboundRequest` event | `Message::Request` in `Message` event | +| sending response(s) | into a channel obtained from `InboundRequest` event | call `Behaviour` method after receiving `Message::Request` event | +| receiving response(s) | from a channel obtained from `OutboundRequestSentAwaitingResponses` event | `Message::Response` in `Message` event | +| number of responses per request | ≥ 0 | 1 | +| user defined R & W protocol codec | ✔ | ✔ | +| response codec should delimit messages | ✔ | n/a | +| partial protocol support
(i.e. only upstream or downstream) | * | ✔ | +| out of the box cbor and json codecs | * | ✔ | +
+ +*): [`pathfinder`](https://github.com/eqlabs/pathfinder) uses this crate with its own [Starknet](https://www.starknet.io/) specific [protocol](https://github.com/starknet-io/starknet-p2p-specs) and decided to drop unnecessary features + +# Acknowledgements + +Thanks to the [rust-libp2p contributors](https://github.com/libp2p/rust-libp2p/graphs/contributors) and [Parity Technologies](https://www.parity.io/) for making [`rust-libp2p`](https://github.com/libp2p/rust-libp2p) possible. + +# FAQ + +1. Q: I'd like to see the scope of changes compared to the original crate.
+ A: Please diff with [`libp2p-v0.53.2`](https://github.com/libp2p/rust-libp2p/tree/libp2p-v0.53.2) diff --git a/crates/client/p2p_stream/src/codec.rs b/crates/client/p2p_stream/src/codec.rs new file mode 100644 index 000000000..c6387d65f --- /dev/null +++ b/crates/client/p2p_stream/src/codec.rs @@ -0,0 +1,65 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::io; + +use async_trait::async_trait; +use futures::prelude::*; + +/// A `Codec` defines the request and response types +/// for a request/streaming-response [`Behaviour`](crate::Behaviour) protocol or +/// protocol family and how they are encoded / decoded on an I/O stream. 
+#[async_trait] +pub trait Codec { + /// The type of protocol(s) or protocol versions being negotiated. + type Protocol: AsRef + Send + Clone; + /// The type of inbound and outbound requests. + type Request: Send; + /// The type of inbound and outbound responses. + type Response: Send; + + /// Reads a request from the given I/O stream according to the + /// negotiated protocol. + async fn read_request(&mut self, protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Reads a response from the given I/O stream according to the + /// negotiated protocol. + async fn read_response(&mut self, protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Writes a request to the given I/O stream according to the + /// negotiated protocol. + async fn write_request(&mut self, protocol: &Self::Protocol, io: &mut T, req: Self::Request) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; + + /// Writes a response to the given I/O stream according to the + /// negotiated protocol. + async fn write_response(&mut self, protocol: &Self::Protocol, io: &mut T, res: Self::Response) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; +} diff --git a/crates/client/p2p_stream/src/handler.rs b/crates/client/p2p_stream/src/handler.rs new file mode 100644 index 000000000..b282e95e2 --- /dev/null +++ b/crates/client/p2p_stream/src/handler.rs @@ -0,0 +1,469 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +pub(crate) mod protocol; + +use std::collections::VecDeque; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use std::{fmt, io}; + +use futures::channel::mpsc; +use futures::prelude::*; +use libp2p::swarm::handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, + FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError, +}; +use libp2p::swarm::SubstreamProtocol; + +use crate::codec::Codec; +use crate::handler::protocol::Protocol; +use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; + +/// A connection handler for a request/streaming-response +/// [`Behaviour`](super::Behaviour) protocol. +pub struct Handler +where + TCodec: Codec, +{ + /// The supported inbound protocols. 
+ inbound_protocols: Vec, + /// The request/streaming-response message codec. + codec: TCodec, + /// Queue of events to emit in `poll()`. + pending_events: VecDeque>, + /// Outbound upgrades waiting to be emitted as an + /// `OutboundSubstreamRequest`. + pending_outbound: VecDeque>, + + requested_outbound: VecDeque>, + /// A channel for receiving inbound requests. + inbound_receiver: mpsc::Receiver<(InboundRequestId, TCodec::Request, mpsc::Sender)>, + /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound + /// request. + inbound_sender: mpsc::Sender<(InboundRequestId, TCodec::Request, mpsc::Sender)>, + /// A channel for signalling that an outbound request has been sent. Cloned + /// for each outbound request. + outbound_sender: mpsc::Sender<(OutboundRequestId, mpsc::Receiver>)>, + /// The [`mpsc::Receiver`] for the above sender. + outbound_receiver: mpsc::Receiver<(OutboundRequestId, mpsc::Receiver>)>, + + inbound_request_id: Arc, + + worker_streams: futures_bounded::FuturesMap, io::Error>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum RequestId { + Inbound(InboundRequestId), + Outbound(OutboundRequestId), +} + +impl Handler +where + TCodec: Codec + Send + Clone + 'static, +{ + pub(super) fn new( + inbound_protocols: Vec, + codec: TCodec, + substream_timeout: Duration, + inbound_request_id: Arc, + max_concurrent_streams: usize, + ) -> Self { + let (inbound_sender, inbound_receiver) = mpsc::channel(0); + let (outbound_sender, outbound_receiver) = mpsc::channel(0); + Self { + inbound_protocols, + codec, + pending_outbound: VecDeque::new(), + requested_outbound: Default::default(), + inbound_receiver, + inbound_sender, + outbound_sender, + outbound_receiver, + pending_events: VecDeque::new(), + inbound_request_id, + worker_streams: futures_bounded::FuturesMap::new(substream_timeout, max_concurrent_streams), + } + } + + /// Returns the next inbound request ID. 
+ fn next_inbound_request_id(&mut self) -> InboundRequestId { + InboundRequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)) + } + + fn on_fully_negotiated_inbound( + &mut self, + FullyNegotiatedInbound { protocol: (mut stream, protocol), info: () }: FullyNegotiatedInbound< + ::InboundProtocol, + ::InboundOpenInfo, + >, + ) { + let mut codec = self.codec.clone(); + let request_id = self.next_inbound_request_id(); + let mut sender = self.inbound_sender.clone(); + + let recv_request_then_fwd_outgoing_responses = async move { + let (rs_send, mut rs_recv) = mpsc::channel(0); + + let read = codec.read_request(&protocol, &mut stream); + let request = read.await?; + + sender + .send((request_id, request, rs_send)) + .await + .expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + // Keep on forwarding until the channel is closed + while let Some(response) = rs_recv.next().await { + let write = codec.write_response(&protocol, &mut stream, response); + write.await?; + } + + stream.close().await?; + + Ok(Event::OutboundResponseStreamClosed(request_id)) + }; + + if self + .worker_streams + .try_push(RequestId::Inbound(request_id), recv_request_then_fwd_outgoing_responses.boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") + } + } + + fn on_fully_negotiated_outbound( + &mut self, + FullyNegotiatedOutbound { protocol: (mut stream, protocol), info: () }: FullyNegotiatedOutbound< + ::OutboundProtocol, + ::OutboundOpenInfo, + >, + ) { + let message = self.requested_outbound.pop_front().expect("negotiated a stream without a pending message"); + + let mut codec = self.codec.clone(); + let request_id = message.request_id; + + let (mut rs_send, rs_recv) = mpsc::channel(0); + + let mut sender = self.outbound_sender.clone(); + + let send_req_then_fwd_incoming_responses = async move { + let write = codec.write_request(&protocol, &mut stream, message.request); + write.await?; + + stream.close().await?; + + 
sender.send((request_id, rs_recv)).await.expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + // Keep on forwarding until the channel is closed or error occurs + loop { + match codec.read_response(&protocol, &mut stream).await { + Ok(response) => { + rs_send + .send(Ok(response)) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + } + // The stream is closed, there's nothing more to receive + Err(error) if error.kind() == io::ErrorKind::UnexpectedEof => break, + // An error occurred, propagate it + Err(error) => { + let error_clone = io::Error::new(error.kind(), error.to_string()); + rs_send + .send(Err(error_clone)) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + return Err(error); + } + } + } + + Ok(Event::InboundResponseStreamClosed(request_id)) + }; + + if self + .worker_streams + .try_push(RequestId::Outbound(request_id), send_req_then_fwd_incoming_responses.boxed()) + .is_err() + { + tracing::warn!("Dropping outbound stream because we are at capacity") + } + } + + fn on_dial_upgrade_error( + &mut self, + DialUpgradeError { error, info: () }: DialUpgradeError< + ::OutboundOpenInfo, + ::OutboundProtocol, + >, + ) { + let message = self.requested_outbound.pop_front().expect("negotiated a stream without a pending message"); + + match error { + StreamUpgradeError::Timeout => { + self.pending_events.push_back(Event::OutboundTimeout(message.request_id)); + } + StreamUpgradeError::NegotiationFailed => { + // The remote merely doesn't support the protocol(s) we requested. + // This is no reason to close the connection, which may + // successfully communicate with other protocols already. + // An event is reported to permit user code to react to the fact that + // the remote peer does not support the requested protocol(s). 
+ self.pending_events.push_back(Event::OutboundUnsupportedProtocols(message.request_id)); + } + StreamUpgradeError::Apply(e) => unreachable!("{e:?}"), + StreamUpgradeError::Io(e) => { + tracing::debug!("outbound stream for request {} failed: {e}, retrying", message.request_id); + self.requested_outbound.push_back(message); + } + } + } + fn on_listen_upgrade_error( + &mut self, + ListenUpgradeError { error, .. }: ListenUpgradeError< + ::InboundOpenInfo, + ::InboundProtocol, + >, + ) { + unreachable!("{error:?}") + } +} + +/// The events emitted by the [`Handler`]. +pub enum Event +where + TCodec: Codec, +{ + /// A request has been received. + InboundRequest { + /// The ID of the request. + request_id: InboundRequestId, + /// The request message. + request: TCodec::Request, + /// The channel through which we are expected to send responses. + sender: mpsc::Sender, + }, + /// A request has been sent and we are awaiting responses. + OutboundRequestSentAwaitingResponses { + /// The ID of the outbound request. + request_id: OutboundRequestId, + /// The channel through which we can receive the responses. + receiver: mpsc::Receiver>, + }, + /// An outbound response stream to an inbound request was closed. + OutboundResponseStreamClosed(InboundRequestId), + /// An inbound response stream to an outbound request was closed. + InboundResponseStreamClosed(OutboundRequestId), + /// An outbound request timed out while sending the request + /// or waiting for the response. + OutboundTimeout(OutboundRequestId), + /// An outbound request failed to negotiate a mutually supported protocol. + OutboundUnsupportedProtocols(OutboundRequestId), + OutboundStreamFailed { + request_id: OutboundRequestId, + error: io::Error, + }, + /// An inbound request timed out while waiting for the request + /// or sending the response. 
+ InboundTimeout(InboundRequestId), + InboundStreamFailed { + request_id: InboundRequestId, + error: io::Error, + }, +} + +impl fmt::Debug for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Event::InboundRequest { request_id, request: _, sender: _ } => { + f.debug_struct("Event::InboundRequest").field("request_id", request_id).finish() + } + Event::OutboundRequestSentAwaitingResponses { request_id, receiver: _ } => { + f.debug_struct("Event::OutboundRequestSentAwaitingResponses").field("request_id", request_id).finish() + } + Event::InboundResponseStreamClosed(request_id) => { + f.debug_struct("Event::InboundResponseStreamClosed").field("request_id", request_id).finish() + } + Event::OutboundResponseStreamClosed(request_id) => { + f.debug_struct("Event::OutboundResponseStreamClosed").field("request_id", request_id).finish() + } + Event::OutboundTimeout(request_id) => f.debug_tuple("Event::OutboundTimeout").field(request_id).finish(), + Event::OutboundUnsupportedProtocols(request_id) => { + f.debug_tuple("Event::OutboundUnsupportedProtocols").field(request_id).finish() + } + Event::OutboundStreamFailed { request_id, error } => f + .debug_struct("Event::OutboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), + Event::InboundTimeout(request_id) => f.debug_tuple("Event::InboundTimeout").field(request_id).finish(), + Event::InboundStreamFailed { request_id, error } => f + .debug_struct("Event::InboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), + } + } +} + +pub struct OutboundMessage { + pub(crate) request_id: OutboundRequestId, + pub(crate) request: TCodec::Request, + pub(crate) protocols: Vec, +} + +impl fmt::Debug for OutboundMessage +where + TCodec: Codec, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OutboundMessage").finish_non_exhaustive() + } +} + +impl ConnectionHandler for Handler +where + TCodec: 
Codec + Send + Clone + 'static, +{ + type FromBehaviour = OutboundMessage; + type ToBehaviour = Event; + type InboundProtocol = Protocol; + type OutboundProtocol = Protocol; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(Protocol { protocols: self.inbound_protocols.clone() }, ()) + } + + fn on_behaviour_event(&mut self, request: Self::FromBehaviour) { + self.pending_outbound.push_back(request); + } + + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll, (), Self::ToBehaviour>> { + match self.worker_streams.poll_unpin(cx) { + Poll::Ready((_, Ok(Ok(event)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + Poll::Ready((RequestId::Inbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundStreamFailed { + request_id: id, + error: e, + })); + } + Poll::Ready((RequestId::Outbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundStreamFailed { + request_id: id, + error: e, + })); + } + Poll::Ready((RequestId::Inbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundTimeout(id))); + } + Poll::Ready((RequestId::Outbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundTimeout(id))); + } + Poll::Pending => {} + } + + // Drain pending events that were produced by `worker_streams`. + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + // Check for inbound requests. 
+ if let Poll::Ready(Some((id, rq, rs_sender))) = self.inbound_receiver.poll_next_unpin(cx) { + // We received an inbound request. + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundRequest { + request_id: id, + request: rq, + sender: rs_sender, + })); + } + + // Emit outbound requests. + if let Some(request) = self.pending_outbound.pop_front() { + let protocols = request.protocols.clone(); + self.requested_outbound.push_back(request); + + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(Protocol { protocols }, ()), + }); + } + + // Check for readiness to receive inbound responses. + if let Poll::Ready(Some((id, rs_receiver))) = self.outbound_receiver.poll_next_unpin(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundRequestSentAwaitingResponses { + request_id: id, + receiver: rs_receiver, + })); + } + + debug_assert!(self.pending_outbound.is_empty()); + + if self.pending_outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_outbound.shrink_to_fit(); + } + + Poll::Pending + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + '_, + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { + self.on_fully_negotiated_inbound(fully_negotiated_inbound) + } + ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { + self.on_fully_negotiated_outbound(fully_negotiated_outbound) + } + ConnectionEvent::DialUpgradeError(dial_upgrade_error) => self.on_dial_upgrade_error(dial_upgrade_error), + ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { + self.on_listen_upgrade_error(listen_upgrade_error) + } + _ => {} + } + } +} diff --git a/crates/client/p2p_stream/src/handler/protocol.rs b/crates/client/p2p_stream/src/handler/protocol.rs new file mode 100644 index 
000000000..67f68b503 --- /dev/null +++ b/crates/client/p2p_stream/src/handler/protocol.rs @@ -0,0 +1,78 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! The definition of a request/streaming-response protocol via inbound +//! and outbound substream upgrades. The inbound upgrade receives a request +//! and allows for sending a series of responses, whereas the outbound upgrade +//! sends a request and allows for receiving several responses. + +use futures::future::{ready, Ready}; +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::swarm::Stream; + +/// Response substream upgrade protocol. +/// +/// Receives a request and sends responses. +#[derive(Debug)] +pub struct Protocol

{ + pub(crate) protocols: Vec

, +} + +impl

UpgradeInfo for Protocol

+where + P: AsRef + Clone, +{ + type Info = P; + type InfoIter = std::vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocols.clone().into_iter() + } +} + +impl

InboundUpgrade for Protocol

+where + P: AsRef + Clone, +{ + type Output = (Stream, P); + type Error = (); + type Future = Ready>; + + fn upgrade_inbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) + } +} + +impl

OutboundUpgrade for Protocol

+where + P: AsRef + Clone, +{ + type Output = (Stream, P); + type Error = (); + type Future = Ready>; + + fn upgrade_outbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) + } +} diff --git a/crates/client/p2p_stream/src/lib.rs b/crates/client/p2p_stream/src/lib.rs new file mode 100644 index 000000000..06853b0a1 --- /dev/null +++ b/crates/client/p2p_stream/src/lib.rs @@ -0,0 +1,756 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Generic single-request/response-stream protocols, later referred to as +//! request/streaming-response. +//! +//! ## General Usage +//! +//! The [`Behaviour`] struct is a [`NetworkBehaviour`] that implements a generic +//! 
request/streaming-response protocol or protocol family, whereby each request +//! is sent over a new substream on a connection. `Behaviour` is generic +//! over the actual messages being sent, which are defined in terms of a +//! [`Codec`]. Creating a request/streaming-response protocol thus amounts +//! to providing an implementation of this trait which can then be +//! given to [`Behaviour::with_codec`]. Further configuration options are +//! available via the [`Config`]. +//! +//! Outbound requests are sent using [`Behaviour::send_request`] and the +//! responses received via +//! [`Event::OutboundRequestSentAwaitingResponses::channel`]. +//! +//! Inbound requests are received via [`Event::InboundRequest`] and responses +//! are sent via [`Event::InboundRequest::channel`]. +//! +//! ## Protocol Families +//! +//! A single [`Behaviour`] instance can be used with an entire +//! protocol family that share the same request and response types. +//! For that purpose, [`Codec::Protocol`] is typically +//! instantiated with a sum type. + +mod codec; +mod handler; + +use std::collections::{HashMap, HashSet, VecDeque}; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use std::{fmt, io}; + +pub use codec::Codec; +use futures::channel::mpsc; +use handler::Handler; +use libp2p::core::transport::PortUse; +use libp2p::core::{ConnectedPoint, Endpoint, Multiaddr}; +use libp2p::identity::PeerId; +use libp2p::swarm::behaviour::{AddressChange, ConnectionClosed, DialFailure, FromSwarm}; +use libp2p::swarm::dial_opts::DialOpts; +use libp2p::swarm::{ + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; + +use crate::handler::OutboundMessage; + +/// The events emitted by a request/streaming-response [`Behaviour`]. +#[derive(Debug)] +pub enum Event { + /// An incoming request from another peer. 
+ InboundRequest { + /// The peer who sent the request. + peer: PeerId, + /// The ID of the request. + request_id: InboundRequestId, + /// The request message. + request: TRequest, + /// The channel through which we are expected to send responses. + channel: mpsc::Sender, + }, + /// Outbound request to another peer was accepted and we can now await + /// responses. + OutboundRequestSentAwaitingResponses { + /// The peer who received our request. + peer: PeerId, + /// The ID of the outbound request. + request_id: OutboundRequestId, + /// The channel through which we can receive the responses. + channel: mpsc::Receiver>, + }, + /// An outbound request failed. + OutboundFailure { + /// The peer to whom the request was sent. + peer: PeerId, + /// The (local) ID of the failed request. + request_id: OutboundRequestId, + /// The error that occurred. + error: OutboundFailure, + }, + /// An inbound request failed. + InboundFailure { + /// The peer from whom the request was received. + peer: PeerId, + /// The ID of the failed inbound request. + request_id: InboundRequestId, + /// The error that occurred. + error: InboundFailure, + }, + OutboundResponseStreamClosed { + /// The peer to whom the responses were sent. + peer: PeerId, + /// The ID of the inbound request to which responses were sent. + request_id: InboundRequestId, + }, + InboundResponseStreamClosed { + /// The peer from whom the responses were received. + peer: PeerId, + /// The ID of the outbound request to which responses were received. + request_id: OutboundRequestId, + }, +} + +/// Possible failures occurring in the context of sending +/// an outbound request and receiving the response. +#[derive(Debug)] +pub enum OutboundFailure { + /// The request could not be sent because a dialing attempt failed. + DialFailure, + /// The request timed out before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. 
+ Timeout, + /// The connection closed before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. + ConnectionClosed, + /// The remote supports none of the requested protocols. + UnsupportedProtocols, + /// An IO failure happened on an outbound stream. + Io(io::Error), +} + +impl fmt::Display for OutboundFailure { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + OutboundFailure::DialFailure => write!(f, "Failed to dial the requested peer"), + OutboundFailure::Timeout => write!(f, "Timeout while waiting for a response"), + OutboundFailure::ConnectionClosed => { + write!(f, "Connection was closed before a response was received") + } + OutboundFailure::UnsupportedProtocols => { + write!(f, "The remote supports none of the requested protocols") + } + OutboundFailure::Io(e) => write!(f, "IO error on outbound stream: {e}"), + } + } +} + +impl std::error::Error for OutboundFailure {} + +/// Possible failures occurring in the context of receiving an +/// inbound request and sending a response. +#[derive(Debug)] +pub enum InboundFailure { + /// The inbound request timed out, either while reading the + /// incoming request or before a response is sent, e.g. if + /// `Event::InboundRequest::channel::send` is not called in a + /// timely manner. + Timeout, + /// The connection closed before a response could be send. + ConnectionClosed, + /// An IO failure happened on an inbound stream. 
+ Io(io::Error), +} + +impl fmt::Display for InboundFailure { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InboundFailure::Timeout => { + write!(f, "Timeout while receiving request or sending response") + } + InboundFailure::ConnectionClosed => { + write!(f, "Connection was closed before a response could be sent") + } + InboundFailure::Io(e) => write!(f, "IO error on inbound stream: {e}"), + } + } +} + +impl std::error::Error for InboundFailure {} + +/// The ID of an inbound request. +/// +/// Note: [`InboundRequestId`]'s uniqueness is only guaranteed between +/// inbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct InboundRequestId(u64); + +impl fmt::Display for InboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The ID of an outbound request. +/// +/// Note: [`OutboundRequestId`]'s uniqueness is only guaranteed between +/// outbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct OutboundRequestId(u64); + +impl fmt::Display for OutboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The configuration for a `Behaviour` protocol. +#[derive(Debug, Clone, Copy)] +pub struct Config { + request_timeout: Duration, + max_concurrent_streams: usize, +} + +impl Default for Config { + fn default() -> Self { + Self { request_timeout: Duration::from_secs(60), max_concurrent_streams: 100 } + } +} + +impl Config { + /// Sets the timeout for inbound and outbound requests. + pub fn request_timeout(mut self, v: Duration) -> Self { + self.request_timeout = v; + self + } + + /// Sets the upper bound for the number of concurrent inbound + outbound + /// streams. 
+ pub fn max_concurrent_streams(mut self, num_streams: usize) -> Self { + self.max_concurrent_streams = num_streams; + self + } +} + +/// A request/streaming-response protocol for some message codec. +#[allow(clippy::type_complexity)] +pub struct Behaviour +where + TCodec: Codec + Clone + Send + 'static, +{ + /// The supported protocols. + protocols: Vec, + /// The next (local) request ID. + next_outbound_request_id: OutboundRequestId, + /// The next (inbound) request ID. + next_inbound_request_id: Arc, + /// The protocol configuration. + config: Config, + /// The protocol codec for reading and writing requests and responses. + codec: TCodec, + /// Pending events to return from `poll`. + pending_events: VecDeque, OutboundMessage>>, + /// The currently connected peers, their pending outbound and inbound + /// responses and their known, reachable addresses, if any. + connected: HashMap>, + /// Requests that have not yet been sent and are waiting for a connection + /// to be established. + pending_outbound_requests: HashMap>>, +} + +impl Behaviour +where + TCodec: Codec + Default + Clone + Send + 'static, +{ + /// Creates a new `Behaviour` for the given configuration, + /// using [`Default`] to construct the codec and the protocol. + pub fn new(cfg: Config) -> Self + where + TCodec::Protocol: Default, + { + Self::with_codec_and_protocols(TCodec::default(), std::iter::once(TCodec::Protocol::default()), cfg) + } +} + +impl Behaviour +where + TCodec: Codec + Clone + Send + 'static, +{ + /// Creates a new `Behaviour` with a default protocol name for the given + /// codec and configuration. + pub fn with_codec(codec: TCodec, cfg: Config) -> Self + where + TCodec::Protocol: Default, + { + Self::with_codec_and_protocols(codec, std::iter::once(TCodec::Protocol::default()), cfg) + } + + /// Creates a new `Behaviour` for the given + /// protocols, codec and configuration. 
+ pub fn with_codec_and_protocols(codec: TCodec, protocols: I, cfg: Config) -> Self + where + I: IntoIterator, + { + Behaviour { + protocols: protocols.into_iter().collect(), + next_outbound_request_id: OutboundRequestId(1), + next_inbound_request_id: Arc::new(AtomicU64::new(1)), + config: cfg, + codec, + pending_events: VecDeque::new(), + connected: HashMap::new(), + pending_outbound_requests: HashMap::new(), + } + } + + /// Initiates sending a request. + /// + /// If the targeted peer is currently not connected, a dialing + /// attempt is initiated and the request is sent as soon as a + /// connection is established. + /// + /// > **Note**: In order for such a dialing attempt to succeed, + /// > the `RequestResponse` protocol must be embedded + /// > in another `NetworkBehaviour` that provides peer and + /// > address discovery. + pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> OutboundRequestId { + let request_id = self.next_outbound_request_id(); + + let request = OutboundMessage { request_id, request, protocols: self.protocols.clone() }; + + if let Some(request) = self.try_send_request(peer, request) { + self.pending_events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer).build() }); + + self.pending_outbound_requests.entry(*peer).or_default().push(request); + } + + request_id + } + + /// Checks whether a peer is currently connected. + pub fn is_connected(&self, peer: &PeerId) -> bool { + if let Some(connections) = self.connected.get(peer) { + !connections.is_empty() + } else { + false + } + } + + /// Returns the next outbound request ID. + fn next_outbound_request_id(&mut self) -> OutboundRequestId { + let request_id = self.next_outbound_request_id; + self.next_outbound_request_id.0 += 1; + request_id + } + + /// Tries to send a request by queueing an appropriate event to be + /// emitted to the `Swarm`. If the peer is not currently connected, + /// the given request is return unchanged. 
+ fn try_send_request(&mut self, peer: &PeerId, request: OutboundMessage) -> Option> { + if let Some(connections) = self.connected.get_mut(peer) { + if connections.is_empty() { + return Some(request); + } + let ix = (request.request_id.0 as usize) % connections.len(); + let conn = &mut connections[ix]; + conn.pending_outbound_response_streams.insert(request.request_id); + self.pending_events.push_back(ToSwarm::NotifyHandler { + peer_id: *peer, + handler: NotifyHandler::One(conn.id), + event: request, + }); + None + } else { + Some(request) + } + } + + /// Remove pending outbound response stream for the given peer and + /// connection. + /// + /// Returns `true` if the provided connection to the given peer is still + /// alive and the [`OutboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. + fn remove_pending_outbound_response_stream( + &mut self, + peer: &PeerId, + connection: ConnectionId, + request: OutboundRequestId, + ) -> bool { + self.get_connection_mut(peer, connection) + .map(|c| c.pending_outbound_response_streams.remove(&request)) + .unwrap_or(false) + } + + /// Remove pending inbound response stream for the given peer and + /// connection. + /// + /// Returns `true` if the provided connection to the given peer is still + /// alive and the [`InboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. + fn remove_pending_inbound_response_stream( + &mut self, + peer: &PeerId, + connection: ConnectionId, + request: InboundRequestId, + ) -> bool { + self.get_connection_mut(peer, connection) + .map(|c| c.pending_inbound_response_streams.remove(&request)) + .unwrap_or(false) + } + + /// Returns a mutable reference to the connection in `self.connected` + /// corresponding to the given [`PeerId`] and [`ConnectionId`]. 
+ fn get_connection_mut(&mut self, peer: &PeerId, connection: ConnectionId) -> Option<&mut Connection> { + self.connected.get_mut(peer).and_then(|connections| connections.iter_mut().find(|c| c.id == connection)) + } + + fn on_address_change(&mut self, AddressChange { peer_id, connection_id, new, .. }: AddressChange<'_>) { + let new_address = match new { + ConnectedPoint::Dialer { address, .. } => Some(address.clone()), + ConnectedPoint::Listener { .. } => None, + }; + let connections = + self.connected.get_mut(&peer_id).expect("Address change can only happen on an established connection."); + + let connection = connections + .iter_mut() + .find(|c| c.id == connection_id) + .expect("Address change can only happen on an established connection."); + connection.remote_address = new_address; + } + + fn on_connection_closed( + &mut self, + ConnectionClosed { peer_id, connection_id, remaining_established, .. }: ConnectionClosed<'_>, + ) { + let connections = + self.connected.get_mut(&peer_id).expect("Expected some established connection to peer before closing."); + + let connection = connections + .iter() + .position(|c| c.id == connection_id) + .map(|p: usize| connections.remove(p)) + .expect("Expected connection to be established before closing."); + + debug_assert_eq!(connections.is_empty(), remaining_established == 0); + if connections.is_empty() { + self.connected.remove(&peer_id); + } + + for request_id in connection.pending_inbound_response_streams { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer: peer_id, + request_id, + error: InboundFailure::ConnectionClosed, + })); + } + + for request_id in connection.pending_outbound_response_streams { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer: peer_id, + request_id, + error: OutboundFailure::ConnectionClosed, + })); + } + } + + fn on_dial_failure(&mut self, DialFailure { peer_id, .. 
}: DialFailure<'_>) { + if let Some(peer) = peer_id { + // If there are pending outgoing requests when a dial failure occurs, + // it is implied that we are not connected to the peer, since pending + // outgoing requests are drained when a connection is established and + // only created when a peer is not connected when a request is made. + // Thus these requests must be considered failed, even if there is + // another, concurrent dialing attempt ongoing. + if let Some(pending) = self.pending_outbound_requests.remove(&peer) { + for request in pending { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id: request.request_id, + error: OutboundFailure::DialFailure, + })); + } + } + } + } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to + /// the newly connected peer. + fn preload_new_handler( + &mut self, + handler: &mut Handler, + peer: PeerId, + connection_id: ConnectionId, + remote_address: Option, + ) { + let mut connection = Connection::new(connection_id, remote_address); + + if let Some(pending_requests) = self.pending_outbound_requests.remove(&peer) { + for request in pending_requests { + connection.pending_outbound_response_streams.insert(request.request_id); + handler.on_behaviour_event(request); + } + } + + self.connected.entry(peer).or_default().push(connection); + } +} + +impl NetworkBehaviour for Behaviour +where + TCodec: Codec + Send + Clone + 'static, +{ + type ConnectionHandler = Handler; + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + let mut handler = Handler::new( + self.protocols.clone(), + self.codec.clone(), + self.config.request_timeout, + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, None); + + Ok(handler) + } + + fn 
handle_pending_outbound_connection( + &mut self, + _connection_id: ConnectionId, + maybe_peer: Option, + _addresses: &[Multiaddr], + _effective_role: Endpoint, + ) -> Result, ConnectionDenied> { + let peer = match maybe_peer { + None => return Ok(vec![]), + Some(peer) => peer, + }; + + let mut addresses = Vec::new(); + if let Some(connections) = self.connected.get(&peer) { + addresses.extend(connections.iter().filter_map(|c| c.remote_address.clone())) + } + + Ok(addresses) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + remote_address: &Multiaddr, + _: Endpoint, + _: PortUse, + ) -> Result, ConnectionDenied> { + let mut handler = Handler::new( + self.protocols.clone(), + self.codec.clone(), + self.config.request_timeout, + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, Some(remote_address.clone())); + + Ok(handler) + } + + fn on_swarm_event(&mut self, event: FromSwarm<'_>) { + match event { + FromSwarm::ConnectionEstablished(_) => {} + FromSwarm::ConnectionClosed(connection_closed) => self.on_connection_closed(connection_closed), + FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), + FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), + _ => {} + } + } + + fn on_connection_handler_event(&mut self, peer: PeerId, connection: ConnectionId, event: THandlerOutEvent) { + match event { + handler::Event::OutboundRequestSentAwaitingResponses { request_id, receiver } => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending before getting the response channel.",); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundRequestSentAwaitingResponses { + peer, + request_id, + channel: receiver, + })); + } + handler::Event::InboundRequest { 
request_id, request, sender } => { + match self.get_connection_mut(&peer, connection) { + Some(connection) => { + let inserted = connection.pending_inbound_response_streams.insert(request_id); + debug_assert!(inserted, "Expect id of new request to be unknown."); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundRequest { + peer, + request_id, + request, + channel: sender, + })) + } + None => { + tracing::debug!( + "Connection ({connection}) closed after `Event::Request` ({request_id}) \ + has been emitted." + ); + } + } + } + handler::Event::OutboundResponseStreamClosed(request_id) => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + debug_assert!(removed, "Expect request_id to be pending before response is sent."); + + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundResponseStreamClosed { peer, request_id })); + } + handler::Event::InboundResponseStreamClosed(request_id) => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + debug_assert!( + !removed, + "Expect request_id to have been removed from pending because the response \ + channel has already been available." 
+ ); + + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundResponseStreamClosed { peer, request_id })); + } + handler::Event::OutboundTimeout(request_id) => { + self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Timeout, + })); + } + handler::Event::OutboundUnsupportedProtocols(request_id) => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending before failing to connect.",); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::UnsupportedProtocols, + })); + } + handler::Event::OutboundStreamFailed { request_id, error } => { + self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Io(error), + })) + } + handler::Event::InboundTimeout(request_id) => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + if removed { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Timeout, + })); + } else { + // This happens when timeout is emitted before `read_request` finishes. + tracing::debug!("Inbound request timeout for an unknown request_id ({request_id})"); + } + } + handler::Event::InboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + if removed { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Io(error), + })); + } else { + // This happens when `read_request` fails. 
+ tracing::debug!( + "Inbound failure is reported for an unknown request_id ({request_id}): \ + {error}" + ); + } + } + } + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ev); + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + Poll::Pending + } +} + +/// Internal threshold for when to shrink the capacity +/// of empty queues. If the capacity of an empty queue +/// exceeds this threshold, the associated memory is +/// released. +const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; + +/// Internal information tracked for an established connection. +struct Connection { + id: ConnectionId, + remote_address: Option, + /// Pending outbound responses where corresponding inbound requests have + /// been received on this connection and emitted via `poll` but have not yet + /// been answered. + pending_outbound_response_streams: HashSet, + /// Pending inbound responses for previously sent requests on this + /// connection. 
+ pending_inbound_response_streams: HashSet, +} + +impl Connection { + fn new(id: ConnectionId, remote_address: Option) -> Self { + Self { + id, + remote_address, + pending_outbound_response_streams: Default::default(), + pending_inbound_response_streams: Default::default(), + } + } +} diff --git a/crates/client/p2p_stream/tests/error_reporting.rs b/crates/client/p2p_stream/tests/error_reporting.rs new file mode 100644 index 000000000..23a578970 --- /dev/null +++ b/crates/client/p2p_stream/tests/error_reporting.rs @@ -0,0 +1,356 @@ +use std::io; +use std::time::Duration; + +use futures::prelude::*; +use libp2p_swarm_test::SwarmExt; +use p2p_stream::{InboundFailure, OutboundFailure}; + +pub mod utils; + +use utils::{ + new_swarm, new_swarm_with_timeout, wait_inbound_failure, wait_inbound_request, wait_inbound_response_stream_closed, + wait_no_events, wait_outbound_failure, wait_outbound_request_sent_awaiting_responses, Action, +}; + +#[tokio::test] +async fn report_outbound_failure_on_read_response_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_channel) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnReadResponse); + + resp_channel.send(Action::FailOnReadResponse).await.unwrap(); + + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead Wait for swarm2 disconnecting + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::ConnectionClosed)); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, 
Action::FailOnReadResponse); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!( + matches!(resp_channel.next().await, Some(Err(x)) if x.kind() == io::ErrorKind::Other && x.to_string() == "FailOnReadResponse") + ); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnReadResponse"); + }; + + // Make sure both run to completion + tokio::join!(server_task, client_task); +} + +#[tokio::test] +async fn report_outbound_failure_on_write_request_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. 
+ let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnWriteRequest"); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_outbound_timeout_on_read_response_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnReadResponse); + + resp_tx.send(Action::TimeoutOnReadResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::ConnectionClosed)); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnReadResponse); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + 
assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Make sure both run to completion + tokio::join!(server_task, client_task); +} + +#[tokio::test] +async fn report_inbound_closure_on_read_request_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::IncomingRequest` is produced after + // `read_request`. Keep the connection alive, otherwise swarm2 may receive + // `ConnectionClosed` instead. + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnReadRequest); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_inbound_failure_on_write_response_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let 
(peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnWriteResponse); + + resp_tx.send(Action::FailOnWriteResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + InboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnWriteResponse"); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnWriteResponse); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + // We cannot know if writing response failed or there was no response written at + // all. 
+ wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + }; + + // Make sure both run to completion + tokio::join!(client_task, server_task); +} + +#[tokio::test] +async fn report_inbound_timeout_on_write_response_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(200)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_channel) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnWriteResponse); + + resp_channel.send(Action::TimeoutOnWriteResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::Timeout)); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnWriteResponse); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_channel.next().await.is_none()); + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + }; + + // Make sure both run to completion + tokio::join!(client_task, server_task); +} + +#[tokio::test] +async fn report_outbound_timeout_on_write_request_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = 
new_swarm_with_timeout(Duration::from_millis(200)); + let (_peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_outbound_timeout_on_read_request_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (_peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnReadRequest); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_channel.next().await.is_none()); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); 
+ assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} diff --git a/crates/client/p2p_stream/tests/sanity.rs b/crates/client/p2p_stream/tests/sanity.rs new file mode 100644 index 000000000..1b22a8345 --- /dev/null +++ b/crates/client/p2p_stream/tests/sanity.rs @@ -0,0 +1,112 @@ +use futures::prelude::*; +use libp2p::PeerId; +use libp2p_swarm_test::SwarmExt; +use rstest::rstest; +use std::time::Duration; + +pub mod utils; + +use utils::{ + new_swarm_with_timeout, wait_inbound_request, wait_inbound_response_stream_closed, + wait_outbound_request_sent_awaiting_responses, wait_outbound_response_stream_closed, Action, TestSwarm, +}; + +struct Requester { + peer_id: PeerId, + swarm: TestSwarm, +} + +struct Responder { + peer_id: PeerId, + swarm: TestSwarm, +} + +struct Scenario { + requester: Requester, + responder: Responder, +} + +// peer1 is the server, peer2 is the client +async fn setup() -> (PeerId, TestSwarm, PeerId, TestSwarm) { + let (srv_peer_id, mut srv_swarm) = new_swarm_with_timeout(Duration::from_secs(10)); + let (cli_peer_id, mut cli_swarm) = new_swarm_with_timeout(Duration::from_secs(10)); + + srv_swarm.listen().with_memory_addr_external().await; + cli_swarm.connect(&mut srv_swarm).await; + + (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) +} + +async fn client_request_to_server() -> Scenario { + let (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) = setup().await; + + Scenario { + requester: Requester { peer_id: cli_peer_id, swarm: cli_swarm }, + responder: Responder { peer_id: srv_peer_id, swarm: srv_swarm }, + } +} + +async fn server_request_to_client() -> Scenario { + let (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) = setup().await; + + Scenario { + requester: Requester { peer_id: srv_peer_id, swarm: srv_swarm }, + responder: Responder { peer_id: cli_peer_id, swarm: 
cli_swarm }, + } +} + +#[rstest] +#[case::client_request_to_server(client_request_to_server())] +#[case::server_request_to_client(server_request_to_client())] +#[tokio::test] + +async fn sanity( + #[values(0, 1, 322)] num_responses: usize, + #[case] + #[future] + scenario: Scenario, +) { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let Scenario { mut requester, mut responder } = scenario.await; + + let responder_task = async move { + let (peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut responder.swarm).await.unwrap(); + + assert_eq!(peer, requester.peer_id); + assert_eq!(action, Action::SanityRequest); + + for i in 0..num_responses { + resp_tx.send(Action::SanityResponse(i as u32)).await.unwrap(); + } + + // Force close the stream + drop(resp_tx); + + let (peer, req_id_done) = wait_outbound_response_stream_closed(&mut responder.swarm).await.unwrap(); + + assert_eq!(peer, requester.peer_id); + assert_eq!(req_id_done, req_id); + }; + + let requester_task = async move { + let req_id = requester.swarm.behaviour_mut().send_request(&responder.peer_id, Action::SanityRequest); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut requester.swarm).await.unwrap(); + + assert_eq!(peer, responder.peer_id); + assert_eq!(req_id_done, req_id); + + for i in 0..num_responses { + assert_eq!(resp_rx.next().await.unwrap().unwrap(), Action::SanityResponse(i as u32)); + } + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut requester.swarm).await.unwrap(); + + assert_eq!(peer, responder.peer_id); + assert_eq!(req_id_done, req_id); + }; + + tokio::join!(responder_task, requester_task); +} diff --git a/crates/client/p2p_stream/tests/utils/mod.rs b/crates/client/p2p_stream/tests/utils/mod.rs new file mode 100644 index 000000000..530cd71c2 --- /dev/null +++ b/crates/client/p2p_stream/tests/utils/mod.rs @@ -0,0 +1,282 @@ +//! Common utilities for p2p_stream integration tests. 
+use std::fmt::Debug; +use std::time::Duration; +use std::{io, iter}; + +use anyhow::{bail, Result}; +use async_trait::async_trait; +use futures::channel::mpsc; +use futures::prelude::*; +use libp2p::core::transport::MemoryTransport; +use libp2p::core::upgrade::Version; +use libp2p::identity::{Keypair, PeerId}; +use libp2p::swarm::{self, NetworkBehaviour, StreamProtocol, Swarm}; +use libp2p::{yamux, Transport}; +use p2p_stream::{Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId}; + +#[derive(Clone, Default)] +pub struct TestCodec; + +pub type TestSwarm = Swarm>; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Action { + FailOnReadRequest, + FailOnReadResponse, + TimeoutOnReadResponse, + FailOnWriteRequest, + FailOnWriteResponse, + TimeoutOnWriteResponse, + SanityRequest, + SanityResponse(u32), // The highest byte is ignored + TimeoutOnWriteRequest, + TimeoutOnReadRequest, +} + +impl From for u32 { + fn from(value: Action) -> Self { + match value { + Action::FailOnReadRequest => 0, + Action::FailOnReadResponse => 1, + Action::TimeoutOnReadResponse => 2, + Action::FailOnWriteRequest => 3, + Action::FailOnWriteResponse => 4, + Action::TimeoutOnWriteResponse => 5, + Action::SanityRequest => 6, + Action::SanityResponse(id) => 7 | ((id & 0x00FFFFFF) << 8), + Action::TimeoutOnWriteRequest => 8, + Action::TimeoutOnReadRequest => 9, + } + } +} + +impl TryFrom for Action { + type Error = io::Error; + + fn try_from(value: u32) -> Result { + match value & 0x000000FF { + 0 => Ok(Action::FailOnReadRequest), + 1 => Ok(Action::FailOnReadResponse), + 2 => Ok(Action::TimeoutOnReadResponse), + 3 => Ok(Action::FailOnWriteRequest), + 4 => Ok(Action::FailOnWriteResponse), + 5 => Ok(Action::TimeoutOnWriteResponse), + 6 => Ok(Action::SanityRequest), + 7 => Ok(Action::SanityResponse((value & 0xFFFFFF00) >> 8)), + 8 => Ok(Action::TimeoutOnWriteRequest), + 9 => Ok(Action::TimeoutOnReadRequest), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid 
action")), + } + } +} + +#[async_trait] +impl Codec for TestCodec { + type Protocol = StreamProtocol; + type Request = Action; + type Response = Action; + + async fn read_request(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = [0u8; std::mem::size_of::()]; + + io.read_exact(&mut buf).await?; + + match u32::from_be_bytes(buf).try_into()? { + Action::FailOnReadRequest => Err(io::Error::new(io::ErrorKind::Other, "FailOnReadRequest")), + Action::TimeoutOnReadRequest => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn read_response(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = [0u8; std::mem::size_of::()]; + + io.read_exact(&mut buf).await?; + + match u32::from_be_bytes(buf).try_into()? { + Action::FailOnReadResponse => Err(io::Error::new(io::ErrorKind::Other, "FailOnReadResponse")), + Action::TimeoutOnReadResponse => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn write_request(&mut self, _protocol: &Self::Protocol, io: &mut T, req: Self::Request) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match req { + Action::FailOnWriteRequest => Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteRequest")), + Action::TimeoutOnWriteRequest => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => { + let bytes = u32::from(action).to_be_bytes(); + io.write_all(&bytes).await?; + Ok(()) + } + } + } + + async fn write_response(&mut self, _protocol: &Self::Protocol, io: &mut T, res: Self::Response) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match res { + Action::FailOnWriteResponse => Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteResponse")), + Action::TimeoutOnWriteResponse => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => { + let bytes = 
u32::from(action).to_be_bytes(); + io.write_all(&bytes).await?; + Ok(()) + } + } + } +} + +/// [`SwarmExt::new_ephemeral`] uses `async_std` executor, but we're using +/// `tokio` +pub(crate) fn new_ephemeral_with_tokio_executor(behaviour_fn: impl FnOnce(Keypair) -> B) -> Swarm +where + B: NetworkBehaviour + Send, + ::ToSwarm: Debug, +{ + let identity = Keypair::generate_ed25519(); + let peer_id = PeerId::from(identity.public()); + + let transport = MemoryTransport::default() + .or_transport(libp2p::tcp::tokio::Transport::default()) + .upgrade(Version::V1) + .authenticate(libp2p_plaintext::Config::new(&identity)) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed(); + + Swarm::new( + transport, + behaviour_fn(identity), + peer_id, + swarm::Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need connections to be kept alive beyond what the individual behaviour configures., */ + ) +} + +pub fn new_swarm_with_timeout(timeout: Duration) -> (PeerId, Swarm>) { + let protocols = iter::once(StreamProtocol::new("/test/1")); + let cfg = p2p_stream::Config::default().request_timeout(timeout); + + // SwarmExt::new_ephemeral uses async::std + let swarm = new_ephemeral_with_tokio_executor(|_| { + p2p_stream::Behaviour::::with_codec_and_protocols(TestCodec, protocols, cfg) + }); + + let peed_id = *swarm.local_peer_id(); + + (peed_id, swarm) +} + +pub fn new_swarm() -> (PeerId, Swarm>) { + new_swarm_with_timeout(Duration::from_millis(100)) +} + +pub async fn wait_no_events(swarm: &mut Swarm>) { + loop { + if let Ok(ev) = swarm.select_next_some().await.try_into_behaviour_event() { + panic!("Unexpected event: {ev:?}") + } + } +} + +pub async fn wait_inbound_request( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, Action, mpsc::Sender)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::InboundRequest { peer, request_id, request, channel }) 
=> { + return Ok((peer, request_id, request, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_outbound_request_sent_awaiting_responses( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, mpsc::Receiver>)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundRequestSentAwaitingResponses { peer, request_id, channel }) => { + return Ok((peer, request_id, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_outbound_response_stream_closed( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundResponseStreamClosed { peer, request_id, .. }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_inbound_response_stream_closed( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::InboundResponseStreamClosed { peer, request_id, .. }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_inbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, InboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::InboundFailure { peer, request_id, error }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) 
=> {} + } + } +} + +pub async fn wait_outbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, OutboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundFailure { peer, request_id, error }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 2e7daf1ff..a622d6bed 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -28,6 +28,7 @@ mc-devnet = { workspace = true } mc-eth = { workspace = true } mc-gateway = { workspace = true } mc-mempool = { workspace = true } +mc-p2p = { workspace = true } mc-rpc = { workspace = true } mc-sync = { workspace = true } mc-telemetry = { workspace = true } @@ -53,7 +54,7 @@ futures = { workspace = true, features = ["thread-pool"] } governor.workspace = true hyper = { version = "0.14", features = ["server"] } jsonrpsee.workspace = true -log.workspace = true +multiaddr.workspace = true rand.workspace = true rayon.workspace = true reqwest.workspace = true diff --git a/crates/node/src/cli/chain_config_overrides.rs b/crates/node/src/cli/chain_config_overrides.rs index adb765836..4bf9ee955 100644 --- a/crates/node/src/cli/chain_config_overrides.rs +++ b/crates/node/src/cli/chain_config_overrides.rs @@ -15,6 +15,7 @@ use mp_chain_config::{ }; use mp_utils::parsers::parse_key_value_yaml; use mp_utils::serde::{deserialize_duration, deserialize_private_key, serialize_duration}; +use url::Url; /// Override chain config parameters. 
/// Format: "--chain-config-override chain_id=SN_MADARA,chain_name=MADARA,block_time=1500ms,bouncer_config.block_max_capacity.n_steps=100000000" @@ -28,6 +29,8 @@ pub struct ChainConfigOverrideParams { pub struct ChainConfigOverridesInner { pub chain_name: String, pub chain_id: ChainId, + pub feeder_gateway_url: Url, + pub gateway_url: Url, pub native_fee_token_address: ContractAddress, pub parent_fee_token_address: ContractAddress, #[serde(deserialize_with = "deserialize_starknet_version", serialize_with = "serialize_starknet_version")] @@ -46,6 +49,7 @@ pub struct ChainConfigOverridesInner { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + pub p2p_bootstrap_nodes: Vec, } impl ChainConfigOverrideParams { @@ -66,6 +70,9 @@ impl ChainConfigOverrideParams { eth_core_contract_address: chain_config.eth_core_contract_address, eth_gps_statement_verifier: chain_config.eth_gps_statement_verifier, private_key: chain_config.private_key, + feeder_gateway_url: chain_config.feeder_gateway_url, + gateway_url: chain_config.gateway_url, + p2p_bootstrap_nodes: chain_config.p2p_bootstrap_nodes, }) .context("Failed to convert ChainConfig to Value")?; @@ -101,8 +108,8 @@ impl ChainConfigOverrideParams { Ok(ChainConfig { chain_name: chain_config_overrides.chain_name, chain_id: chain_config_overrides.chain_id, - feeder_gateway_url: chain_config.feeder_gateway_url, - gateway_url: chain_config.gateway_url, + feeder_gateway_url: chain_config_overrides.feeder_gateway_url, + gateway_url: chain_config_overrides.gateway_url, native_fee_token_address: chain_config_overrides.native_fee_token_address, parent_fee_token_address: chain_config_overrides.parent_fee_token_address, latest_protocol_version: chain_config_overrides.latest_protocol_version, @@ -115,6 +122,7 @@ impl ChainConfigOverrideParams { versioned_constants, eth_gps_statement_verifier: chain_config_overrides.eth_gps_statement_verifier, private_key: 
chain_config_overrides.private_key, + p2p_bootstrap_nodes: chain_config_overrides.p2p_bootstrap_nodes, }) } } diff --git a/crates/node/src/cli/mod.rs b/crates/node/src/cli/mod.rs index f08fc99b5..3306d0004 100644 --- a/crates/node/src/cli/mod.rs +++ b/crates/node/src/cli/mod.rs @@ -1,29 +1,32 @@ +use clap::ArgGroup; +use mp_chain_config::ChainConfig; +use starknet_api::core::ChainId; +use std::path::PathBuf; +use std::str::FromStr; +use std::sync::Arc; + pub mod analytics; pub mod block_production; pub mod chain_config_overrides; pub mod db; pub mod gateway; pub mod l1; +pub mod p2p; pub mod rpc; pub mod sync; pub mod telemetry; -use crate::cli::l1::L1SyncParams; -use analytics::AnalyticsParams; + +pub use analytics::*; pub use block_production::*; pub use chain_config_overrides::*; pub use db::*; pub use gateway::*; +pub use l1::*; +pub use p2p::*; pub use rpc::*; -use starknet_api::core::ChainId; -use std::str::FromStr; pub use sync::*; pub use telemetry::*; -use clap::ArgGroup; -use mp_chain_config::ChainConfig; -use std::path::PathBuf; -use std::sync::Arc; - /// Madara: High performance Starknet sequencer/full-node. #[derive(Clone, Debug, clap::Parser)] #[clap( @@ -79,6 +82,10 @@ pub struct RunCmd { #[clap(flatten)] pub rpc_params: RpcParams, + #[allow(missing_docs)] + #[clap(flatten)] + pub p2p_params: P2pParams, + #[allow(missing_docs)] #[clap(flatten)] pub block_production_params: BlockProductionParams, diff --git a/crates/node/src/cli/p2p.rs b/crates/node/src/cli/p2p.rs new file mode 100644 index 000000000..e80ba2094 --- /dev/null +++ b/crates/node/src/cli/p2p.rs @@ -0,0 +1,10 @@ +#[derive(Clone, Debug, clap::Args)] +pub struct P2pParams { + /// Enable the p2p service. + #[arg(env = "MADARA_P2P", long)] + pub p2p: bool, + + /// Port for peer-to-peer. By default, it will ask the os for an unused port. 
+ #[arg(env = "MADARA_P2P_PORT", long)] + pub p2p_port: Option, +} diff --git a/crates/node/src/cli/rpc.rs b/crates/node/src/cli/rpc.rs index 63ded0ea5..375640e17 100644 --- a/crates/node/src/cli/rpc.rs +++ b/crates/node/src/cli/rpc.rs @@ -67,9 +67,10 @@ impl FromStr for Cors { #[derive(Clone, Debug, clap::Args)] pub struct RpcParams { - /// Disable the RPC server. - #[arg(env = "MADARA_RPC_DISABLED", long, alias = "no-rpc")] - pub rpc_disabled: bool, + /// Disables the user RPC endpoint. This includes all methods which are part + /// of the official starknet specs. + #[arg(env = "MADARA_RPC_DISABLE", long, default_value_t = false, alias = "no-rpc")] + pub rpc_disable: bool, /// Listen to all network interfaces. This usually means that the RPC server will be accessible externally. /// Please note that some endpoints should not be exposed to the outside world - by default, enabling remote access @@ -77,17 +78,12 @@ pub struct RpcParams { #[arg(env = "MADARA_RPC_EXTERNAL", long)] pub rpc_external: bool, - /// RPC methods to expose. - #[arg( - env = "MADARA_RPC_METHODS", - long, - value_name = "METHOD", - value_enum, - ignore_case = true, - default_value_t = RpcMethods::Auto, - verbatim_doc_comment - )] - pub rpc_methods: RpcMethods, + /// Enables the admin RPC endpoint. This includes additional RPC methods + /// which are not part of the official specs and can be used by node admins + /// to control their node at a distance. By default, this is exposed o + /// localhost. + #[arg(env = "MADARA_RPC_ADMIN", long, default_value_t = false)] + pub rpc_admin: bool, /// RPC rate limiting (calls/minute) for each connection. /// diff --git a/crates/node/src/cli/telemetry.rs b/crates/node/src/cli/telemetry.rs index aa1927d06..42d790582 100644 --- a/crates/node/src/cli/telemetry.rs +++ b/crates/node/src/cli/telemetry.rs @@ -4,7 +4,7 @@ use clap::Args; #[derive(Debug, Clone, Args)] pub struct TelemetryParams { /// Enable connecting to the Madara telemetry server. 
- #[arg(env = "MADARA_TELEMETRY", long, alias = "telemetry")] + #[arg(env = "MADARA_TELEMETRY", long)] pub telemetry: bool, /// The URL of the telemetry server. diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index dd23c0e56..b55ed436a 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -5,8 +5,6 @@ mod cli; mod service; mod util; -use std::sync::Arc; - use anyhow::Context; use clap::Parser; use cli::{NetworkType, RunCmd}; @@ -18,8 +16,9 @@ use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxP use mc_telemetry::{SysInfo, TelemetryService}; use mp_convert::ToFelt; use mp_utils::service::{Service, ServiceGroup}; -use service::{BlockProductionService, GatewayService, L1SyncService, RpcService, SyncService}; +use service::{BlockProductionService, GatewayService, L1SyncService, P2pService, RpcService, SyncService}; use starknet_providers::SequencerGatewayProvider; +use std::sync::Arc; const GREET_IMPL_NAME: &str = "Madara"; const GREET_SUPPORT_URL: &str = "https://github.com/madara-alliance/madara/issues"; @@ -171,9 +170,14 @@ async fn main() -> anyhow::Result<()> { let rpc_service = RpcService::new(&run_cmd.rpc_params, &db_service, Arc::clone(&rpc_add_txs_method_provider)) .context("Initializing rpc service")?; - let gateway_service = GatewayService::new(run_cmd.gateway_params, &db_service, rpc_add_txs_method_provider) + let gateway_service = + GatewayService::new(run_cmd.gateway_params, &db_service, Arc::clone(&rpc_add_txs_method_provider)) + .await + .context("Initializing gateway service")?; + + let p2p_service = P2pService::new(run_cmd.p2p_params, &db_service, Arc::clone(&rpc_add_txs_method_provider)) .await - .context("Initializing gateway service")?; + .context("Initializing p2p service")?; telemetry_service.send_connected(&node_name, node_version, &chain_config.chain_name, &sys_info); @@ -183,6 +187,7 @@ async fn main() -> anyhow::Result<()> { .with(block_provider_service) .with(rpc_service) 
.with(gateway_service) + .with(p2p_service) .with(telemetry_service); // Check if the devnet is running with the correct chain id. @@ -199,7 +204,6 @@ async fn main() -> anyhow::Result<()> { app.start_and_drive_to_end().await?; - tracing::info!("Shutting down analytics"); let _ = analytics.shutdown(); Ok(()) diff --git a/crates/node/src/service/l1.rs b/crates/node/src/service/l1.rs index f4c08f8e8..54b6188d1 100644 --- a/crates/node/src/service/l1.rs +++ b/crates/node/src/service/l1.rs @@ -61,7 +61,7 @@ impl L1SyncService { if gas_price_sync_enabled { let eth_client = eth_client .clone() - .context("L1 gas prices require the ethereum service to be enabled. Either disable gas prices syncing using `--gas-price 0`, or disable L1 sync using the `--no-l1-sync` argument.")?; + .context("L1 gas prices require the ethereum service to be enabled. Either disable gas prices syncing using `--gas-price 0` and `--blob-gas-price 0`, or enable L1 sync by removing the `--no-l1-sync` argument.")?; // running at-least once before the block production service tracing::info!("⏳ Getting initial L1 gas prices"); mc_eth::l1_gas_price::gas_price_worker_once(ð_client, l1_gas_provider.clone(), gas_price_poll) diff --git a/crates/node/src/service/mod.rs b/crates/node/src/service/mod.rs index 0f5c8d1b9..a78fade3c 100644 --- a/crates/node/src/service/mod.rs +++ b/crates/node/src/service/mod.rs @@ -1,11 +1,13 @@ mod block_production; mod gateway; mod l1; +mod p2p; mod rpc; mod sync; pub use block_production::BlockProductionService; pub use gateway::GatewayService; pub use l1::L1SyncService; +pub use p2p::P2pService; pub use rpc::RpcService; pub use sync::SyncService; diff --git a/crates/node/src/service/p2p.rs b/crates/node/src/service/p2p.rs new file mode 100644 index 000000000..ea1206ef3 --- /dev/null +++ b/crates/node/src/service/p2p.rs @@ -0,0 +1,46 @@ +use crate::cli::P2pParams; +use anyhow::Context; +use mc_db::{DatabaseService, MadaraBackend}; +use 
mc_rpc::providers::AddTransactionProvider; +use mp_utils::service::Service; +use std::{sync::Arc, time::Duration}; +use tokio::task::JoinSet; + +#[derive(Clone)] +pub struct P2pService { + config: P2pParams, + db_backend: Arc, + add_transaction_provider: Arc, +} + +impl P2pService { + pub async fn new( + config: P2pParams, + db: &DatabaseService, + add_transaction_provider: Arc, + ) -> anyhow::Result { + Ok(Self { config, db_backend: Arc::clone(db.backend()), add_transaction_provider }) + } +} + +#[async_trait::async_trait] +impl Service for P2pService { + async fn start(&mut self, join_set: &mut JoinSet>) -> anyhow::Result<()> { + if self.config.p2p { + let P2pService { db_backend, add_transaction_provider, config } = self.clone(); + + let config = mc_p2p::P2pConfig { + bootstrap_nodes: self.db_backend.chain_config().p2p_bootstrap_nodes.clone(), + port: config.p2p_port, + status_interval: Duration::from_secs(3), + }; + let mut p2p = + mc_p2p::MadaraP2p::new(config, db_backend, add_transaction_provider).context("Creating p2p service")?; + join_set.spawn(async move { + p2p.run().await?; + Ok(()) + }); + } + Ok(()) + } +} diff --git a/crates/node/src/service/rpc.rs b/crates/node/src/service/rpc.rs index 95b97d277..acdcf7867 100644 --- a/crates/node/src/service/rpc.rs +++ b/crates/node/src/service/rpc.rs @@ -10,7 +10,7 @@ use mp_utils::service::Service; use metrics::RpcMetrics; use server::{start_server, ServerConfig}; -use crate::cli::{RpcMethods, RpcParams}; +use crate::cli::RpcParams; mod metrics; mod middleware; @@ -26,22 +26,11 @@ impl RpcService { db: &DatabaseService, add_txs_method_provider: Arc, ) -> anyhow::Result { - if config.rpc_disabled { + if config.rpc_disable { return Ok(Self { server_config: None, server_handle: None }); } - let (rpcs, node_operator) = match (config.rpc_methods, config.rpc_external) { - (RpcMethods::Safe, _) => (true, false), - (RpcMethods::Unsafe, _) => (true, true), - (RpcMethods::Auto, false) => (true, true), - (RpcMethods::Auto, 
true) => { - tracing::warn!( - "Option `--rpc-external` will hide node operator endpoints. To enable them, please pass \ - `--rpc-methods unsafe`." - ); - (true, false) - } - }; + let (rpcs, node_operator) = (true, true); let (read, write, trace, internal, ws) = (rpcs, rpcs, rpcs, node_operator, rpcs); let starknet = Starknet::new(Arc::clone(db.backend()), add_txs_method_provider); let metrics = RpcMetrics::register()?; diff --git a/crates/node/src/service/sync.rs b/crates/node/src/service/sync.rs index de0513968..d86adda9e 100644 --- a/crates/node/src/service/sync.rs +++ b/crates/node/src/service/sync.rs @@ -32,7 +32,7 @@ impl SyncService { ) -> anyhow::Result { let fetch_config = config.block_fetch_config(chain_config.chain_id.clone(), chain_config.clone()); - tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str()); + tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str()); Ok(Self { db_backend: Arc::clone(db.backend()), diff --git a/crates/primitives/chain_config/Cargo.toml b/crates/primitives/chain_config/Cargo.toml index 0df76791b..2037648b0 100644 --- a/crates/primitives/chain_config/Cargo.toml +++ b/crates/primitives/chain_config/Cargo.toml @@ -26,12 +26,13 @@ mp-utils.workspace = true # Other anyhow.workspace = true lazy_static.workspace = true -log.workspace = true +multiaddr.workspace = true primitive-types.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true serde_yaml.workspace = true thiserror.workspace = true +tracing.workspace = true url.workspace = true [dev-dependencies] diff --git a/crates/primitives/chain_config/src/chain_config.rs b/crates/primitives/chain_config/src/chain_config.rs index b23298c74..af2907b8b 100644 --- a/crates/primitives/chain_config/src/chain_config.rs +++ b/crates/primitives/chain_config/src/chain_config.rs @@ -116,7 +116,7 @@ pub struct ChainConfig { /// The Starknet core contract address for the L1 watcher. 
pub eth_core_contract_address: H160, - /// The Starknet SHARP verifier La address. Check out the [docs](https://docs.starknet.io/architecture-and-concepts/solidity-verifier/) + /// The Starknet SHARP verifier L1 address. Check out the [docs](https://docs.starknet.io/architecture-and-concepts/solidity-verifier/) /// for more information pub eth_gps_statement_verifier: H160, @@ -130,6 +130,9 @@ pub struct ChainConfig { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + + #[serde(default)] + pub p2p_bootstrap_nodes: Vec, } impl ChainConfig { @@ -236,6 +239,7 @@ impl ChainConfig { ), private_key: ZeroingPrivateKey::default(), + p2p_bootstrap_nodes: vec![], } } diff --git a/crates/primitives/chain_config/src/rpc_version.rs b/crates/primitives/chain_config/src/rpc_version.rs index f86c21d65..9239c5d7a 100644 --- a/crates/primitives/chain_config/src/rpc_version.rs +++ b/crates/primitives/chain_config/src/rpc_version.rs @@ -31,38 +31,38 @@ impl RpcVersion { } pub fn from_request_path(path: &str) -> Result { - log::debug!(target: "rpc_version", "extracting rpc version from request: {path}"); + tracing::debug!(target: "rpc_version", "extracting rpc version from request: {path}"); let path = path.to_ascii_lowercase(); let parts: Vec<&str> = path.split('/').collect(); - log::debug!(target: "rpc_version", "version parts are: {parts:?}"); + tracing::debug!(target: "rpc_version", "version parts are: {parts:?}"); // If we have an empty path or just "/", fallback to latest rpc version if parts.len() == 1 || (parts.len() == 2 && parts[1].is_empty()) { - log::debug!(target: "rpc_version", "no version, defaulting to latest"); + tracing::debug!(target: "rpc_version", "no version, defaulting to latest"); return Ok(Self::RPC_VERSION_LATEST); } // Check if the path follows the correct format, i.e. /rpc/v[version]. 
// If not, fallback to the latest version if parts.len() != 3 || parts[1] != "rpc" || !parts[2].starts_with('v') { - log::debug!(target: "rpc_version", "invalid version format, defaulting to latest"); + tracing::debug!(target: "rpc_version", "invalid version format, defaulting to latest"); return Ok(Self::RPC_VERSION_LATEST); } - log::debug!(target: "rpc_version", "looking for matching version..."); + tracing::debug!(target: "rpc_version", "looking for matching version..."); let version_str = &parts[2][1..]; // without the 'v' prefix if let Ok(version) = RpcVersion::from_str(version_str) { if SUPPORTED_RPC_VERSIONS.contains(&version) { - log::debug!(target: "rpc_version", "found matching version: {version}"); + tracing::debug!(target: "rpc_version", "found matching version: {version}"); Ok(version) } else { - log::debug!(target: "rpc_version", "no matching version"); + tracing::debug!(target: "rpc_version", "no matching version"); Err(RpcVersionError::UnsupportedVersion) } } else { - log::debug!(target: "rpc_version", "invalid version format: {version_str}"); + tracing::debug!(target: "rpc_version", "invalid version format: {version_str}"); Err(RpcVersionError::InvalidVersion) } } diff --git a/crates/primitives/convert/src/felt.rs b/crates/primitives/convert/src/felt.rs index 53fc86579..0557b6ec9 100644 --- a/crates/primitives/convert/src/felt.rs +++ b/crates/primitives/convert/src/felt.rs @@ -1,5 +1,56 @@ +use std::cmp::Ordering; + use starknet_types_core::felt::Felt; +#[derive(Debug, thiserror::Error)] +#[error("Malformated field element.")] +pub struct MalformatedFelt; + +pub trait FeltExt { + fn from_slice_be_checked(slice: &[u8]) -> Result; + fn from_bytes_checked(slice: &[u8; 32]) -> Result; +} + +impl FeltExt for Felt { + fn from_slice_be_checked(slice: &[u8]) -> Result { + if slice.len() > 32 { + return Err(MalformatedFelt); + } + + let mut unpacked = [0; 32]; + for i in 0..slice.len() { + unpacked[32 - slice.len() + i] = slice[i] + } + + 
Felt::from_bytes_checked(&unpacked) + } + + fn from_bytes_checked(b: &[u8; 32]) -> Result { + let limbs = [ + u64::from_be_bytes([b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]), + u64::from_be_bytes([b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]]), + u64::from_be_bytes([b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23]]), + u64::from_be_bytes([b[24], b[25], b[26], b[27], b[28], b[29], b[30], b[31]]), + ]; + + // Check if it overflows the modulus. + + // p=2^251 + 17*2^192 + 1 + const MODULUS_U64: [u64; 4] = [576460752303423505u64, 0, 0, 1]; + + for i in 0..4 { + match u64::cmp(&limbs[i], &MODULUS_U64[i]) { + Ordering::Less => break, + Ordering::Equal if i == 3 => return Err(MalformatedFelt), + Ordering::Equal => continue, + Ordering::Greater => return Err(MalformatedFelt), + } + } + + Ok(Felt::from_raw(limbs)) + } +} + #[derive(Debug, thiserror::Error)] #[error("Felt is too big to convert to u64.")] pub struct FeltToU64Error; diff --git a/crates/primitives/convert/src/lib.rs b/crates/primitives/convert/src/lib.rs index 425674078..08f11095e 100644 --- a/crates/primitives/convert/src/lib.rs +++ b/crates/primitives/convert/src/lib.rs @@ -2,7 +2,7 @@ mod felt; pub mod hex_serde; mod to_felt; -pub use felt::{felt_to_u128, felt_to_u64}; +pub use felt::{felt_to_u128, felt_to_u64, FeltExt}; pub use to_felt::{DisplayFeltAsHex, FeltHexDisplay, ToFelt}; pub mod test {