diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml index ee57ff9db..b1d0eacf1 100644 --- a/.github/actions/build-dependencies/action.yml +++ b/.github/actions/build-dependencies/action.yml @@ -28,8 +28,6 @@ runs: sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*" sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*" sudo apt autoremove -y - sudo apt update - sudo apt install python3 python3-pip -y sudo apt clean docker system prune -a --volumes @@ -41,9 +39,9 @@ runs: - name: Install solc shell: bash run: | - pip3 install solc-select==0.2.1 - solc-select install 0.8.16 - solc-select use 0.8.16 + cargo install svm-rs + svm install 0.8.16 + svm use 0.8.16 - name: Install Rust uses: dtolnay/rust-toolchain@5cb429dd810e16ff67df78472fa81cf760f4d1c0 diff --git a/Cargo.lock b/Cargo.lock index 86d7cc217..54d2b3b73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -96,9 +96,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.5.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", @@ -149,15 +149,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = 
"7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "a2e1373abdaa212b704512ec2bd8b26bd0b7d5c3f70117411a5d9a451383c859" [[package]] name = "array-bytes" @@ -293,7 +293,7 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.23", + "rustix 0.37.25", "slab", "socket2 0.4.9", "waker-fn", @@ -310,13 +310,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -464,7 +464,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -543,9 +543,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitvec" @@ -674,9 +674,9 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" +checksum = "ca548b6163b872067dc5eb82fd130c56881435e30367d2073594a3d9744120dd" dependencies = [ "log", "parity-scale-codec", @@ -696,9 +696,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a" +checksum = "c79ad7fb2dd38f3dabd76b09c6a5a20c038fc0213ef1e9afd30eb777f120f019" dependencies = [ "memchr", "serde", @@ -733,9 +733,9 @@ checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -768,9 +768,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -783,7 +783,7 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_json", "thiserror", @@ -948,9 +948,9 
@@ dependencies = [ [[package]] name = "clap" -version = "4.4.4" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" dependencies = [ "clap_builder", "clap_derive", @@ -958,9 +958,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.4" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" dependencies = [ "anstream", "anstyle", @@ -977,7 +977,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1067,18 +1067,18 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-hex" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa72a10d0e914cad6bcad4e7409e68d230c1c2db67896e19a37f758b1fcbdab5" +checksum = "c37be52ef5e3b394db27a2341010685ad5103c72ac15ce2e9420a7e8f93f342c" dependencies = [ "cfg-if", "cpufeatures", @@ -1423,14 +1423,14 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "cxx" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe98ba1789d56fb3db3bee5e032774d4f421b685de7ba703643584ba24effbe" +checksum = 
"c390c123d671cc547244943ecad81bdaab756c6ea332d9ca9c1f48d952a24895" dependencies = [ "cc", "cxxbridge-flags", @@ -1440,9 +1440,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4ce20f6b8433da4841b1dadfb9468709868022d829d5ca1f2ffbda928455ea3" +checksum = "00d3d3ac9ffb900304edf51ca719187c779f4001bb544f26c4511d621de905cf" dependencies = [ "cc", "codespan-reporting", @@ -1450,24 +1450,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "cxxbridge-flags" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20888d9e1d2298e2ff473cee30efe7d5036e437857ab68bbfea84c74dba91da2" +checksum = "94415827ecfea0f0c74c8cad7d1a86ddb3f05354d6a6ddeda0adee5e875d2939" [[package]] name = "cxxbridge-macro" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fa16a70dd58129e4dfffdff535fb1bce66673f7bbeec4a5a1765a504e1ccd84" +checksum = "e33dbbe9f5621c9247f97ec14213b04f350bff4b6cebefe834c60055db266ecf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1556,7 +1556,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1589,7 +1589,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1653,9 +1653,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" [[package]] name = "derivative" @@ -1805,7 +1805,7 @@ checksum = 
"487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1927,9 +1927,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", "signature", @@ -1957,7 +1957,7 @@ checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ "curve25519-dalek 4.1.1", "ed25519", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "hex", "rand_core 0.6.4", "sha2", @@ -1972,9 +1972,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" dependencies = [ "base16ct", "crypto-bigint", @@ -1986,6 +1986,7 @@ dependencies = [ "rand_core 0.6.4", "sec1", "subtle", + "tap", "zeroize", ] @@ -2064,25 +2065,14 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - 
[[package]] name = "eth-keystore" version = "0.5.0" @@ -2236,7 +2226,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.37", + "syn 2.0.38", "toml 0.7.8", "walkdir", ] @@ -2254,7 +2244,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2280,7 +2270,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.37", + "syn 2.0.38", "tempfile", "thiserror", "tiny-keccak", @@ -2296,7 +2286,7 @@ dependencies = [ "ethers-core", "ethers-solc", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_json", "thiserror", @@ -2404,7 +2394,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_json", "solang-parser", @@ -2441,7 +2431,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2471,9 +2461,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fdlimit" @@ -2799,7 +2789,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2811,7 +2801,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2821,7 +2811,7 @@ source = "git+https://github.com/serai-dex/substrate#98ab693fdf71f371d5059aa6924 dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2982,7 +2972,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3061,7 +3051,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "debugid", "fxhash", "serde", @@ -3260,9 +3250,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" dependencies = [ "ahash", "allocator-api2", @@ -3590,12 +3580,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "serde", ] @@ -3668,7 +3658,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.14", + "rustix 0.38.19", "windows-sys", ] @@ -3698,9 +3688,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -3946,11 +3936,10 @@ checksum = "3f35c735096c0293d313e8f2a641627472b83d01b937177fe76e5e2708d31e0d" [[package]] name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +version = "1.5.0" +source = 
"git+https://github.com/rust-lang-nursery/lazy-static.rs?rev=5735630d46572f1e5377c8f2ba0f79d18f53b10c#5735630d46572f1e5377c8f2ba0f79d18f53b10c" dependencies = [ - "spin 0.5.2", + "spin 0.9.8", ] [[package]] @@ -3967,9 +3956,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libloading" @@ -3983,9 +3972,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" @@ -4313,9 +4302,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.43.4" +version = "0.43.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0cf749abdc5ca1dce6296dc8ea0f012464dfcfd3ddd67ffc0cabd8241c4e1da" +checksum = "ab94183f8fc2325817835b57946deb44340c99362cd4606c0a5717299b2ba369" dependencies = [ "either", "fnv", @@ -4344,7 +4333,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4498,9 +4487,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -4520,9 +4509,9 @@ checksum = 
"b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "loom" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86a17963e5073acf8d3e2637402657c6b467218f36fe10d696b3e1095ae019bf" +checksum = "7e045d70ddfbc984eacfa964ded019534e8f6cbf36f6410aee0ed5cefa5a9175" dependencies = [ "cfg-if", "generator", @@ -4587,7 +4576,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4601,18 +4590,18 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "macro_magic_core_macros" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" +checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4623,7 +4612,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4678,9 +4667,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -4688,7 +4677,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.38.14", + "rustix 0.38.19", ] [[package]] @@ -5253,9 +5242,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", @@ -5289,7 +5278,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5365,7 +5354,7 @@ version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -5382,7 +5371,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5627,9 +5616,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab512a34b3c2c5e465731cc7668edf79208bbe520be03484eeb05e63ed221735" +checksum = "59e9ab494af9e6e813c72170f0d3c1de1500990d62c97cc05cc7576f91aa402f" dependencies = [ "blake2", "crc32fast", @@ -5680,9 +5669,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" [[package]] name = "parking_lot" @@ -5820,9 +5809,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" +checksum = 
"c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" dependencies = [ "memchr", "thiserror", @@ -5831,9 +5820,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" +checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" dependencies = [ "pest", "pest_generator", @@ -5841,22 +5830,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" +checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "pest_meta" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" +checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" dependencies = [ "once_cell", "pest", @@ -5870,7 +5859,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.0", + "indexmap 2.0.2", ] [[package]] @@ -5913,7 +5902,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5951,7 +5940,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6092,7 +6081,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ 
"proc-macro2", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6106,9 +6095,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", @@ -6166,14 +6155,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -6212,7 +6201,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6522,14 +6511,14 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "regalloc2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4dcbd3a2ae7fb94b5813fa0e957c6ab51bf5d0a8ee1b69e0c2d0f1e6eb8485" +checksum = "ad156d539c879b7a24a363a2016d77961786e71f48f2e2fc8302a92abd2429a6" dependencies = [ "hashbrown 0.13.2", "log", @@ -6584,9 +6573,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = 
"046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64 0.21.4", "bytes", @@ -6609,6 +6598,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tower-service", @@ -6755,7 +6745,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.19", + "semver 1.0.20", ] [[package]] @@ -6769,9 +6759,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.37.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" dependencies = [ "bitflags 1.3.2", "errno", @@ -6783,14 +6773,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.14" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.7", + "linux-raw-sys 0.4.10", "windows-sys", ] @@ -7001,7 +6991,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7275,7 +7265,7 @@ dependencies = [ "cfg-if", "libc", "log", - "rustix 0.38.14", + "rustix 0.38.19", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -7763,7 +7753,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7973,9 +7963,9 @@ dependencies = [ [[package]] name = "schnorrkel" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6b3cebf217f367b9d6f2f27ca0ebd14c7d1dfb1ae3cdbf6f3fa1e5c3e4f67bb8" +checksum = "da18ffd9f2f5d01bc0b3050b37ce7728665f926b4dd1157fe3221b05737d924f" dependencies = [ "arrayref", "arrayvec", @@ -8107,9 +8097,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -8569,7 +8559,6 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", - "hashbrown 0.14.0", "pallet-session", "parity-scale-codec", "scale-info", @@ -8578,7 +8567,6 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-io", - "sp-runtime", "sp-std", ] @@ -8599,9 +8587,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] @@ -8617,13 +8605,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8706,9 +8694,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -8727,9 
+8715,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -8916,7 +8904,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9112,7 +9100,7 @@ source = "git+https://github.com/serai-dex/substrate#98ab693fdf71f371d5059aa6924 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9131,7 +9119,7 @@ source = "git+https://github.com/serai-dex/substrate#98ab693fdf71f371d5059aa6924 dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9303,7 +9291,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9415,7 +9403,7 @@ source = "git+https://github.com/serai-dex/substrate#98ab693fdf71f371d5059aa6924 dependencies = [ "ahash", "hash-db", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "lazy_static", "memory-db", "nohash-hasher", @@ -9456,7 +9444,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9577,7 +9565,7 @@ dependencies = [ name = "std-shims" version = "0.1.1" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.1", "spin 0.9.8", ] @@ -9625,7 +9613,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ - "strum_macros 0.25.2", + "strum_macros 0.25.3", ] [[package]] @@ -9643,15 +9631,15 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9773,7 +9761,7 @@ dependencies = [ "quote", "scale-info", "subxt-metadata", - "syn 2.0.37", + "syn 2.0.38", "thiserror", "tokio", ] @@ -9787,7 +9775,7 @@ dependencies = [ "darling 0.20.3", "proc-macro-error", "subxt-codegen", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9816,9 +9804,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -9877,9 +9865,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.14", + "rustix 0.38.19", "windows-sys", ] @@ -9924,22 +9912,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.37", + "syn 2.0.38", ] [[package]] @@ -10050,9 +10038,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -10075,7 +10063,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -10161,7 +10149,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -10185,7 +10173,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "bytes", "futures-core", "futures-util", @@ -10211,11 +10199,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" dependencies = [ - "cfg-if", "log", "pin-project-lite 0.2.13", "tracing-attributes", @@ -10224,20 +10211,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -10574,9 +10561,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" @@ -10630,7 +10617,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -10664,7 +10651,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10754,8 +10741,8 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 2.0.0", - "semver 1.0.19", + "indexmap 2.0.2", + "semver 1.0.20", ] [[package]] @@ -10769,7 +10756,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.0.0", + "indexmap 2.0.2", "libc", "log", "object 0.31.1", @@ -10811,7 +10798,7 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix 0.38.14", + "rustix 0.38.19", "serde", "sha2", "toml 0.5.11", @@ -10868,7 +10855,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.0.0", + 
"indexmap 2.0.2", "log", "object 0.31.1", "serde", @@ -10893,7 +10880,7 @@ dependencies = [ "log", "object 0.31.1", "rustc-demangle", - "rustix 0.38.14", + "rustix 0.38.19", "serde", "target-lexicon", "wasmtime-environ", @@ -10911,7 +10898,7 @@ checksum = "aef27ea6c34ef888030d15560037fe7ef27a5609fbbba8e1e3e41dc4245f5bb2" dependencies = [ "object 0.31.1", "once_cell", - "rustix 0.38.14", + "rustix 0.38.19", "wasmtime-versioned-export-macros", ] @@ -10935,7 +10922,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.0.0", + "indexmap 2.0.2", "libc", "log", "mach", @@ -10943,7 +10930,7 @@ dependencies = [ "memoffset", "paste", "rand", - "rustix 0.38.14", + "rustix 0.38.19", "sptr", "wasm-encoder", "wasmtime-asm-macros", @@ -10973,7 +10960,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -11010,14 +10997,14 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.14", + "rustix 0.38.19", ] [[package]] name = "wide" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa469ffa65ef7e0ba0f164183697b89b854253fd31aeb92358b7b6155177d62f" +checksum = "ebecebefc38ff1860b4bc47550bbfa63af5746061cf0d29fcd7fa63171602598" dependencies = [ "bytemuck", "safe_arch", @@ -11156,9 +11143,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" dependencies = [ "memchr", ] @@ -11289,7 +11276,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -11332,11 
+11319,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 0cf22f314..41ec781f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,6 +86,9 @@ monero-serai = { opt-level = 3 } panic = "unwind" [patch.crates-io] +# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201 +lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } + # subxt *can* pull these off crates.io yet there's no benefit to this sp-core-hashing = { git = "https://github.com/serai-dex/substrate" } sp-std = { git = "https://github.com/serai-dex/substrate" } diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index fd9380253..ed74a7c3b 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -8,13 +8,14 @@ use blake2::{ use scale::{Encode, Decode}; use serai_client::{ primitives::NetworkId, + validator_sets::primitives::{Session, ValidatorSet}, in_instructions::primitives::{Batch, SignedBatch}, }; pub use serai_db::*; use ::tributary::ReadWrite; -use crate::tributary::{TributarySpec, Transaction}; +use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType}; #[derive(Debug)] pub struct MainDb(PhantomData); @@ -33,11 +34,20 @@ impl MainDb { getter.get(Self::handled_message_key(network, id)).is_some() } - fn acive_tributaries_key() -> Vec { + fn in_tributary_key(set: ValidatorSet) -> Vec { + Self::main_key(b"in_tributary", set.encode()) + } + fn active_tributaries_key() -> Vec { Self::main_key(b"active_tributaries", []) } + fn retired_tributary_key(set: ValidatorSet) -> Vec { + Self::main_key(b"retired_tributary", set.encode()) 
+ } + pub fn in_tributary(getter: &G, set: ValidatorSet) -> bool { + getter.get(Self::in_tributary_key(set)).is_some() + } pub fn active_tributaries(getter: &G) -> (Vec, Vec) { - let bytes = getter.get(Self::acive_tributaries_key()).unwrap_or(vec![]); + let bytes = getter.get(Self::active_tributaries_key()).unwrap_or(vec![]); let mut bytes_ref: &[u8] = bytes.as_ref(); let mut tributaries = vec![]; @@ -47,8 +57,10 @@ impl MainDb { (bytes, tributaries) } - pub fn add_active_tributary(txn: &mut D::Transaction<'_>, spec: &TributarySpec) { - let key = Self::acive_tributaries_key(); + pub fn add_participating_in_tributary(txn: &mut D::Transaction<'_>, spec: &TributarySpec) { + txn.put(Self::in_tributary_key(spec.set()), []); + + let key = Self::active_tributaries_key(); let (mut existing_bytes, existing) = Self::active_tributaries(txn); for tributary in &existing { if tributary == spec { @@ -59,6 +71,25 @@ impl MainDb { spec.write(&mut existing_bytes).unwrap(); txn.put(key, existing_bytes); } + pub fn retire_tributary(txn: &mut D::Transaction<'_>, set: ValidatorSet) { + let mut active = Self::active_tributaries(txn).1; + for i in 0 .. 
active.len() { + if active[i].set() == set { + active.remove(i); + break; + } + } + + let mut bytes = vec![]; + for active in active { + active.write(&mut bytes).unwrap(); + } + txn.put(Self::active_tributaries_key(), bytes); + txn.put(Self::retired_tributary_key(set), []); + } + pub fn is_tributary_retired(getter: &G, set: ValidatorSet) -> bool { + getter.get(Self::retired_tributary_key(set)).is_some() + } fn signed_transaction_key(nonce: u32) -> Vec { Self::main_key(b"signed_transaction", nonce.to_le_bytes()) @@ -75,35 +106,50 @@ impl MainDb { res } - fn first_preprocess_key(network: NetworkId, id: [u8; 32]) -> Vec { - Self::main_key(b"first_preprocess", (network, id).encode()) + fn first_preprocess_key(network: NetworkId, id_type: RecognizedIdType, id: [u8; 32]) -> Vec { + Self::main_key(b"first_preprocess", (network, id_type, id).encode()) } pub fn save_first_preprocess( txn: &mut D::Transaction<'_>, network: NetworkId, + id_type: RecognizedIdType, id: [u8; 32], preprocess: Vec, ) { - let key = Self::first_preprocess_key(network, id); + let key = Self::first_preprocess_key(network, id_type, id); if let Some(existing) = txn.get(&key) { assert_eq!(existing, preprocess, "saved a distinct first preprocess"); return; } txn.put(key, preprocess); } - pub fn first_preprocess(getter: &G, network: NetworkId, id: [u8; 32]) -> Option> { - getter.get(Self::first_preprocess_key(network, id)) + pub fn first_preprocess( + getter: &G, + network: NetworkId, + id_type: RecognizedIdType, + id: [u8; 32], + ) -> Option> { + getter.get(Self::first_preprocess_key(network, id_type, id)) } + fn last_received_batch_key(network: NetworkId) -> Vec { + Self::main_key(b"last_received_batch", network.encode()) + } fn expected_batch_key(network: NetworkId, id: u32) -> Vec { Self::main_key(b"expected_batch", (network, id).encode()) } pub fn save_expected_batch(txn: &mut D::Transaction<'_>, batch: &Batch) { + txn.put(Self::last_received_batch_key(batch.network), batch.id.to_le_bytes()); txn.put( 
Self::expected_batch_key(batch.network, batch.id), Blake2b::::digest(batch.instructions.encode()), ); } + pub fn last_received_batch(getter: &G, network: NetworkId) -> Option { + getter + .get(Self::last_received_batch_key(network)) + .map(|id| u32::from_le_bytes(id.try_into().unwrap())) + } pub fn expected_batch(getter: &G, network: NetworkId, id: u32) -> Option<[u8; 32]> { getter.get(Self::expected_batch_key(network, id)).map(|batch| batch.try_into().unwrap()) } @@ -131,4 +177,50 @@ impl MainDb { .get(Self::last_verified_batch_key(network)) .map(|id| u32::from_le_bytes(id.try_into().unwrap())) } + + fn handover_batch_key(set: ValidatorSet) -> Vec { + Self::main_key(b"handover_batch", set.encode()) + } + fn lookup_handover_batch_key(network: NetworkId, batch: u32) -> Vec { + Self::main_key(b"lookup_handover_batch", (network, batch).encode()) + } + pub fn set_handover_batch(txn: &mut D::Transaction<'_>, set: ValidatorSet, batch: u32) { + txn.put(Self::handover_batch_key(set), batch.to_le_bytes()); + txn.put(Self::lookup_handover_batch_key(set.network, batch), set.session.0.to_le_bytes()); + } + pub fn handover_batch(getter: &G, set: ValidatorSet) -> Option { + getter.get(Self::handover_batch_key(set)).map(|id| u32::from_le_bytes(id.try_into().unwrap())) + } + pub fn is_handover_batch( + getter: &G, + network: NetworkId, + batch: u32, + ) -> Option { + getter.get(Self::lookup_handover_batch_key(network, batch)).map(|session| ValidatorSet { + network, + session: Session(u32::from_le_bytes(session.try_into().unwrap())), + }) + } + + fn queued_batches_key(set: ValidatorSet) -> Vec { + Self::main_key(b"queued_batches", set.encode()) + } + pub fn queue_batch(txn: &mut D::Transaction<'_>, set: ValidatorSet, batch: Transaction) { + let key = Self::queued_batches_key(set); + let mut batches = txn.get(&key).unwrap_or(vec![]); + batches.extend(batch.serialize()); + txn.put(&key, batches); + } + pub fn take_queued_batches(txn: &mut D::Transaction<'_>, set: ValidatorSet) -> Vec 
{ + let key = Self::queued_batches_key(set); + let batches_vec = txn.get(&key).unwrap_or(vec![]); + txn.del(&key); + let mut batches: &[u8] = &batches_vec; + + let mut res = vec![]; + while !batches.is_empty() { + res.push(Transaction::read(&mut batches).unwrap()); + } + res + } } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 0dc79bd39..80d3712eb 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1,8 +1,8 @@ -use core::{ops::Deref, future::Future}; +use core::ops::Deref; use std::{ sync::Arc, - time::{SystemTime, Duration}, - collections::{VecDeque, HashMap}, + time::Duration, + collections::{VecDeque, HashSet, HashMap}, }; use zeroize::{Zeroize, Zeroizing}; @@ -18,17 +18,22 @@ use frost::Participant; use serai_db::{DbTxn, Db}; use serai_env as env; -use serai_client::{primitives::NetworkId, Public, Serai}; +use serai_client::{ + primitives::NetworkId, + validator_sets::primitives::{Session, ValidatorSet}, + Public, Serai, SeraiInInstructions, +}; use message_queue::{Service, client::MessageQueue}; -use futures::stream::StreamExt; use tokio::{ sync::{RwLock, mpsc, broadcast}, time::sleep, }; -use ::tributary::{ReadWrite, ProvidedError, TransactionKind, TransactionTrait, Block, Tributary}; +use ::tributary::{ + ProvidedError, TransactionKind, TransactionError, TransactionTrait, Block, Tributary, +}; mod tributary; use crate::tributary::{ @@ -58,19 +63,30 @@ pub struct ActiveTributary { pub tributary: Arc>, } +#[derive(Clone)] +pub enum TributaryEvent { + NewTributary(ActiveTributary), + TributaryRetired(ValidatorSet), +} + // Creates a new tributary and sends it to all listeners. 
async fn add_tributary( db: D, key: Zeroizing<::F>, processors: &Pro, p2p: P, - tributaries: &broadcast::Sender>, + tributaries: &broadcast::Sender>, spec: TributarySpec, ) { + if MainDb::::is_tributary_retired(&db, spec.set()) { + log::info!("not adding tributary {:?} since it's been retired", spec.set()); + } + log::info!("adding tributary {:?}", spec.set()); let tributary = Tributary::<_, Transaction, _>::new( // TODO2: Use a db on a distinct volume to protect against DoS attacks + // TODO2: Delete said db once the Tributary is dropped db, spec.genesis(), spec.start_time(), @@ -103,419 +119,24 @@ async fn add_tributary( .await; tributaries - .send(ActiveTributary { spec, tributary: Arc::new(tributary) }) + .send(TributaryEvent::NewTributary(ActiveTributary { spec, tributary: Arc::new(tributary) })) .map_err(|_| "all ActiveTributary recipients closed") .unwrap(); } -pub async fn scan_substrate( - db: D, - key: Zeroizing<::F>, - processors: Pro, - serai: Arc, - new_tributary_spec: mpsc::UnboundedSender, -) { - log::info!("scanning substrate"); - - let mut db = SubstrateDb::new(db); - let mut next_substrate_block = db.next_block(); - - let new_substrate_block_notifier = { - let serai = &serai; - move || async move { - loop { - match serai.newly_finalized_block().await { - Ok(sub) => return sub, - Err(e) => { - log::error!("couldn't communicate with serai node: {e}"); - sleep(Duration::from_secs(5)).await; - } - } - } - } - }; - let mut substrate_block_notifier = new_substrate_block_notifier().await; - - loop { - // await the next block, yet if our notifier had an error, re-create it - { - let Ok(next_block) = - tokio::time::timeout(Duration::from_secs(60), substrate_block_notifier.next()).await - else { - // Timed out, which may be because Serai isn't finalizing or may be some issue with the - // notifier - if serai.get_latest_block().await.map(|block| block.number()).ok() == - Some(next_substrate_block.saturating_sub(1)) - { - log::info!("serai hasn't finalized a 
block in the last 60s..."); - } else { - substrate_block_notifier = new_substrate_block_notifier().await; - } - continue; - }; - - // next_block is a Option - if next_block.and_then(Result::ok).is_none() { - substrate_block_notifier = new_substrate_block_notifier().await; - continue; - } - } - - match substrate::handle_new_blocks( - &mut db, - &key, - |db: &mut D, spec: TributarySpec| { - log::info!("creating new tributary for {:?}", spec.set()); - - // Save it to the database - let mut txn = db.txn(); - MainDb::::add_active_tributary(&mut txn, &spec); - txn.commit(); - - // If we reboot before this is read, the fact it was saved to the database means it'll be - // handled on reboot - new_tributary_spec.send(spec).unwrap(); - }, - &processors, - &serai, - &mut next_substrate_block, - ) - .await - { - Ok(()) => {} - Err(e) => { - log::error!("couldn't communicate with serai node: {e}"); - sleep(Duration::from_secs(5)).await; - } - } - } -} - -pub(crate) trait RIDTrait: - Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32], u32) -> FRid -{ -} -impl FRid> - RIDTrait for F -{ -} - -pub(crate) async fn scan_tributaries< - D: Db, - Pro: Processors, - P: P2p, - FRid: Send + Future, - RID: 'static + Send + Sync + RIDTrait, ->( - raw_db: D, - key: Zeroizing<::F>, - recognized_id: RID, - processors: Pro, - serai: Arc, - mut new_tributary: broadcast::Receiver>, -) { - log::info!("scanning tributaries"); - - loop { - match new_tributary.recv().await { - Ok(ActiveTributary { spec, tributary }) => { - // For each Tributary, spawn a dedicated scanner task - tokio::spawn({ - let raw_db = raw_db.clone(); - let key = key.clone(); - let recognized_id = recognized_id.clone(); - let processors = processors.clone(); - let serai = serai.clone(); - async move { - let spec = &spec; - let reader = tributary.reader(); - let mut tributary_db = tributary::TributaryDb::new(raw_db.clone()); - loop { - // Obtain the next block notification now to prevent obtaining it immediately after - // 
the next block occurs - let next_block_notification = tributary.next_block_notification().await; - - tributary::scanner::handle_new_blocks::<_, _, _, _, _, _, P>( - &mut tributary_db, - &key, - recognized_id.clone(), - &processors, - |set, tx| { - let serai = serai.clone(); - async move { - loop { - match serai.publish(&tx).await { - Ok(_) => { - log::info!("set key pair for {set:?}"); - break; - } - // This is assumed to be some ephemeral error due to the assumed fault-free - // creation - // TODO2: Differentiate connection errors from invariants - Err(e) => { - if let Ok(latest) = serai.get_latest_block_hash().await { - // Check if this failed because the keys were already set by someone - // else - if matches!(serai.get_keys(spec.set(), latest).await, Ok(Some(_))) { - log::info!("another coordinator set key pair for {:?}", set); - break; - } - - // The above block may return false if the keys have been pruned from - // the state - // Check if this session is no longer the latest session, meaning it at - // some point did set keys, and we're just operating off very - // historical data - if let Ok(Some(current_session)) = - serai.get_session(spec.set().network, latest).await - { - if current_session.0 > spec.set().session.0 { - log::warn!( - "trying to set keys for a set which isn't the latest {:?}", - set - ); - break; - } - } - } - - log::error!( - "couldn't connect to Serai node to publish set_keys TX: {:?}", - e - ); - tokio::time::sleep(Duration::from_secs(10)).await; - } - } - } - } - }, - spec, - &reader, - ) - .await; - - next_block_notification - .await - .map_err(|_| "") - .expect("tributary dropped its notifications?"); - } - } - }); - } - Err(broadcast::error::RecvError::Lagged(_)) => { - panic!("scan_tributaries lagged to handle new_tributary") - } - Err(broadcast::error::RecvError::Closed) => panic!("new_tributary sender closed"), - } - } -} - -pub async fn heartbeat_tributaries( - p2p: P, - mut new_tributary: broadcast::Receiver>, -) { - let 
ten_blocks_of_time = - Duration::from_secs((10 * Tributary::::block_time()).into()); - - let mut readers = vec![]; - loop { - while let Ok(ActiveTributary { spec: _, tributary }) = { - match new_tributary.try_recv() { - Ok(tributary) => Ok(tributary), - Err(broadcast::error::TryRecvError::Empty) => Err(()), - Err(broadcast::error::TryRecvError::Lagged(_)) => { - panic!("heartbeat_tributaries lagged to handle new_tributary") - } - Err(broadcast::error::TryRecvError::Closed) => panic!("new_tributary sender closed"), - } - } { - readers.push(tributary.reader()); - } - - for tributary in &readers { - let tip = tributary.tip(); - let block_time = - SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0)); - - // Only trigger syncing if the block is more than a minute behind - if SystemTime::now() > (block_time + Duration::from_secs(60)) { - log::warn!("last known tributary block was over a minute ago"); - let mut msg = tip.to_vec(); - // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating - let timestamp = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("system clock is wrong") - .as_secs(); - // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to - // overlap - let time_unit = timestamp / u64::from(Tributary::::block_time()); - msg.extend(time_unit.to_le_bytes()); - P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; - } - } - - // Only check once every 10 blocks of time - sleep(ten_blocks_of_time).await; - } -} - -pub async fn handle_p2p( - our_key: ::G, - p2p: P, - mut new_tributary: broadcast::Receiver>, -) { - let channels = Arc::new(RwLock::new(HashMap::new())); - tokio::spawn({ - let p2p = p2p.clone(); - let channels = channels.clone(); - async move { - loop { - let tributary = new_tributary.recv().await.unwrap(); - let genesis = tributary.spec.genesis(); - - let (send, mut recv) = mpsc::unbounded_channel(); - 
channels.write().await.insert(genesis, send); - - tokio::spawn({ - let p2p = p2p.clone(); - async move { - loop { - let mut msg: Message

= recv.recv().await.unwrap(); - match msg.kind { - P2pMessageKind::KeepAlive => {} - - P2pMessageKind::Tributary(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", tributary.spec.set()); - if tributary.tributary.handle_message(&msg.msg).await { - P2p::broadcast(&p2p, msg.kind, msg.msg).await; - } - } - - // TODO2: Rate limit this per timestamp - // And/or slash on Heartbeat which justifies a response, since the node obviously - // was offline and we must now use our bandwidth to compensate for them? - P2pMessageKind::Heartbeat(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - if msg.msg.len() != 40 { - log::error!("validator sent invalid heartbeat"); - continue; - } - - let p2p = p2p.clone(); - let spec = tributary.spec.clone(); - let reader = tributary.tributary.reader(); - // Spawn a dedicated task as this may require loading large amounts of data from - // disk and take a notable amount of time - tokio::spawn(async move { - /* - // Have sqrt(n) nodes reply with the blocks - let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64; - // Try to have at least 3 responders - if responders < 3 { - responders = tributary.spec.n().min(3).into(); - } - */ - - // Have up to three nodes respond - let responders = u64::from(spec.n().min(3)); - - // Decide which nodes will respond by using the latest block's hash as a - // mutually agreed upon entropy source - // This isn't a secure source of entropy, yet it's fine for this - let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap()); - // If n = 10, responders = 3, we want `start` to be 0 ..= 7 - // (so the highest is 7, 8, 9) - // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 - let start = - usize::try_from(entropy % (u64::from(spec.n() + 1) - responders)).unwrap(); - let mut selected = false; - for validator in - &spec.validators()[start .. 
(start + usize::try_from(responders).unwrap())] - { - if our_key == validator.0 { - selected = true; - break; - } - } - if !selected { - log::debug!("received heartbeat and not selected to respond"); - return; - } - - log::debug!("received heartbeat and selected to respond"); - - let mut latest = msg.msg[.. 32].try_into().unwrap(); - while let Some(next) = reader.block_after(&latest) { - let mut res = reader.block(&next).unwrap().serialize(); - res.extend(reader.commit(&next).unwrap()); - // Also include the timestamp used within the Heartbeat - res.extend(&msg.msg[32 .. 40]); - p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; - latest = next; - } - }); - } - - P2pMessageKind::Block(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - let mut msg_ref: &[u8] = msg.msg.as_ref(); - let Ok(block) = Block::::read(&mut msg_ref) else { - log::error!("received block message with an invalidly serialized block"); - continue; - }; - // Get just the commit - msg.msg.drain(.. 
(msg.msg.len() - msg_ref.len())); - msg.msg.drain((msg.msg.len() - 8) ..); - - let res = tributary.tributary.sync_block(block, msg.msg).await; - log::debug!("received block from {:?}, sync_block returned {}", msg.sender, res); - } - } - } - } - }); - } - } - }); - - loop { - let msg = p2p.receive().await; - match msg.kind { - P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - P2pMessageKind::Heartbeat(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - P2pMessageKind::Block(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - } - } -} - async fn publish_signed_transaction( - db: &mut D, + txn: &mut D::Transaction<'_>, tributary: &Tributary, tx: Transaction, ) { log::debug!("publishing transaction {}", hex::encode(tx.hash())); - let mut txn = db.txn(); let signer = if let TransactionKind::Signed(signed) = tx.kind() { let signer = signed.signer; // Safe as we should deterministically create transactions, meaning if this is already on-disk, // it's what we're saving now - MainDb::::save_signed_transaction(&mut txn, signed.nonce, tx); + MainDb::::save_signed_transaction(txn, signed.nonce, tx); signer } else { @@ -525,457 +146,578 @@ async fn publish_signed_transaction( // If we're trying to publish 5, when the last transaction published was 3, this will delay // publication until the point in time we publish 4 while let Some(tx) = MainDb::::take_signed_transaction( - &mut txn, + txn, tributary .next_nonce(signer) .await .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), ) { - // We should've created a valid transaction - // This does assume publish_signed_transaction hasn't been called twice with the same - // transaction, which risks a race condition on the validity of this assert - 
// Our use case only calls this function sequentially - assert!(tributary.add_transaction(tx).await, "created an invalid transaction"); + // We need to return a proper error here to enable that, due to a race condition around + // multiple publications + match tributary.add_transaction(tx.clone()).await { + Ok(_) => {} + // Some asynchronicity if InvalidNonce, assumed safe due to deterministic nonces + Err(TransactionError::InvalidNonce) => { + log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?") + } + Err(e) => panic!("created an invalid transaction: {e:?}"), + } } - txn.commit(); } -async fn handle_processor_messages( - mut db: D, - key: Zeroizing<::F>, - serai: Arc, - mut processors: Pro, +async fn handle_processor_message( + db: &mut D, + key: &Zeroizing<::F>, + serai: &Serai, + tributaries: &HashMap>, network: NetworkId, - mut new_tributary: mpsc::UnboundedReceiver>, -) { - let mut db_clone = db.clone(); // Enables cloning the DB while we have a txn - let pub_key = Ristretto::generator() * key.deref(); + msg: &processors::Message, +) -> bool { + if MainDb::::handled_message(db, msg.network, msg.id) { + return true; + } - let mut tributaries = HashMap::new(); + let mut txn = db.txn(); - loop { - match new_tributary.try_recv() { - Ok(tributary) => { - let set = tributary.spec.set(); - assert_eq!(set.network, network); - tributaries.insert(set.session, tributary); + let mut relevant_tributary = match &msg.msg { + // We'll only receive these if we fired GenerateKey, which we'll only do if we're + // in-set, making the Tributary relevant + ProcessorMessage::KeyGen(inner_msg) => match inner_msg { + key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.set.session), + key_gen::ProcessorMessage::Shares { id, .. } => Some(id.set.session), + key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.set.session), + }, + // TODO: Review replacing key with Session in messages? 
+ ProcessorMessage::Sign(inner_msg) => match inner_msg { + // We'll only receive Preprocess and Share if we're actively signing + sign::ProcessorMessage::Preprocess { id, .. } => { + Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) } - Err(mpsc::error::TryRecvError::Empty) => {} - Err(mpsc::error::TryRecvError::Disconnected) => { - panic!("handle_processor_messages new_tributary sender closed") + sign::ProcessorMessage::Share { id, .. } => { + Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) } - } - - // TODO: Check this ID is sane (last handled ID or expected next ID) - let msg = processors.recv(network).await; - - // TODO: We need to verify the Batches published to Substrate - - if !MainDb::::handled_message(&db, msg.network, msg.id) { - let mut txn = db.txn(); - - let relevant_tributary = match &msg.msg { - // We'll only receive these if we fired GenerateKey, which we'll only do if if we're - // in-set, making the Tributary relevant - ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.set.session), - key_gen::ProcessorMessage::Shares { id, .. } => Some(id.set.session), - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.set.session), - }, - // TODO: Review replacing key with Session in messages? - ProcessorMessage::Sign(inner_msg) => match inner_msg { - // We'll only receive Preprocess and Share if we're actively signing - sign::ProcessorMessage::Preprocess { id, .. } => { - Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) - } - sign::ProcessorMessage::Share { id, .. } => { - Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) - } - // While the Processor's Scanner will always emit Completed, that's routed through the - // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and - // confirms it - sign::ProcessorMessage::Completed { key, .. 
} => { - Some(SubstrateDb::::session_for_key(&txn, key).unwrap()) + // While the Processor's Scanner will always emit Completed, that's routed through the + // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and + // confirms it + sign::ProcessorMessage::Completed { key, .. } => { + Some(SubstrateDb::::session_for_key(&txn, key).unwrap()) + } + }, + ProcessorMessage::Coordinator(inner_msg) => match inner_msg { + // This is a special case as it's relevant to *all* Tributaries for this network + // It doesn't return a Tributary to become `relevant_tributary` though + coordinator::ProcessorMessage::SubstrateBlockAck { network, block, plans } => { + assert_eq!( + *network, msg.network, + "processor claimed to be a different network than it was for SubstrateBlockAck", + ); + + // Get the sessions for these keys + let keys = plans.iter().map(|plan| plan.key.clone()).collect::>(); + let mut sessions = vec![]; + for key in keys { + let session = SubstrateDb::::session_for_key(&txn, &key).unwrap(); + // Only keep them if we're in the Tributary AND they haven't been retired + let set = ValidatorSet { network: *network, session }; + if MainDb::::in_tributary(&txn, set) && (!MainDb::::is_tributary_retired(&txn, set)) + { + sessions.push((session, key)); } - }, - ProcessorMessage::Coordinator(inner_msg) => match inner_msg { - // This is a special case as it's relevant to *all* Tributaries for this network - // It doesn't return a Tributary to become `relevant_tributary` though - coordinator::ProcessorMessage::SubstrateBlockAck { network, block, plans } => { - assert_eq!( - *network, msg.network, - "processor claimed to be a different network than it was for SubstrateBlockAck", - ); - - // TODO: Find all Tributaries active at this Substrate block, and make sure we have - // them all - - for tributary in tributaries.values() { - // TODO: This needs to be scoped per multisig - TributaryDb::::set_plan_ids(&mut txn, tributary.spec.genesis(), *block, 
plans); - - let tx = Transaction::SubstrateBlock(*block); - log::trace!("processor message effected transaction {}", hex::encode(tx.hash())); - log::trace!("providing transaction {}", hex::encode(tx.hash())); - let res = tributary.tributary.provide_transaction(tx).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - panic!("provided an invalid transaction: {res:?}"); - } - } + } - None - } - // We'll only fire these if we are the Substrate signer, making the Tributary relevant - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => { - Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) - } - coordinator::ProcessorMessage::BatchShare { id, .. } => { - Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) + // Ensure we have the Tributaries + for (session, _) in &sessions { + if !tributaries.contains_key(session) { + return false; } - }, - // These don't return a relevant Tributary as there's no Tributary with action expected - ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { batch } => { - assert_eq!( - batch.network, msg.network, - "processor sent us a batch for a different network than it was for", - ); - let this_batch_id = batch.id; - MainDb::::save_expected_batch(&mut txn, batch); - - // Re-define batch - // We can't drop it, yet it shouldn't be accidentally used in the following block - #[allow(clippy::let_unit_value, unused_variables)] - let batch = (); - - // Verify all `Batch`s which we've already indexed from Substrate - // This won't be complete, as it only runs when a `Batch` message is received, which - // will be before we get a `SignedBatch`. It is, however, incremental. We can use a - // complete version to finish the last section when we need a complete version. 
- let last = MainDb::::last_verified_batch(&txn, msg.network); - // This variable exists so Rust can verify Send/Sync properties - let mut faulty = None; - for id in last.map(|last| last + 1).unwrap_or(0) ..= this_batch_id { - if let Some(on_chain) = SubstrateDb::::batch_instructions_hash(&txn, network, id) { - let off_chain = MainDb::::expected_batch(&txn, network, id).unwrap(); - if on_chain != off_chain { - faulty = Some((id, off_chain, on_chain)); - break; - } - MainDb::::save_last_verified_batch(&mut txn, msg.network, id); - } - } + } - if let Some((id, off_chain, on_chain)) = faulty { - // Halt operations on this network and spin, as this is a critical fault + for (session, key) in sessions { + let tributary = &tributaries[&session]; + let plans = plans + .iter() + .filter_map(|plan| Some(plan.id).filter(|_| plan.key == key)) + .collect::>(); + TributaryDb::::set_plan_ids(&mut txn, tributary.spec.genesis(), *block, &plans); + + let tx = Transaction::SubstrateBlock(*block); + log::trace!("processor message effected transaction {}", hex::encode(tx.hash())); + log::trace!("providing transaction {}", hex::encode(tx.hash())); + let res = tributary.tributary.provide_transaction(tx).await; + if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { + if res == Err(ProvidedError::LocalMismatchesOnChain) { + // Spin, since this is a crit for this Tributary loop { log::error!( - "{}! network: {:?} id: {} off-chain: {} on-chain: {}", - "on-chain batch doesn't match off-chain", - network, - id, - hex::encode(off_chain), - hex::encode(on_chain), + "{}. 
tributary: {}, provided: SubstrateBlock({})", + "tributary added distinct provided to delayed locally provided TX", + hex::encode(tributary.spec.genesis()), + block, ); sleep(Duration::from_secs(60)).await; } } - - None + panic!("provided an invalid transaction: {res:?}"); } - // If this is a new Batch, immediately publish it (if we can) - processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => { - assert_eq!( - batch.batch.network, msg.network, - "processor sent us a signed batch for a different network than it was for", - ); - // TODO: Check this key's key pair's substrate key is authorized to publish batches + } - log::debug!("received batch {:?} {}", batch.batch.network, batch.batch.id); + None + } + // We'll only fire these if we are the Substrate signer, making the Tributary relevant + coordinator::ProcessorMessage::BatchPreprocess { id, .. } => { + Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) + } + coordinator::ProcessorMessage::BatchShare { id, .. 
} => { + Some(SubstrateDb::::session_for_key(&txn, &id.key).unwrap()) + } + }, + // These don't return a relevant Tributary as there's no Tributary with action expected + ProcessorMessage::Substrate(inner_msg) => match inner_msg { + processor_messages::substrate::ProcessorMessage::Batch { batch } => { + assert_eq!( + batch.network, msg.network, + "processor sent us a batch for a different network than it was for", + ); + let this_batch_id = batch.id; + MainDb::::save_expected_batch(&mut txn, batch); + + // Re-define batch + // We can't drop it, yet it shouldn't be accidentally used in the following block + #[allow(clippy::let_unit_value, unused_variables)] + let batch = (); + + // This won't be complete, as this call is when a `Batch` message is received, which + // will be before we get a `SignedBatch` + // It is, however, incremental + // When we need a complete version, we use another call, continuously called as-needed + substrate::verify_published_batches::(&mut txn, msg.network, this_batch_id).await; + + None + } + // If this is a new Batch, immediately publish it (if we can) + processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => { + assert_eq!( + batch.batch.network, msg.network, + "processor sent us a signed batch for a different network than it was for", + ); + + log::debug!("received batch {:?} {}", batch.batch.network, batch.batch.id); + + // Save this batch to the disk + MainDb::::save_batch(&mut txn, batch.clone()); + + // Get the next-to-execute batch ID + let mut next = substrate::get_expected_next_batch(serai, network).await; + + // Since we have a new batch, publish all batches yet to be published to Serai + // This handles the edge-case where batch n+1 is signed before batch n is + let mut batches = VecDeque::new(); + while let Some(batch) = MainDb::::batch(&txn, network, next) { + batches.push_back(batch); + next += 1; + } - // Save this batch to the disk - MainDb::::save_batch(&mut txn, batch.clone()); + let start_id = 
batches.front().map(|batch| batch.batch.id); + let last_id = batches.back().map(|batch| batch.batch.id); + while let Some(batch) = batches.pop_front() { + // If this Batch should no longer be published, continue + if substrate::get_expected_next_batch(serai, network).await > batch.batch.id { + continue; + } - // Get the next-to-execute batch ID - async fn get_next(serai: &Serai, network: NetworkId) -> u32 { - let mut first = true; - loop { - if !first { - log::error!( - "{} {network:?}", - "couldn't connect to Serai node to get the next batch ID for", - ); - tokio::time::sleep(Duration::from_secs(5)).await; - } - first = false; - - let Ok(latest_block) = serai.get_latest_block().await else { - continue; - }; - let Ok(last) = serai.get_last_batch_for_network(latest_block.hash(), network).await - else { - continue; - }; - break if let Some(last) = last { last + 1 } else { 0 }; - } - } - let mut next = get_next(&serai, network).await; - - // Since we have a new batch, publish all batches yet to be published to Serai - // This handles the edge-case where batch n+1 is signed before batch n is - let mut batches = VecDeque::new(); - while let Some(batch) = MainDb::::batch(&txn, network, next) { - batches.push_back(batch); - next += 1; + let tx = SeraiInInstructions::execute_batch(batch.clone()); + log::debug!("attempting to publish batch {:?} {}", batch.batch.network, batch.batch.id,); + // This publish may fail if this transactions already exists in the mempool, which is + // possible, or if this batch was already executed on-chain + // Either case will have eventual resolution and be handled by the above check on if + // this batch should execute + let res = serai.publish(&tx).await; + if res.is_ok() { + log::info!( + "published batch {network:?} {} (block {})", + batch.batch.id, + hex::encode(batch.batch.block), + ); + } else { + log::debug!( + "couldn't publish batch {:?} {}: {:?}", + batch.batch.network, + batch.batch.id, + res, + ); + // If we failed to publish it, 
restore it + batches.push_front(batch); + // Sleep for a few seconds before retrying to prevent hammering the node + sleep(Duration::from_secs(5)).await; + } + } + // Verify the `Batch`s we just published + if let Some(last_id) = last_id { + loop { + let verified = + substrate::verify_published_batches::(&mut txn, msg.network, last_id).await; + if verified == Some(last_id) { + break; } + } + } - while let Some(batch) = batches.pop_front() { - // If this Batch should no longer be published, continue - if get_next(&serai, network).await > batch.batch.id { - continue; - } - - let tx = Serai::execute_batch(batch.clone()); - log::debug!( - "attempting to publish batch {:?} {}", - batch.batch.network, - batch.batch.id, - ); - // This publish may fail if this transactions already exists in the mempool, which is - // possible, or if this batch was already executed on-chain - // Either case will have eventual resolution and be handled by the above check on if - // this batch should execute - let res = serai.publish(&tx).await; - if res.is_ok() { - log::info!( - "published batch {network:?} {} (block {})", - batch.batch.id, - hex::encode(batch.batch.block), - ); - } else { - log::debug!( - "couldn't publish batch {:?} {}: {:?}", - batch.batch.network, - batch.batch.id, - res, - ); - // If we failed to publish it, restore it - batches.push_front(batch); - // Sleep for a few seconds before retrying to prevent hammering the node - tokio::time::sleep(Duration::from_secs(5)).await; - } + // Check if any of these `Batch`s were a handover `Batch` + // If so, we need to publish any delayed `Batch` provided transactions + let mut relevant = None; + if let Some(start_id) = start_id { + let last_id = last_id.unwrap(); + for batch in start_id .. last_id { + if let Some(set) = MainDb::::is_handover_batch(&txn, msg.network, batch) { + // relevant may already be Some. 
This is a safe over-write, as we don't need to + // be concerned for handovers of Tributaries which have completed their handovers + // While this does bypass the checks that Tributary would've performed at the + // time, if we ever actually participate in a handover, we will verify *all* + // prior `Batch`s, including the ones which would've been explicitly verified + // then + // + // We should only declare this session relevant if it's relevant to us + // We only set handover `Batch`s when we're trying to produce said `Batch`, so this + // would be a `Batch` we were involved in the production of + // Accordingly, it's relevant + relevant = Some(set.session); } - - None } - }, - }; + } + } + relevant + } + }, + }; - // If there's a relevant Tributary... - if let Some(relevant_tributary) = relevant_tributary { - // Make sure we have it - // Per the reasoning above, we only return a Tributary as relevant if we're a participant - // Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary - // has already completed and this is simply an old message - // TODO: Check if the Tributary has already been completed - let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else { - // Since we don't, sleep for a fraction of a second and move to the next loop iteration - // At the start of the loop, we'll check for new tributaries, making this eventually - // resolve - sleep(Duration::from_millis(100)).await; - continue; - }; + // If we have a relevant Tributary, check it's actually still relevant and has yet to be retired + if let Some(relevant_tributary_value) = relevant_tributary { + if MainDb::::is_tributary_retired( + &txn, + ValidatorSet { network: msg.network, session: relevant_tributary_value }, + ) { + relevant_tributary = None; + } + } - let genesis = spec.genesis(); - - let tx = match msg.msg.clone() { - ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, 
commitments } => Some( - Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed()), - ), - key_gen::ProcessorMessage::Shares { id, mut shares } => { - // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = crate::tributary::dkg_confirmation_nonces(&key, spec, id.attempt); - - let mut tx_shares = Vec::with_capacity(shares.len()); - for i in 1 ..= spec.n() { - let i = Participant::new(i).unwrap(); - if i == - spec - .i(pub_key) - .expect("processor message to DKG for a session we aren't a validator in") - { - continue; - } - tx_shares.push( - shares.remove(&i).expect("processor didn't send share for another validator"), - ); - } + // If there's a relevant Tributary... + if let Some(relevant_tributary) = relevant_tributary { + // Make sure we have it + // Per the reasoning above, we only return a Tributary as relevant if we're a participant + // Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary has + // already completed and this is simply an old message (which we prior checked) + let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else { + // Since we don't, sleep for a fraction of a second and return false, signaling we didn't + // handle this message + // At the start of the loop which calls this function, we'll check for new tributaries, + // making this eventually resolve + sleep(Duration::from_millis(100)).await; + return false; + }; + + let genesis = spec.genesis(); + let pub_key = Ristretto::generator() * key.deref(); + + let txs = match msg.msg.clone() { + ProcessorMessage::KeyGen(inner_msg) => match inner_msg { + key_gen::ProcessorMessage::Commitments { id, commitments } => { + vec![Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed())] + } + key_gen::ProcessorMessage::Shares { id, mut shares } => { + // Create a MuSig-based machine to inform Substrate of this key generation + let nonces = 
crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt); + + let mut tx_shares = Vec::with_capacity(shares.len()); + for i in 1 ..= spec.n() { + let i = Participant::new(i).unwrap(); + if i == + spec + .i(pub_key) + .expect("processor message to DKG for a session we aren't a validator in") + { + continue; + } + tx_shares + .push(shares.remove(&i).expect("processor didn't send share for another validator")); + } - Some(Transaction::DkgShares { - attempt: id.attempt, - shares: tx_shares, - confirmation_nonces: nonces, - signed: Transaction::empty_signed(), - }) + vec![Transaction::DkgShares { + attempt: id.attempt, + shares: tx_shares, + confirmation_nonces: nonces, + signed: Transaction::empty_signed(), + }] + } + key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { + assert_eq!( + id.set.network, msg.network, + "processor claimed to be a different network than it was for GeneratedKeyPair", + ); + // TODO2: Also check the other KeyGenId fields + + // Tell the Tributary the key pair, get back the share for the MuSig signature + let share = crate::tributary::generated_key_pair::( + &mut txn, + key, + spec, + &(Public(substrate_key), network_key.try_into().unwrap()), + id.attempt, + ); + + match share { + Ok(share) => { + vec![Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())] } - key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { - assert_eq!( - id.set.network, msg.network, - "processor claimed to be a different network than it was for GeneratedKeyPair", - ); - // TODO2: Also check the other KeyGenId fields - - // Tell the Tributary the key pair, get back the share for the MuSig signature - let share = crate::tributary::generated_key_pair::( - &mut txn, - &key, - spec, - &(Public(substrate_key), network_key.try_into().unwrap()), - id.attempt, - ); - - match share { - Ok(share) => { - Some(Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())) - } - 
Err(p) => { - todo!("participant {p:?} sent invalid DKG confirmation preprocesses") - } - } + Err(p) => { + todo!("participant {p:?} sent invalid DKG confirmation preprocesses") } - }, - ProcessorMessage::Sign(msg) => match msg { - sign::ProcessorMessage::Preprocess { id, preprocess } => { - if id.attempt == 0 { - MainDb::::save_first_preprocess(&mut txn, network, id.id, preprocess); + } + } + }, + ProcessorMessage::Sign(msg) => match msg { + sign::ProcessorMessage::Preprocess { id, preprocess } => { + if id.attempt == 0 { + MainDb::::save_first_preprocess( + &mut txn, + network, + RecognizedIdType::Plan, + id.id, + preprocess, + ); - None - } else { - Some(Transaction::SignPreprocess(SignData { - plan: id.id, - attempt: id.attempt, - data: preprocess, - signed: Transaction::empty_signed(), - })) - } - } - sign::ProcessorMessage::Share { id, share } => Some(Transaction::SignShare(SignData { + vec![] + } else { + vec![Transaction::SignPreprocess(SignData { plan: id.id, attempt: id.attempt, - data: share, + data: preprocess, signed: Transaction::empty_signed(), - })), - sign::ProcessorMessage::Completed { key: _, id, tx } => { - let r = Zeroizing::new(::F::random(&mut OsRng)); - #[allow(non_snake_case)] - let R = ::generator() * r.deref(); - let mut tx = Transaction::SignCompleted { - plan: id, - tx_hash: tx, - first_signer: pub_key, - signature: SchnorrSignature { R, s: ::F::ZERO }, - }; - let signed = SchnorrSignature::sign(&key, r, tx.sign_completed_challenge()); - match &mut tx { - Transaction::SignCompleted { signature, .. 
} => { - *signature = signed; + })] + } + } + sign::ProcessorMessage::Share { id, share } => vec![Transaction::SignShare(SignData { + plan: id.id, + attempt: id.attempt, + data: share, + signed: Transaction::empty_signed(), + })], + sign::ProcessorMessage::Completed { key: _, id, tx } => { + let r = Zeroizing::new(::F::random(&mut OsRng)); + #[allow(non_snake_case)] + let R = ::generator() * r.deref(); + let mut tx = Transaction::SignCompleted { + plan: id, + tx_hash: tx, + first_signer: pub_key, + signature: SchnorrSignature { R, s: ::F::ZERO }, + }; + let signed = SchnorrSignature::sign(key, r, tx.sign_completed_challenge()); + match &mut tx { + Transaction::SignCompleted { signature, .. } => { + *signature = signed; + } + _ => unreachable!(), + } + vec![tx] + } + }, + ProcessorMessage::Coordinator(inner_msg) => match inner_msg { + coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(), + coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => { + log::info!( + "informed of batch (sign ID {}, attempt {}) for block {}", + hex::encode(id.id), + id.attempt, + hex::encode(block), + ); + + // If this is the first attempt instance, wait until we synchronize around the batch + // first + if id.attempt == 0 { + MainDb::::save_first_preprocess( + &mut txn, + spec.set().network, + RecognizedIdType::Batch, + id.id, + preprocess, + ); + + // If this is the new key's first Batch, only create this TX once we verify all + // all prior published `Batch`s + let last_received = MainDb::::last_received_batch(&txn, msg.network).unwrap(); + let handover_batch = MainDb::::handover_batch(&txn, spec.set()); + if handover_batch.is_none() { + MainDb::::set_handover_batch(&mut txn, spec.set(), last_received); + if last_received != 0 { + // Decrease by 1, to get the ID of the Batch prior to this Batch + let prior_sets_last_batch = last_received - 1; + loop { + let successfully_verified = substrate::verify_published_batches::( + &mut txn, + msg.network, 
+ prior_sets_last_batch, + ) + .await; + if successfully_verified == Some(prior_sets_last_batch) { + break; + } + sleep(Duration::from_secs(5)).await; } - _ => unreachable!(), } - Some(tx) } - }, - ProcessorMessage::Coordinator(inner_msg) => match inner_msg { - coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(), - coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => { - log::info!( - "informed of batch (sign ID {}, attempt {}) for block {}", - hex::encode(id.id), - id.attempt, - hex::encode(block), - ); - // If this is the first attempt instance, wait until we synchronize around - // the batch first - if id.attempt == 0 { - MainDb::::save_first_preprocess(&mut txn, spec.set().network, id.id, preprocess); - - // TODO: If this is the new key's first Batch, only create this TX once we verify - // all prior published `Batch`s - Some(Transaction::Batch(block.0, id.id)) + + // There is a race condition here. We may verify all `Batch`s from the prior set, + // start signing the handover `Batch` `n`, start signing `n+1`, have `n+1` signed + // before `n` (or at the same time), yet then the prior set forges a malicious + // `Batch` `n`. + // + // The malicious `Batch` `n` would be publishable to Serai, as Serai can't + // distinguish what's intended to be a handover `Batch`, yet then anyone could + // publish the new set's `n+1`, causing their acceptance of the handover. + // + // To fix this, if this is after the handover `Batch` and we have yet to verify + // publication of the handover `Batch`, don't yet yield the provided. 
+ let handover_batch = MainDb::::handover_batch(&txn, spec.set()).unwrap(); + let intended = Transaction::Batch(block.0, id.id); + let mut res = vec![intended.clone()]; + if last_received > handover_batch { + if let Some(last_verified) = MainDb::::last_verified_batch(&txn, msg.network) { + if last_verified < handover_batch { + res = vec![]; + } } else { - Some(Transaction::BatchPreprocess(SignData { - plan: id.id, - attempt: id.attempt, - data: preprocess, - signed: Transaction::empty_signed(), - })) + res = vec![]; } } - coordinator::ProcessorMessage::BatchShare { id, share } => { - Some(Transaction::BatchShare(SignData { - plan: id.id, - attempt: id.attempt, - data: share.to_vec(), - signed: Transaction::empty_signed(), - })) - } - }, - ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { .. } => unreachable!(), - processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(), - }, - }; - // If this created a transaction, publish it - if let Some(mut tx) = tx { - log::trace!("processor message effected transaction {}", hex::encode(tx.hash())); + if res.is_empty() { + MainDb::::queue_batch(&mut txn, spec.set(), intended); + } - match tx.kind() { - TransactionKind::Provided(_) => { - log::trace!("providing transaction {}", hex::encode(tx.hash())); - let res = tributary.provide_transaction(tx).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - panic!("provided an invalid transaction: {res:?}"); + res + } else { + vec![Transaction::BatchPreprocess(SignData { + plan: id.id, + attempt: id.attempt, + data: preprocess, + signed: Transaction::empty_signed(), + })] + } + } + coordinator::ProcessorMessage::BatchShare { id, share } => { + vec![Transaction::BatchShare(SignData { + plan: id.id, + attempt: id.attempt, + data: share.to_vec(), + signed: Transaction::empty_signed(), + })] + } + }, + ProcessorMessage::Substrate(inner_msg) => match inner_msg { + 
processor_messages::substrate::ProcessorMessage::Batch { .. } => unreachable!(), + processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => { + // We only reach here if this SignedBatch triggered the publication of a handover + // Batch + // Since the handover `Batch` was successfully published and verified, we no longer + // have to worry about the above n+1 attack + MainDb::::take_queued_batches(&mut txn, spec.set()) + } + }, + }; + + // If this created transactions, publish them + for mut tx in txs { + log::trace!("processor message effected transaction {}", hex::encode(tx.hash())); + + match tx.kind() { + TransactionKind::Provided(_) => { + log::trace!("providing transaction {}", hex::encode(tx.hash())); + let res = tributary.provide_transaction(tx.clone()).await; + if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { + if res == Err(ProvidedError::LocalMismatchesOnChain) { + // Spin, since this is a crit for this Tributary + loop { + log::error!( + "{}. 
tributary: {}, provided: {:?}", + "tributary added distinct provided to delayed locally provided TX", + hex::encode(spec.genesis()), + &tx, + ); + sleep(Duration::from_secs(60)).await; } } - TransactionKind::Unsigned => { - log::trace!("publishing unsigned transaction {}", hex::encode(tx.hash())); - // Ignores the result since we can't differentiate already in-mempool from - // already on-chain from invalid - // TODO: Don't ignore the result - tributary.add_transaction(tx).await; - } - TransactionKind::Signed(_) => { - log::trace!("getting next nonce for Tributary TX in response to processor message"); - - let nonce = loop { - let Some(nonce) = NonceDecider::::nonce(&txn, genesis, &tx) - .expect("signed TX didn't have nonce") - else { - // This can be None if: - // 1) We scanned the relevant transaction(s) in a Tributary block - // 2) The processor was sent a message and responded - // 3) The Tributary TXN has yet to be committed - log::warn!("nonce has yet to be saved for processor-instigated transaction"); - sleep(Duration::from_millis(100)).await; - continue; - }; - break nonce; - }; - tx.sign(&mut OsRng, genesis, &key, nonce); - - publish_signed_transaction(&mut db_clone, tributary, tx).await; - } + panic!("provided an invalid transaction: {res:?}"); } } + TransactionKind::Unsigned => { + log::trace!("publishing unsigned transaction {}", hex::encode(tx.hash())); + match tributary.add_transaction(tx.clone()).await { + Ok(_) => {} + Err(e) => panic!("created an invalid unsigned transaction: {e:?}"), + } + } + TransactionKind::Signed(_) => { + log::trace!("getting next nonce for Tributary TX in response to processor message"); + + let nonce = loop { + let Some(nonce) = + NonceDecider::::nonce(&txn, genesis, &tx).expect("signed TX didn't have nonce") + else { + // This can be None if the following events occur, in order: + // 1) We scanned the relevant transaction(s) in a Tributary block + // 2) The processor was sent a message and responded + // 3) The Tributary 
TXN has yet to be committed + log::warn!("nonce has yet to be saved for processor-instigated transaction"); + sleep(Duration::from_millis(100)).await; + continue; + }; + break nonce; + }; + tx.sign(&mut OsRng, genesis, key, nonce); + + publish_signed_transaction(&mut txn, tributary, tx).await; + } } + } + } + + MainDb::::save_handled_message(&mut txn, msg.network, msg.id); + txn.commit(); + + true +} - MainDb::::save_handled_message(&mut txn, msg.network, msg.id); - txn.commit(); +async fn handle_processor_messages( + mut db: D, + key: Zeroizing<::F>, + serai: Arc, + mut processors: Pro, + network: NetworkId, + mut tributary_event: mpsc::UnboundedReceiver>, +) { + let mut tributaries = HashMap::new(); + loop { + match tributary_event.try_recv() { + Ok(event) => match event { + TributaryEvent::NewTributary(tributary) => { + let set = tributary.spec.set(); + assert_eq!(set.network, network); + tributaries.insert(set.session, tributary); + } + TributaryEvent::TributaryRetired(set) => { + tributaries.remove(&set.session); + } + }, + Err(mpsc::error::TryRecvError::Empty) => {} + Err(mpsc::error::TryRecvError::Disconnected) => { + panic!("handle_processor_messages tributary_event sender closed") + } } - processors.ack(msg).await; + // TODO: Check this ID is sane (last handled ID or expected next ID) + let msg = processors.recv(network).await; + if handle_processor_message(&mut db, &key, &serai, &tributaries, network, &msg).await { + processors.ack(msg).await; + } } } @@ -984,7 +726,7 @@ pub async fn handle_processors( key: Zeroizing<::F>, serai: Arc, processors: Pro, - mut new_tributary: broadcast::Receiver>, + mut tributary_event: broadcast::Receiver>, ) { let mut channels = HashMap::new(); for network in serai_client::primitives::NETWORKS { @@ -1005,8 +747,14 @@ pub async fn handle_processors( // Listen to new tributary events loop { - let tributary = new_tributary.recv().await.unwrap(); - channels[&tributary.spec.set().network].send(tributary).unwrap(); + match 
tributary_event.recv().await.unwrap() { + TributaryEvent::NewTributary(tributary) => channels[&tributary.spec.set().network] + .send(TributaryEvent::NewTributary(tributary)) + .unwrap(), + TributaryEvent::TributaryRetired(set) => { + channels[&set.network].send(TributaryEvent::TributaryRetired(set)).unwrap() + } + }; } } @@ -1025,24 +773,38 @@ pub async fn run( new_tributary_spec_send.send(spec).unwrap(); } + let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel(); + // Handle new Substrate blocks - tokio::spawn(scan_substrate( + tokio::spawn(crate::substrate::scan_task( raw_db.clone(), key.clone(), processors.clone(), serai.clone(), new_tributary_spec_send, + tributary_retired_send, )); // Handle the Tributaries // This should be large enough for an entire rotation of all tributaries // If it's too small, the coordinator fail to boot, which is a decent sanity check - let (new_tributary, mut new_tributary_listener_1) = broadcast::channel(32); - let new_tributary_listener_2 = new_tributary.subscribe(); - let new_tributary_listener_3 = new_tributary.subscribe(); - let new_tributary_listener_4 = new_tributary.subscribe(); - let new_tributary_listener_5 = new_tributary.subscribe(); + let (tributary_event, mut tributary_event_listener_1) = broadcast::channel(32); + let tributary_event_listener_2 = tributary_event.subscribe(); + let tributary_event_listener_3 = tributary_event.subscribe(); + let tributary_event_listener_4 = tributary_event.subscribe(); + let tributary_event_listener_5 = tributary_event.subscribe(); + + // Emit TributaryEvent::TributaryRetired + tokio::spawn({ + let tributary_event = tributary_event.clone(); + async move { + loop { + let retired = tributary_retired_recv.recv().await.unwrap(); + tributary_event.send(TributaryEvent::TributaryRetired(retired)).map_err(|_| ()).unwrap(); + } + } + }); // Spawn a task to further add Tributaries as needed tokio::spawn({ @@ -1059,9 +821,9 @@ pub async fn run( let key = key.clone(); 
let processors = processors.clone(); let p2p = p2p.clone(); - let new_tributary = new_tributary.clone(); + let tributary_event = tributary_event.clone(); async move { - add_tributary(raw_db, key, &processors, p2p, &new_tributary, spec).await; + add_tributary(raw_db, key, &processors, p2p, &tributary_event, spec).await; } }); } @@ -1069,29 +831,39 @@ pub async fn run( }); // When we reach synchrony on an event requiring signing, send our preprocess for it + // TODO: Properly place this into the Tributary scanner, as it's a mess out here let recognized_id = { let raw_db = raw_db.clone(); let key = key.clone(); let tributaries = Arc::new(RwLock::new(HashMap::new())); + // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is + // called tokio::spawn({ let tributaries = tributaries.clone(); + let mut set_to_genesis = HashMap::new(); async move { loop { - match new_tributary_listener_1.recv().await { - Ok(tributary) => { + match tributary_event_listener_1.recv().await { + Ok(TributaryEvent::NewTributary(tributary)) => { + set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis()); tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary); } + Ok(TributaryEvent::TributaryRetired(set)) => { + if let Some(genesis) = set_to_genesis.remove(&set) { + tributaries.write().await.remove(&genesis); + } + } Err(broadcast::error::RecvError::Lagged(_)) => { - panic!("recognized_id lagged to handle new_tributary") + panic!("recognized_id lagged to handle tributary_event") } - Err(broadcast::error::RecvError::Closed) => panic!("new_tributary sender closed"), + Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"), } } } }); - move |network, genesis, id_type, id, nonce| { + move |set: ValidatorSet, genesis, id_type, id, nonce| { let mut raw_db = raw_db.clone(); let key = key.clone(); let tributaries = tributaries.clone(); @@ -1099,10 +871,11 @@ pub async fn run( // The transactions for these 
are fired before the preprocesses are actually // received/saved, creating a race between Tributary ack and the availability of all // Preprocesses - // This waits until the necessary preprocess is available - let get_preprocess = |raw_db, id| async move { + // This waits until the necessary preprocess is available 0, + let get_preprocess = |raw_db, id_type, id| async move { loop { - let Some(preprocess) = MainDb::::first_preprocess(raw_db, network, id) else { + let Some(preprocess) = MainDb::::first_preprocess(raw_db, set.network, id_type, id) + else { sleep(Duration::from_millis(100)).await; continue; }; @@ -1114,26 +887,47 @@ pub async fn run( RecognizedIdType::Batch => Transaction::BatchPreprocess(SignData { plan: id, attempt: 0, - data: get_preprocess(&raw_db, id).await, + data: get_preprocess(&raw_db, id_type, id).await, signed: Transaction::empty_signed(), }), RecognizedIdType::Plan => Transaction::SignPreprocess(SignData { plan: id, attempt: 0, - data: get_preprocess(&raw_db, id).await, + data: get_preprocess(&raw_db, id_type, id).await, signed: Transaction::empty_signed(), }), }; tx.sign(&mut OsRng, genesis, &key, nonce); - let tributaries = tributaries.read().await; - let Some(tributary) = tributaries.get(&genesis) else { - // TODO: This may happen if the task above is simply slow - panic!("tributary we don't have came to consensus on an Batch"); - }; - publish_signed_transaction(&mut raw_db, tributary, tx).await; + let mut first = true; + loop { + if !first { + sleep(Duration::from_millis(100)).await; + } + first = false; + + let tributaries = tributaries.read().await; + let Some(tributary) = tributaries.get(&genesis) else { + // If we don't have this Tributary because it's retired, break and move on + if MainDb::::is_tributary_retired(&raw_db, set) { + break; + } + + // This may happen if the task above is simply slow + log::warn!("tributary we don't have yet came to consensus on an Batch"); + continue; + }; + // This is safe to perform multiple times 
and solely needs atomicity with regards to + // itself + // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet + // taking a txn fails to declare its achieved independence + let mut txn = raw_db.txn(); + publish_signed_transaction(&mut txn, tributary, tx).await; + txn.commit(); + break; + } } } }; @@ -1141,25 +935,25 @@ pub async fn run( // Handle new blocks for each Tributary { let raw_db = raw_db.clone(); - tokio::spawn(scan_tributaries( + tokio::spawn(tributary::scanner::scan_tributaries_task( raw_db, key.clone(), recognized_id, processors.clone(), serai.clone(), - new_tributary_listener_2, + tributary_event_listener_2, )); } // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block // in a while (presumably because we're behind) - tokio::spawn(heartbeat_tributaries(p2p.clone(), new_tributary_listener_3)); + tokio::spawn(p2p::heartbeat_tributaries_task(p2p.clone(), tributary_event_listener_3)); // Handle P2P messages - tokio::spawn(handle_p2p(Ristretto::generator() * key.deref(), p2p, new_tributary_listener_4)); + tokio::spawn(p2p::handle_p2p_task(p2p, tributary_event_listener_4)); // Handle all messages from processors - handle_processors(raw_db, key, serai, processors, new_tributary_listener_5).await; + handle_processors(raw_db, key, serai, processors, tributary_event_listener_5).await; } #[tokio::main] diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index bc252d506..d435f1863 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -1,9 +1,19 @@ use core::{time::Duration, fmt}; -use std::{sync::Arc, time::Instant, io::Read}; +use std::{ + sync::Arc, + io::Read, + collections::HashMap, + time::{SystemTime, Instant}, +}; use async_trait::async_trait; -use tokio::sync::{mpsc, Mutex}; +use serai_db::Db; + +use tokio::{ + sync::{Mutex, RwLock, mpsc, broadcast}, + time::sleep, +}; use libp2p::{ futures::StreamExt, @@ -20,7 +30,9 @@ use libp2p::{ swarm::{NetworkBehaviour, 
SwarmBuilder, SwarmEvent, Swarm}, }; -pub use tributary::P2p as TributaryP2p; +pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p}; + +use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; // TODO: Use distinct topics const LIBP2P_TOPIC: &str = "serai-coordinator"; @@ -366,3 +378,224 @@ impl TributaryP2p for LibP2p { ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await } } + +pub async fn heartbeat_tributaries_task( + p2p: P, + mut tributary_event: broadcast::Receiver>, +) { + let ten_blocks_of_time = + Duration::from_secs((10 * Tributary::::block_time()).into()); + + let mut readers = HashMap::new(); + loop { + loop { + match tributary_event.try_recv() { + Ok(TributaryEvent::NewTributary(ActiveTributary { spec, tributary })) => { + readers.insert(spec.set(), tributary.reader()); + } + Ok(TributaryEvent::TributaryRetired(set)) => { + readers.remove(&set); + } + Err(broadcast::error::TryRecvError::Empty) => break, + Err(broadcast::error::TryRecvError::Lagged(_)) => { + panic!("heartbeat_tributaries lagged to handle tributary_event") + } + Err(broadcast::error::TryRecvError::Closed) => panic!("tributary_event sender closed"), + } + } + + for tributary in readers.values() { + let tip = tributary.tip(); + let block_time = + SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0)); + + // Only trigger syncing if the block is more than a minute behind + if SystemTime::now() > (block_time + Duration::from_secs(60)) { + log::warn!("last known tributary block was over a minute ago"); + let mut msg = tip.to_vec(); + // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating + let timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs(); + // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to + // overlap + let time_unit = timestamp / u64::from(Tributary::::block_time()); 
+ msg.extend(time_unit.to_le_bytes()); + P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; + } + } + + // Only check once every 10 blocks of time + sleep(ten_blocks_of_time).await; + } +} + +pub async fn handle_p2p_task( + p2p: P, + mut tributary_event: broadcast::Receiver>, +) { + let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender>>::new())); + tokio::spawn({ + let p2p = p2p.clone(); + let channels = channels.clone(); + let mut set_to_genesis = HashMap::new(); + async move { + loop { + match tributary_event.recv().await.unwrap() { + TributaryEvent::NewTributary(tributary) => { + let genesis = tributary.spec.genesis(); + set_to_genesis.insert(tributary.spec.set(), genesis); + + let (send, mut recv) = mpsc::unbounded_channel(); + channels.write().await.insert(genesis, send); + + // Per-Tributary P2P message handler + tokio::spawn({ + let p2p = p2p.clone(); + async move { + loop { + let Some(mut msg) = recv.recv().await else { + // Channel closure happens when the tributary retires + break; + }; + match msg.kind { + P2pMessageKind::KeepAlive => {} + + P2pMessageKind::Tributary(msg_genesis) => { + assert_eq!(msg_genesis, genesis); + log::trace!("handling message for tributary {:?}", tributary.spec.set()); + if tributary.tributary.handle_message(&msg.msg).await { + P2p::broadcast(&p2p, msg.kind, msg.msg).await; + } + } + + // TODO2: Rate limit this per timestamp + // And/or slash on Heartbeat which justifies a response, since the node + // obviously was offline and we must now use our bandwidth to compensate for + // them? 
+ P2pMessageKind::Heartbeat(msg_genesis) => { + assert_eq!(msg_genesis, genesis); + if msg.msg.len() != 40 { + log::error!("validator sent invalid heartbeat"); + continue; + } + + let p2p = p2p.clone(); + let spec = tributary.spec.clone(); + let reader = tributary.tributary.reader(); + // Spawn a dedicated task as this may require loading large amounts of data + // from disk and take a notable amount of time + tokio::spawn(async move { + /* + // Have sqrt(n) nodes reply with the blocks + let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64; + // Try to have at least 3 responders + if responders < 3 { + responders = tributary.spec.n().min(3).into(); + } + */ + + /* + // Have up to three nodes respond + let responders = u64::from(spec.n().min(3)); + + // Decide which nodes will respond by using the latest block's hash as a + // mutually agreed upon entropy source + // This isn't a secure source of entropy, yet it's fine for this + let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap()); + // If n = 10, responders = 3, we want `start` to be 0 ..= 7 + // (so the highest is 7, 8, 9) + // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 + let start = + usize::try_from(entropy % (u64::from(spec.n() + 1) - responders)) + .unwrap(); + let mut selected = false; + for validator in &spec.validators() + [start .. (start + usize::try_from(responders).unwrap())] + { + if our_key == validator.0 { + selected = true; + break; + } + } + if !selected { + log::debug!("received heartbeat and not selected to respond"); + return; + } + + log::debug!("received heartbeat and selected to respond"); + */ + + // Have every node respond + // While we could only have a subset respond, LibP2P will sync all messages + // it isn't aware of + // It's cheaper to be aware from our disk than from over the network + // TODO: Spawn a dedicated topic for this heartbeat response? + let mut latest = msg.msg[.. 
32].try_into().unwrap(); + while let Some(next) = reader.block_after(&latest) { + let mut res = reader.block(&next).unwrap().serialize(); + res.extend(reader.commit(&next).unwrap()); + // Also include the timestamp used within the Heartbeat + res.extend(&msg.msg[32 .. 40]); + p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + latest = next; + } + }); + } + + P2pMessageKind::Block(msg_genesis) => { + assert_eq!(msg_genesis, genesis); + let mut msg_ref: &[u8] = msg.msg.as_ref(); + let Ok(block) = Block::::read(&mut msg_ref) else { + log::error!("received block message with an invalidly serialized block"); + continue; + }; + // Get just the commit + msg.msg.drain(.. (msg.msg.len() - msg_ref.len())); + msg.msg.drain((msg.msg.len() - 8) ..); + + let res = tributary.tributary.sync_block(block, msg.msg).await; + log::debug!( + "received block from {:?}, sync_block returned {}", + msg.sender, + res + ); + } + } + } + } + }); + } + TributaryEvent::TributaryRetired(set) => { + if let Some(genesis) = set_to_genesis.remove(&set) { + channels.write().await.remove(&genesis); + } + } + } + } + } + }); + + loop { + let msg = p2p.receive().await; + match msg.kind { + P2pMessageKind::KeepAlive => {} + P2pMessageKind::Tributary(genesis) => { + if let Some(channel) = channels.read().await.get(&genesis) { + channel.send(msg).unwrap(); + } + } + P2pMessageKind::Heartbeat(genesis) => { + if let Some(channel) = channels.read().await.get(&genesis) { + channel.send(msg).unwrap(); + } + } + P2pMessageKind::Block(genesis) => { + if let Some(channel) = channels.read().await.get(&genesis) { + channel.send(msg).unwrap(); + } + } + } + } +} diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index 529273f89..d25125a92 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -1,26 +1,30 @@ use core::{ops::Deref, time::Duration}; -use std::collections::{HashSet, HashMap}; +use std::{ + sync::Arc, + 
collections::{HashSet, HashMap}, +}; use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_client::{ - SeraiError, Block, Serai, + SeraiError, Block, Serai, TemporalSerai, primitives::{BlockHash, NetworkId}, validator_sets::{ - primitives::{ValidatorSet, KeyPair}, + primitives::{ValidatorSet, KeyPair, amortize_excess_key_shares}, ValidatorSetsEvent, }, in_instructions::InInstructionsEvent, - tokens::{primitives::OutInstructionWithBalance, TokensEvent}, + coins::{primitives::OutInstructionWithBalance, TokensEvent}, }; use serai_db::DbTxn; use processor_messages::SubstrateContext; -use tokio::time::sleep; +use futures::stream::StreamExt; +use tokio::{sync::mpsc, time::sleep}; use crate::{ Db, @@ -33,33 +37,53 @@ pub use db::*; async fn in_set( key: &Zeroizing<::F>, - serai: &Serai, + serai: &TemporalSerai<'_>, set: ValidatorSet, - block_hash: [u8; 32], ) -> Result, SeraiError> { - let Some(participants) = serai.get_validator_set_participants(set.network, block_hash).await? - else { + let Some(participants) = serai.validator_sets().participants(set.network).await? else { return Ok(None); }; let key = (Ristretto::generator() * key.deref()).to_bytes(); Ok(Some(participants.iter().any(|participant| participant.0 == key))) } -async fn handle_new_set( - db: &mut D, +async fn handle_new_set( + txn: &mut D::Transaction<'_>, key: &Zeroizing<::F>, - create_new_tributary: CNT, + new_tributary_spec: &mpsc::UnboundedSender, serai: &Serai, block: &Block, set: ValidatorSet, ) -> Result<(), SeraiError> { - if in_set(key, serai, set, block.hash()).await?.expect("NewSet for set which doesn't exist") { + if in_set(key, &serai.as_of(block.hash()), set) + .await? + .expect("NewSet for set which doesn't exist") + { log::info!("present in set {:?}", set); - let set_participants = serai - .get_validator_set_participants(set.network, block.hash()) - .await? 
- .expect("NewSet for set which doesn't exist"); + let set_data = { + let serai = serai.as_of(block.hash()).validator_sets(); + let set_participants = + serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); + + let allocation_per_key_share = serai + .allocation_per_key_share(set.network) + .await? + .expect("NewSet for set which didn't have an allocation per key share") + .0; + + let mut set_data = vec![]; + for participant in set_participants { + let allocation = serai + .allocation(set.network, participant) + .await? + .expect("validator selected for set yet didn't have an allocation") + .0; + set_data.push((participant, allocation / allocation_per_key_share)); + } + amortize_excess_key_shares(&mut set_data); + set_data + }; let time = if let Ok(time) = block.time() { time @@ -67,7 +91,7 @@ async fn handle_new_set( assert_eq!(block.number(), 0); // Use the next block's time loop { - let Ok(Some(res)) = serai.get_block_by_number(1).await else { + let Ok(Some(res)) = serai.block_by_number(1).await else { sleep(Duration::from_secs(5)).await; continue; }; @@ -82,8 +106,19 @@ async fn handle_new_set( const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120; let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY; - let spec = TributarySpec::new(block.hash(), time, set, set_participants); - create_new_tributary(db, spec.clone()); + let spec = TributarySpec::new(block.hash(), time, set, set_data); + + log::info!("creating new tributary for {:?}", spec.set()); + + // Save it to the database now, not on the channel receiver's side, so this is safe against + // reboots + // If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries + // If this txn doesn't finish, this will be re-fired + // If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the + // prior fired event may have not been received yet + crate::MainDb::::add_participating_in_tributary(txn, &spec); + + 
new_tributary_spec.send(spec).unwrap(); } else { log::info!("not present in set {:?}", set); } @@ -111,7 +146,9 @@ async fn handle_key_gen( context: SubstrateContext { serai_time: block.time().unwrap() / 1000, network_latest_finalized_block: serai - .get_latest_block_for_network(block.hash(), set.network) + .as_of(block.hash()) + .in_instructions() + .latest_block_for_network(set.network) .await? // The processor treats this as a magic value which will cause it to find a network // block which has a time greater than or equal to the Serai time @@ -132,8 +169,6 @@ async fn handle_batch_and_burns( serai: &Serai, block: &Block, ) -> Result<(), SeraiError> { - let hash = block.hash(); - // Track which networks had events with a Vec in ordr to preserve the insertion order // While that shouldn't be needed, ensuring order never hurts, and may enable design choices // with regards to Processor <-> Coordinator message passing @@ -152,7 +187,8 @@ async fn handle_batch_and_burns( let mut batches = HashMap::>::new(); let mut burns = HashMap::new(); - for batch in serai.get_batch_events(hash).await? { + let serai = serai.as_of(block.hash()); + for batch in serai.in_instructions().batch_events().await? { if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } = batch { @@ -172,7 +208,7 @@ async fn handle_batch_and_burns( } } - for burn in serai.get_burn_events(hash).await? { + for burn in serai.coins().burn_events().await? { if let TokensEvent::Burn { address: _, balance, instruction } = burn { let network = balance.coin.network(); network_had_event(&mut burns, &mut batches, network); @@ -192,7 +228,8 @@ async fn handle_batch_and_burns( } else { // If it's had a batch or a burn, it must have had a block acknowledged serai - .get_latest_block_for_network(hash, network) + .in_instructions() + .latest_block_for_network(network) .await? 
.expect("network had a batch/burn yet never set a latest block") }; @@ -219,11 +256,11 @@ async fn handle_batch_and_burns( // Handle a specific Substrate block, returning an error when it fails to get data // (not blocking / holding) -#[allow(clippy::needless_pass_by_ref_mut)] // False positive? -async fn handle_block( +async fn handle_block( db: &mut SubstrateDb, key: &Zeroizing<::F>, - create_new_tributary: CNT, + new_tributary_spec: &mpsc::UnboundedSender, + tributary_retired: &mpsc::UnboundedSender, processors: &Pro, serai: &Serai, block: Block, @@ -234,7 +271,7 @@ async fn handle_block::handled_event(&db.0, hash, event_id) { log::info!("found fresh new set event {:?}", new_set); - handle_new_set(&mut db.0, key, create_new_tributary.clone(), serai, &block, set).await?; let mut txn = db.0.txn(); + handle_new_set::(&mut txn, key, new_tributary_spec, serai, &block, set).await?; SubstrateDb::::handle_event(&mut txn, hash, event_id); txn.commit(); } @@ -260,7 +297,7 @@ async fn handle_block::handled_event(&db.0, hash, event_id) { log::info!("found fresh key gen event {:?}", key_gen); if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen { @@ -281,6 +318,26 @@ async fn handle_block::handled_event(&db.0, hash, event_id) { + log::info!("found fresh set retired event {:?}", retired_set); + let mut txn = db.0.txn(); + crate::MainDb::::retire_tributary(&mut txn, set); + tributary_retired.send(set).unwrap(); + SubstrateDb::::handle_event(&mut txn, hash, event_id); + txn.commit(); + } + event_id += 1; + } + // Finally, tell the processor of acknowledged blocks/burns // This uses a single event as. 
unlike prior events which individually executed code, all // following events share data collection @@ -296,16 +353,17 @@ async fn handle_block( +async fn handle_new_blocks( db: &mut SubstrateDb, key: &Zeroizing<::F>, - create_new_tributary: CNT, + new_tributary_spec: &mpsc::UnboundedSender, + tributary_retired: &mpsc::UnboundedSender, processors: &Pro, serai: &Serai, next_block: &mut u64, ) -> Result<(), SeraiError> { // Check if there's been a new Substrate block - let latest = serai.get_latest_block().await?; + let latest = serai.latest_block().await?; let latest_number = latest.number(); if latest_number < *next_block { return Ok(()); @@ -317,14 +375,15 @@ pub async fn handle_new_blocks( + db: D, + key: Zeroizing<::F>, + processors: Pro, + serai: Arc, + new_tributary_spec: mpsc::UnboundedSender, + tributary_retired: mpsc::UnboundedSender, +) { + log::info!("scanning substrate"); + + let mut db = SubstrateDb::new(db); + let mut next_substrate_block = db.next_block(); + + let new_substrate_block_notifier = { + let serai = &serai; + move || async move { + loop { + match serai.newly_finalized_block().await { + Ok(sub) => return sub, + Err(e) => { + log::error!("couldn't communicate with serai node: {e}"); + sleep(Duration::from_secs(5)).await; + } + } + } + } + }; + let mut substrate_block_notifier = new_substrate_block_notifier().await; + + loop { + // await the next block, yet if our notifier had an error, re-create it + { + let Ok(next_block) = + tokio::time::timeout(Duration::from_secs(60), substrate_block_notifier.next()).await + else { + // Timed out, which may be because Serai isn't finalizing or may be some issue with the + // notifier + if serai.latest_block().await.map(|block| block.number()).ok() == + Some(next_substrate_block.saturating_sub(1)) + { + log::info!("serai hasn't finalized a block in the last 60s..."); + } else { + substrate_block_notifier = new_substrate_block_notifier().await; + } + continue; + }; + + // next_block is a Option + if 
next_block.and_then(Result::ok).is_none() { + substrate_block_notifier = new_substrate_block_notifier().await; + continue; + } + } + + match handle_new_blocks( + &mut db, + &key, + &new_tributary_spec, + &tributary_retired, + &processors, + &serai, + &mut next_substrate_block, + ) + .await + { + Ok(()) => {} + Err(e) => { + log::error!("couldn't communicate with serai node: {e}"); + sleep(Duration::from_secs(5)).await; + } + } + } +} + +/// Gets the expected ID for the next Batch. +pub(crate) async fn get_expected_next_batch(serai: &Serai, network: NetworkId) -> u32 { + let mut first = true; + loop { + if !first { + log::error!("{} {network:?}", "couldn't connect to Serai node to get the next batch ID for",); + sleep(Duration::from_secs(5)).await; + } + first = false; + + let Ok(latest_block) = serai.latest_block().await else { + continue; + }; + let Ok(last) = + serai.as_of(latest_block.hash()).in_instructions().last_batch_for_network(network).await + else { + continue; + }; + break if let Some(last) = last { last + 1 } else { 0 }; + } +} + +/// Verifies `Batch`s which have already been indexed from Substrate. +pub(crate) async fn verify_published_batches( + txn: &mut D::Transaction<'_>, + network: NetworkId, + optimistic_up_to: u32, +) -> Option { + // TODO: Localize from MainDb to SubstrateDb + let last = crate::MainDb::::last_verified_batch(txn, network); + for id in last.map(|last| last + 1).unwrap_or(0) ..= optimistic_up_to { + let Some(on_chain) = SubstrateDb::::batch_instructions_hash(txn, network, id) else { + break; + }; + let off_chain = crate::MainDb::::expected_batch(txn, network, id).unwrap(); + if on_chain != off_chain { + // Halt operations on this network and spin, as this is a critical fault + loop { + log::error!( + "{}! 
network: {:?} id: {} off-chain: {} on-chain: {}", + "on-chain batch doesn't match off-chain", + network, + id, + hex::encode(off_chain), + hex::encode(on_chain), + ); + sleep(Duration::from_secs(60)).await; + } + } + crate::MainDb::::save_last_verified_batch(txn, network, id); + } + + crate::MainDb::::last_verified_batch(txn, network) +} diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs index 6aaa907aa..9a01e1223 100644 --- a/coordinator/src/tests/mod.rs +++ b/coordinator/src/tests/mod.rs @@ -1,7 +1,7 @@ use core::fmt::Debug; use std::{ sync::Arc, - collections::{VecDeque, HashMap}, + collections::{VecDeque, HashSet, HashMap}, }; use serai_client::primitives::NetworkId; @@ -45,11 +45,11 @@ impl Processors for MemProcessors { #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] -pub struct LocalP2p(usize, pub Arc)>>>>); +pub struct LocalP2p(usize, pub Arc>, Vec)>>)>>); impl LocalP2p { pub fn new(validators: usize) -> Vec { - let shared = Arc::new(RwLock::new(vec![VecDeque::new(); validators])); + let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators]))); let mut res = vec![]; for i in 0 .. 
validators { res.push(LocalP2p(i, shared.clone())); @@ -63,11 +63,22 @@ impl P2p for LocalP2p { type Id = usize; async fn send_raw(&self, to: Self::Id, msg: Vec) { - self.1.write().await[to].push_back((self.0, msg)); + self.1.write().await.1[to].push_back((self.0, msg)); } async fn broadcast_raw(&self, msg: Vec) { - for (i, msg_queue) in self.1.write().await.iter_mut().enumerate() { + // Content-based deduplication + let mut lock = self.1.write().await; + { + let already_sent = &mut lock.0; + if already_sent.contains(&msg) { + return; + } + already_sent.insert(msg.clone()); + } + let queues = &mut lock.1; + + for (i, msg_queue) in queues.iter_mut().enumerate() { if i == self.0 { continue; } @@ -78,7 +89,7 @@ impl P2p for LocalP2p { async fn receive_raw(&self) -> (Self::Id, Vec) { // This is a cursed way to implement an async read from a Vec loop { - if let Some(res) = self.1.write().await[self.0].pop_front() { + if let Some(res) = self.1.write().await.1[self.0].pop_front() { return res; } tokio::time::sleep(std::time::Duration::from_millis(100)).await; diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index a7d227472..36bdef416 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -54,7 +54,7 @@ pub fn new_spec( let set_participants = keys .iter() - .map(|key| sr25519::Public((::generator() * **key).to_bytes())) + .map(|key| (sr25519::Public((::generator() * **key).to_bytes()), 1)) .collect::>(); let res = TributarySpec::new(serai_block, start_time, set, set_participants); diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index aa8ea7c94..433faad34 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -56,7 +56,7 @@ async fn dkg_test() { // Publish all commitments but one for (i, tx) in txs.iter().enumerate().skip(1) { - assert!(tributaries[i].1.add_transaction(tx.clone()).await); + 
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } // Wait until these are included @@ -104,7 +104,7 @@ async fn dkg_test() { // Publish the last commitment let block_before_tx = tributaries[0].1.tip().await; - assert!(tributaries[0].1.add_transaction(txs[0].clone()).await); + assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; sleep(Duration::from_secs(Tributary::::block_time().into())).await; @@ -181,7 +181,7 @@ async fn dkg_test() { let block_before_tx = tributaries[0].1.tip().await; for (i, tx) in txs.iter().enumerate().skip(1) { - assert!(tributaries[i].1.add_transaction(tx.clone()).await); + assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } for tx in txs.iter().skip(1) { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; @@ -205,7 +205,7 @@ async fn dkg_test() { // Publish the final set of shares let block_before_tx = tributaries[0].1.tip().await; - assert!(tributaries[0].1.add_transaction(txs[0].clone()).await); + assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; sleep(Duration::from_secs(Tributary::::block_time().into())).await; @@ -296,7 +296,7 @@ async fn dkg_test() { } let block_before_tx = tributaries[0].1.tip().await; for (i, tx) in txs.iter().enumerate() { - assert!(tributaries[i].1.add_transaction(tx.clone()).await); + assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } for tx in txs.iter() { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs index 87576dd8f..3cbc686e5 100644 --- a/coordinator/src/tests/tributary/handle_p2p.rs +++ b/coordinator/src/tests/tributary/handle_p2p.rs @@ -3,8 +3,6 @@ use std::sync::Arc; 
use rand_core::OsRng; -use ciphersuite::{Ciphersuite, Ristretto}; - use tokio::{sync::broadcast, time::sleep}; use serai_db::MemDb; @@ -13,7 +11,8 @@ use tributary::Tributary; use crate::{ tributary::Transaction, - ActiveTributary, handle_p2p, + ActiveTributary, TributaryEvent, + p2p::handle_p2p_task, tests::{ LocalP2p, tributary::{new_keys, new_spec, new_tributaries}, @@ -29,13 +28,13 @@ async fn handle_p2p_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; - for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { + for (p2p, tributary) in tributaries.drain(..) { let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); - tokio::spawn(handle_p2p(Ristretto::generator() * *keys[i], p2p, new_tributary_recv)); + tokio::spawn(handle_p2p_task(p2p, new_tributary_recv)); new_tributary_send - .send(ActiveTributary { spec: spec.clone(), tributary }) + .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") .unwrap(); tributary_senders.push(new_tributary_send); diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 3dfc3757e..1ff2d6b11 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -13,7 +13,8 @@ use tributary::Tributary; use crate::{ tributary::Transaction, - ActiveTributary, handle_p2p, heartbeat_tributaries, + ActiveTributary, TributaryEvent, + p2p::{heartbeat_tributaries_task, handle_p2p_task}, tests::{ LocalP2p, tributary::{new_keys, new_spec, new_tributaries}, @@ -37,14 +38,13 @@ async fn sync_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; let mut p2p_threads = vec![]; - for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { + for (p2p, tributary) in tributaries.drain(..) 
{ let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); - let thread = - tokio::spawn(handle_p2p(Ristretto::generator() * *keys[i], p2p, new_tributary_recv)); + let thread = tokio::spawn(handle_p2p_task(p2p, new_tributary_recv)); new_tributary_send - .send(ActiveTributary { spec: spec.clone(), tributary }) + .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") .unwrap(); tributary_senders.push(new_tributary_send); @@ -52,12 +52,13 @@ async fn sync_test() { } let tributaries = tributary_arcs; - // After three blocks of time, we should have a new block + // After four blocks of time, we should have a new block // We don't wait one block of time as we may have missed the chance for the first block // We don't wait two blocks because we may have missed the chance, and then had a failure to - // propose by our 'offline' validator + // propose by our 'offline' validator, which would cause the Tendermint round time to increase, + // requiring a longer delay let block_time = u64::from(Tributary::::block_time()); - sleep(Duration::from_secs(3 * block_time)).await; + sleep(Duration::from_secs(4 * block_time)).await; let tip = tributaries[0].tip().await; assert!(tip != spec.genesis()); @@ -70,15 +71,18 @@ async fn sync_test() { // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's // pending P2P messages - syncer_p2p.1.write().await.last_mut().unwrap().clear(); + syncer_p2p.1.write().await.1.last_mut().unwrap().clear(); // Have it join the net let syncer_key = Ristretto::generator() * *syncer_key; let syncer_tributary = Arc::new(syncer_tributary); let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5); - tokio::spawn(handle_p2p(syncer_key, syncer_p2p.clone(), syncer_tributary_recv)); + tokio::spawn(handle_p2p_task(syncer_p2p.clone(), 
syncer_tributary_recv)); syncer_tributary_send - .send(ActiveTributary { spec: spec.clone(), tributary: syncer_tributary.clone() }) + .send(TributaryEvent::NewTributary(ActiveTributary { + spec: spec.clone(), + tributary: syncer_tributary.clone(), + })) .map_err(|_| "failed to send ActiveTributary to syncer") .unwrap(); @@ -94,9 +98,12 @@ async fn sync_test() { // Start the heartbeat protocol let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5); - tokio::spawn(heartbeat_tributaries(syncer_p2p, syncer_heartbeat_tributary_recv)); + tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv)); syncer_heartbeat_tributary_send - .send(ActiveTributary { spec: spec.clone(), tributary: syncer_tributary.clone() }) + .send(TributaryEvent::NewTributary(ActiveTributary { + spec: spec.clone(), + tributary: syncer_tributary.clone(), + })) .map_err(|_| "failed to send ActiveTributary to heartbeat") .unwrap(); diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs index 3030834d7..0d2124dfb 100644 --- a/coordinator/src/tests/tributary/tx.rs +++ b/coordinator/src/tests/tributary/tx.rs @@ -43,7 +43,7 @@ async fn tx_test() { Transaction::DkgCommitments(attempt, commitments.clone(), Transaction::empty_signed()); tx.sign(&mut OsRng, spec.genesis(), &key, 0); - assert!(tributaries[sender].1.add_transaction(tx.clone()).await); + assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true)); let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await; // Also sleep for the block time to ensure the block is synced around before we run checks on it sleep(Duration::from_secs(Tributary::::block_time().into())).await; diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index 9a89139b6..3a936f637 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -79,19 +79,24 @@ impl TributaryDb 
{ } // If a validator has been fatally slashed - fn fatal_slash_key(genesis: [u8; 32]) -> Vec { - Self::tributary_key(b"fatal_slash", genesis) + fn fatal_slashes_key(genesis: [u8; 32]) -> Vec { + Self::tributary_key(b"fatal_slashes", genesis) } - pub fn set_fatally_slashed(txn: &mut D::Transaction<'_>, genesis: [u8; 32], id: [u8; 32]) { - let key = Self::fatal_slash_key(genesis); + fn fatally_slashed_key(account: [u8; 32]) -> Vec { + Self::tributary_key(b"fatally_slashed", account) + } + pub fn set_fatally_slashed(txn: &mut D::Transaction<'_>, genesis: [u8; 32], account: [u8; 32]) { + txn.put(Self::fatally_slashed_key(account), []); + + let key = Self::fatal_slashes_key(genesis); let mut existing = txn.get(&key).unwrap_or(vec![]); // Don't append if we already have it - if existing.chunks(32).any(|ex_id| ex_id == id) { + if existing.chunks(32).any(|existing| existing == account) { return; } - existing.extend(id); + existing.extend(account); txn.put(key, existing); } diff --git a/coordinator/src/tributary/dkg_confirmer.rs b/coordinator/src/tributary/dkg_confirmer.rs new file mode 100644 index 000000000..dc2fdecda --- /dev/null +++ b/coordinator/src/tributary/dkg_confirmer.rs @@ -0,0 +1,194 @@ +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use rand_core::SeedableRng; +use rand_chacha::ChaCha20Rng; + +use transcript::{Transcript, RecommendedTranscript}; +use ciphersuite::{Ciphersuite, Ristretto}; +use frost::{ + FrostError, + dkg::{Participant, musig::musig}, + sign::*, +}; +use frost_schnorrkel::Schnorrkel; + +use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message}; + +use crate::tributary::TributarySpec; + +/* + The following confirms the results of the DKG performed by the Processors onto Substrate. + + This is done by a signature over the generated key pair by the validators' MuSig-aggregated + public key. 
The MuSig-aggregation achieves on-chain efficiency and prevents on-chain censorship + of individual validator's DKG results by the Serai validator set. + + Since we're using the validators' public keys, as needed for their being the root of trust, the + coordinator must perform the signing. This is distinct from all other group-signing operations + which are generally done by the processor. + + Instead of maintaining state, the following rebuilds the full state on every call. This is deemed + acceptable re: performance as: + + 1) The DKG confirmation is only done upon the start of the Tributary. + 2) This is an O(n) algorithm. + 3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET. + + Accordingly, this should be infrequently run and of tolerable algorithmic complexity. + + As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This is in + contradiction with our rebuilding which is dependent on deterministic nonces. Safety is derived + from the deterministic nonces being context-bound under a BFT protocol. The flow is as follows: + + 1) Derive a deterministic nonce by hashing the private key, Tributary parameters, and attempt. + 2) Publish the nonces' commitments, receiving everyone else's *and the DKG shares determining the + message to be signed*. + 3) Sign and publish the signature share. + + In order for nonce re-use to occur, the received nonce commitments, or the received DKG shares, + would have to be distinct and sign would have to be called again. + + Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The + only way to operate on distinct received messages would be if: + + 1) A logical flaw exists, letting new messages overwrite prior messages + 2) A reorganization occurred from chain A to chain B, and with it, different messages + + Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm.
While + a significant amount of processes may be byzantine, leading to BFT being broken, that still will + not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, + would be by rebuilding the local process entirely (this time following chain B). + + Accordingly, safety follows if: + + 1) The local view of received messages is static + 2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains + + We assume the former. The latter is deemed acceptable but sub-optimal. + + The benefit for this behavior is that on a validator's infrastructure collapsing, they can + successfully rebuild on a new system. + + TODO: Replace this with entropy. If a validator happens to have their infrastructure fail at this + exact moment, they should just be kicked out and accept the loss. The risk of losing a private + key on rebuild, by a feature meant to enable rebuild, can't be successfully argued for. + + Not only do we need to use randomly selected entropy, we need to confirm our local preprocess + matches the on-chain preprocess before actually publishing our shares. + + We also need to review how we're handling Processor preprocesses and likely implement the same + on-chain-preprocess-matches-presumed-preprocess check before publishing shares (though a delay of + the re-attempt protocol's trigger length would also be sufficient). +*/ +pub(crate) struct DkgConfirmer; +impl DkgConfirmer { + fn preprocess_internal( + spec: &TributarySpec, + key: &Zeroizing<::F>, + attempt: u32, + ) -> (AlgorithmSignMachine, [u8; 64]) { + // TODO: Does Substrate already have a validator-uniqueness check? 
+ let validators = spec.validators().iter().map(|val| val.0).collect::>(); + + let context = musig_context(spec.set()); + let mut chacha = ChaCha20Rng::from_seed({ + let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); + entropy_transcript.append_message(b"spec", spec.serialize()); + entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); + entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); + Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") + }); + let (machine, preprocess) = AlgorithmMachine::new( + Schnorrkel::new(b"substrate"), + musig(&context, key, &validators) + .expect("confirming the DKG for a set we aren't in/validator present multiple times") + .into(), + ) + .preprocess(&mut chacha); + + (machine, preprocess.serialize().try_into().unwrap()) + } + // Get the preprocess for this confirmation. + pub(crate) fn preprocess( + spec: &TributarySpec, + key: &Zeroizing<::F>, + attempt: u32, + ) -> [u8; 64] { + Self::preprocess_internal(spec, key, attempt).1 + } + + fn share_internal( + spec: &TributarySpec, + key: &Zeroizing<::F>, + attempt: u32, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let machine = Self::preprocess_internal(spec, key, attempt).0; + let preprocesses = preprocesses + .into_iter() + .map(|(p, preprocess)| { + machine + .read_preprocess(&mut preprocess.as_slice()) + .map(|preprocess| (p, preprocess)) + .map_err(|_| p) + }) + .collect::, _>>()?; + let (machine, share) = machine + .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) + .map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + 
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + + Ok((machine, share.serialize().try_into().unwrap())) + } + // Get the share for this confirmation, if the preprocesses are valid. + pub(crate) fn share( + spec: &TributarySpec, + key: &Zeroizing<::F>, + attempt: u32, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<[u8; 32], Participant> { + Self::share_internal(spec, key, attempt, preprocesses, key_pair).map(|(_, share)| share) + } + + pub(crate) fn complete( + spec: &TributarySpec, + key: &Zeroizing<::F>, + attempt: u32, + preprocesses: HashMap>, + key_pair: &KeyPair, + shares: HashMap>, + ) -> Result<[u8; 64], Participant> { + let machine = Self::share_internal(spec, key, attempt, preprocesses, key_pair) + .expect("trying to complete a machine which failed to preprocess") + .0; + + let shares = shares + .into_iter() + .map(|(p, share)| { + machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) + }) + .collect::, _>>()?; + let signature = machine.complete(shares).map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + + Ok(signature.to_bytes()) + } +} diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index 1ec83a97d..cde71d3c3 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -3,23 +3,14 @@ use std::collections::HashMap; use zeroize::Zeroizing; -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{Ciphersuite, Ristretto}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - 
sign::*, -}; -use frost_schnorrkel::Schnorrkel; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost::dkg::Participant; use serai_client::{ Signature, - validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}, + validator_sets::primitives::{ValidatorSet, KeyPair}, subxt::utils::Encoded, - Serai, + SeraiValidatorSets, }; use tributary::Signed; @@ -35,8 +26,10 @@ use serai_db::{Get, Db}; use crate::{ processors::Processors, tributary::{ - Transaction, TributarySpec, Topic, DataSpecification, TributaryDb, nonce_decider::NonceDecider, - scanner::RecognizedIdType, + Transaction, TributarySpec, Topic, DataSpecification, TributaryDb, + nonce_decider::NonceDecider, + dkg_confirmer::DkgConfirmer, + scanner::{RecognizedIdType, RIDTrait}, }, }; @@ -53,122 +46,6 @@ const BATCH_SHARE: &str = "b_share"; const SIGN_PREPROCESS: &str = "s_preprocess"; const SIGN_SHARE: &str = "s_share"; -// Instead of maintaing state, this simply re-creates the machine(s) in-full on every call (which -// should only be once per tributary). -// This simplifies data flow and prevents requiring multiple paths. -// While more expensive, this only runs an O(n) algorithm, which is tolerable to run multiple -// times. -struct DkgConfirmer; -impl DkgConfirmer { - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> (AlgorithmSignMachine, [u8; 64]) { - // TODO: Does Substrate already have a validator-uniqueness check? 
- let validators = spec.validators().iter().map(|val| val.0).collect::>(); - - let context = musig_context(spec.set()); - let mut chacha = ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }); - let (machine, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - musig(&context, key, &validators) - .expect("confirming the DKG for a set we aren't in/validator present multiple times") - .into(), - ) - .preprocess(&mut chacha); - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. - fn preprocess( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> [u8; 64] { - Self::preprocess_internal(spec, key, attempt).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let machine = Self::preprocess_internal(spec, key, attempt).0; - let preprocesses = preprocesses - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | 
FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - // Get the share for this confirmation, if the preprocesses are valid. - fn share( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, attempt, preprocesses, key_pair).map(|(_, share)| share) - } - - fn complete( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - shares: HashMap>, - ) -> Result<[u8; 64], Participant> { - let machine = Self::share_internal(spec, key, attempt, preprocesses, key_pair) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = shares - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok(signature.to_bytes()) - } -} - fn read_known_to_exist_data( getter: &G, spec: &TributarySpec, @@ -234,13 +111,24 @@ pub fn generated_key_pair( DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair) } +pub(crate) fn fatal_slash( + txn: &mut D::Transaction<'_>, + genesis: [u8; 32], + account: [u8; 32], + reason: &str, +) { + log::warn!("fatally slashing {}. 
reason: {}", hex::encode(account), reason); + TributaryDb::::set_fatally_slashed(txn, genesis, account); + // TODO: disconnect the node from network/ban from further participation in all Tributaries +} + pub(crate) async fn handle_application_tx< D: Db, Pro: Processors, FPst: Future, PST: Clone + Fn(ValidatorSet, Encoded) -> FPst, FRid: Future, - RID: crate::RIDTrait, + RID: RIDTrait, >( tx: Transaction, spec: &TributarySpec, @@ -252,20 +140,24 @@ pub(crate) async fn handle_application_tx< ) { let genesis = spec.genesis(); - let handle = |txn: &mut _, data_spec: &DataSpecification, bytes: Vec, signed: &Signed| { + let handle = |txn: &mut ::Transaction<'_>, + data_spec: &DataSpecification, + bytes: Vec, + signed: &Signed| { let Some(curr_attempt) = TributaryDb::::attempt(txn, genesis, data_spec.topic) else { - // TODO: Full slash - todo!(); + // Premature publication of a valid ID/publication of an invalid ID + fatal_slash::( + txn, + genesis, + signed.signer.to_bytes(), + "published data for ID without an attempt", + ); + return None; }; // If they've already published a TX for this attempt, slash - if let Some(data) = TributaryDb::::data(txn, genesis, data_spec, signed.signer) { - if data != bytes { - // TODO: Full slash - todo!(); - } - - // TODO: Slash + if TributaryDb::::data(txn, genesis, data_spec, signed.signer).is_some() { + fatal_slash::(txn, genesis, signed.signer.to_bytes(), "published data multiple times"); return None; } @@ -274,9 +166,15 @@ pub(crate) async fn handle_application_tx< // TODO: Slash for being late return None; } + // If the attempt is greater, this is a premature publication, full slash if data_spec.attempt > curr_attempt { - // TODO: Full slash - todo!(); + fatal_slash::( + txn, + genesis, + signed.signer.to_bytes(), + "published data with an attempt which hasn't started", + ); + return None; } // TODO: We can also full slash if shares before all commitments, or share before the @@ -323,8 +221,8 @@ pub(crate) async fn 
handle_application_tx< Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { if shares.len() != (usize::from(spec.n()) - 1) { - // TODO: Full slash - todo!(); + fatal_slash::(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares"); + return; } let sender_i = spec @@ -412,7 +310,7 @@ pub(crate) async fn handle_application_tx< publish_serai_tx( spec.set(), - Serai::set_validator_set_keys(spec.set().network, key_pair, Signature(sig)), + SeraiValidatorSets::set_keys(spec.set().network, key_pair, Signature(sig)), ) .await; } @@ -425,7 +323,7 @@ pub(crate) async fn handle_application_tx< // Because this Batch has achieved synchrony, its batch ID should be authorized TributaryDb::::recognize_topic(txn, genesis, Topic::Batch(batch)); let nonce = NonceDecider::::handle_batch(txn, genesis, batch); - recognized_id(spec.set().network, genesis, RecognizedIdType::Batch, batch, nonce).await; + recognized_id(spec.set(), genesis, RecognizedIdType::Batch, batch, nonce).await; } Transaction::SubstrateBlock(block) => { @@ -437,7 +335,7 @@ pub(crate) async fn handle_application_tx< let nonces = NonceDecider::::handle_substrate_block(txn, genesis, &plan_ids); for (nonce, id) in nonces.into_iter().zip(plan_ids.into_iter()) { TributaryDb::::recognize_topic(txn, genesis, Topic::Sign(id)); - recognized_id(spec.set().network, genesis, RecognizedIdType::Plan, id, nonce).await; + recognized_id(spec.set(), genesis, RecognizedIdType::Plan, id, nonce).await; } } diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index ab7e3d42d..8f4bdf2dd 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -33,6 +33,8 @@ pub use db::*; mod nonce_decider; pub use nonce_decider::*; +mod dkg_confirmer; + mod handle; pub use handle::*; @@ -51,16 +53,15 @@ impl TributarySpec { serai_block: [u8; 32], start_time: u64, set: ValidatorSet, - set_participants: Vec, + set_participants: Vec<(PublicKey, u64)>, ) -> 
TributarySpec { let mut validators = vec![]; - for participant in set_participants { + for (participant, shares) in set_participants { // TODO: Ban invalid keys from being validators on the Serai side // (make coordinator key a session key?) let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) .expect("invalid key registered as participant"); - // TODO: Give one weight on Tributary per bond instance - validators.push((participant, 1)); + validators.push((participant, shares)); } Self { serai_block, start_time, set, validators } diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index 5d8f00168..ed8949283 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,13 +1,17 @@ -use core::future::Future; +use core::{future::Future, time::Duration}; +use std::sync::Arc; use zeroize::Zeroizing; use ciphersuite::{Ciphersuite, Ristretto}; -use serai_client::{validator_sets::primitives::ValidatorSet, subxt::utils::Encoded}; +use tokio::sync::broadcast; + +use scale::{Encode, Decode}; +use serai_client::{validator_sets::primitives::ValidatorSet, subxt::utils::Encoded, Serai}; use tributary::{ - Transaction as TributaryTransaction, Block, TributaryReader, + TransactionKind, Transaction as TributaryTransaction, Block, TributaryReader, tendermint::{ tx::{TendermintTx, decode_evidence}, TendermintNetwork, @@ -18,27 +22,35 @@ use serai_db::DbTxn; use crate::{ Db, - tributary::handle::handle_application_tx, + tributary::handle::{fatal_slash, handle_application_tx}, processors::Processors, tributary::{TributaryDb, TributarySpec, Transaction}, P2p, }; -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] pub enum RecognizedIdType { Batch, Plan, } +pub(crate) trait RIDTrait: + Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, [u8; 32], u32) -> FRid +{ +} +impl FRid> + RIDTrait for F +{ +} + // Handle a specific Tributary block 
-#[allow(clippy::needless_pass_by_ref_mut)] // False positive? async fn handle_block< D: Db, Pro: Processors, FPst: Future, PST: Clone + Fn(ValidatorSet, Encoded) -> FPst, FRid: Future, - RID: crate::RIDTrait, + RID: RIDTrait, P: P2p, >( db: &mut TributaryDb, @@ -72,9 +84,12 @@ async fn handle_block< // Since anything with evidence is fundamentally faulty behavior, not just temporal errors, // mark the node as fatally slashed - TributaryDb::::set_fatally_slashed(&mut txn, genesis, msgs.0.msg.sender); - - // TODO2: disconnect the node from network/ban from further participation in Tributary + fatal_slash::( + &mut txn, + genesis, + msgs.0.msg.sender, + &format!("invalid tendermint messages: {:?}", msgs), + ); } TributaryTransaction::Application(tx) => { handle_application_tx::( @@ -96,7 +111,7 @@ async fn handle_block< event_id += 1; } - // TODO2: Trigger any necessary re-attempts + // TODO: Trigger any necessary re-attempts } pub(crate) async fn handle_new_blocks< @@ -105,7 +120,7 @@ pub(crate) async fn handle_new_blocks< FPst: Future, PST: Clone + Fn(ValidatorSet, Encoded) -> FPst, FRid: Future, - RID: crate::RIDTrait, + RID: RIDTrait, P: P2p, >( db: &mut TributaryDb, @@ -120,6 +135,20 @@ pub(crate) async fn handle_new_blocks< let mut last_block = db.last_block(genesis); while let Some(next) = tributary.block_after(&last_block) { let block = tributary.block(&next).unwrap(); + + // Make sure we have all of the provided transactions for this block + for tx in &block.transactions { + // Provided TXs will appear first in the Block, so we can break after we hit a non-Provided + let TransactionKind::Provided(order) = tx.kind() else { + break; + }; + + // make sure we have all the provided txs in this block locally + if !tributary.locally_provided_txs_in_block(&block.hash(), order) { + return; + } + } + handle_block::<_, _, _, _, _, _, P>( db, key, @@ -134,3 +163,123 @@ pub(crate) async fn handle_new_blocks< db.set_last_block(genesis, next); } } + +pub(crate) async fn 
scan_tributaries_task< + D: Db, + Pro: Processors, + P: P2p, + FRid: Send + Future, + RID: 'static + Send + Sync + RIDTrait, +>( + raw_db: D, + key: Zeroizing<::F>, + recognized_id: RID, + processors: Pro, + serai: Arc, + mut tributary_event: broadcast::Receiver>, +) { + log::info!("scanning tributaries"); + + loop { + match tributary_event.recv().await { + Ok(crate::TributaryEvent::NewTributary(crate::ActiveTributary { spec, tributary })) => { + // For each Tributary, spawn a dedicated scanner task + tokio::spawn({ + let raw_db = raw_db.clone(); + let key = key.clone(); + let recognized_id = recognized_id.clone(); + let processors = processors.clone(); + let serai = serai.clone(); + async move { + let spec = &spec; + let reader = tributary.reader(); + let mut tributary_db = TributaryDb::new(raw_db.clone()); + loop { + // Check if the set was retired, and if so, don't further operate + if crate::MainDb::::is_tributary_retired(&raw_db, spec.set()) { + break; + } + + // Obtain the next block notification now to prevent obtaining it immediately after + // the next block occurs + let next_block_notification = tributary.next_block_notification().await; + + handle_new_blocks::<_, _, _, _, _, _, P>( + &mut tributary_db, + &key, + recognized_id.clone(), + &processors, + |set, tx| { + let serai = serai.clone(); + async move { + loop { + match serai.publish(&tx).await { + Ok(_) => { + log::info!("set key pair for {set:?}"); + break; + } + // This is assumed to be some ephemeral error due to the assumed fault-free + // creation + // TODO2: Differentiate connection errors from invariants + Err(e) => { + if let Ok(serai) = serai.with_current_latest_block().await { + let serai = serai.validator_sets(); + // Check if this failed because the keys were already set by someone + // else + if matches!(serai.keys(spec.set()).await, Ok(Some(_))) { + log::info!("another coordinator set key pair for {:?}", set); + break; + } + + // The above block may return false if the keys have been 
pruned from + // the state + // Check if this session is no longer the latest session, meaning it at + // some point did set keys, and we're just operating off very + // historical data + if let Ok(Some(current_session)) = + serai.session(spec.set().network).await + { + if current_session.0 > spec.set().session.0 { + log::warn!( + "trying to set keys for a set which isn't the latest {:?}", + set + ); + break; + } + } + } + + log::error!( + "couldn't connect to Serai node to publish set_keys TX: {:?}", + e + ); + tokio::time::sleep(core::time::Duration::from_secs(10)).await; + } + } + } + } + }, + spec, + &reader, + ) + .await; + + // Run either when the notification fires, or every interval of block_time + let _ = tokio::time::timeout( + Duration::from_secs(tributary::Tributary::::block_time().into()), + next_block_notification, + ) + .await; + } + } + }); + } + // The above loop simply checks the DB every few seconds, voiding the need for this event + Ok(crate::TributaryEvent::TributaryRetired(_)) => {} + Err(broadcast::error::RecvError::Lagged(_)) => { + panic!("scan_tributaries lagged to handle tributary_event") + } + Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"), + } + } +} diff --git a/coordinator/tributary/src/block.rs b/coordinator/tributary/src/block.rs index 0a7a2f259..06b3a5a3d 100644 --- a/coordinator/tributary/src/block.rs +++ b/coordinator/tributary/src/block.rs @@ -33,7 +33,10 @@ pub enum BlockError { /// An unsigned transaction which was already added to the chain was present again. #[error("an unsigned transaction which was already added to the chain was present again")] UnsignedAlreadyIncluded, - /// Transactions weren't ordered as expected (Provided, followed by Unsigned, folowed by Signed). + /// A provided transaction which was already added to the chain was present again. 
+ #[error("an provided transaction which was already added to the chain was present again")] + ProvidedAlreadyIncluded, + /// Transactions weren't ordered as expected (Provided, followed by Unsigned, followed by Signed). #[error("transactions weren't ordered as expected (Provided, Unsigned, Signed)")] WrongTransactionOrder, /// The block had a provided transaction this validator has yet to be provided. @@ -175,6 +178,8 @@ impl Block { schema: N::SignatureScheme, commit: impl Fn(u32) -> Option>, unsigned_in_chain: impl Fn([u8; 32]) -> bool, + provided_in_chain: impl Fn([u8; 32]) -> bool, // TODO: merge this with unsigned_on_chain? + allow_non_local_provided: bool, ) -> Result<(), BlockError> { #[derive(Clone, Copy, PartialEq, Eq, Debug)] enum Order { @@ -209,17 +214,21 @@ impl Block { let current_tx_order = match tx.kind() { TransactionKind::Provided(order) => { - let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) - else { - Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? - }; - // Since this was a provided TX, it must be an application TX - let Transaction::Application(tx) = tx else { + if provided_in_chain(tx_hash) { + Err(BlockError::ProvidedAlreadyIncluded)?; + } + + if let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) { + // Since this was a provided TX, it must be an application TX + let Transaction::Application(tx) = tx else { + Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? + }; + if tx != &local { + Err(BlockError::DistinctProvided)?; + } + } else if !allow_non_local_provided { Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? }; - if tx != &local { - Err(BlockError::DistinctProvided)?; - } Order::Provided } @@ -241,12 +250,6 @@ impl Block { } last_tx_order = current_tx_order; - if current_tx_order == Order::Provided { - // We don't need to call verify_transaction since we did when we locally provided this - // transaction. 
Since it's identical, it must be valid - continue; - } - // TODO: should we modify the verify_transaction to take `Transaction` or // use this pattern of verifying tendermint Txs and app txs differently? match tx { diff --git a/coordinator/tributary/src/blockchain.rs b/coordinator/tributary/src/blockchain.rs index d21928ec0..3be658e01 100644 --- a/coordinator/tributary/src/blockchain.rs +++ b/coordinator/tributary/src/blockchain.rs @@ -10,7 +10,7 @@ use tendermint::ext::{Network, Commit}; use crate::{ ReadWrite, ProvidedError, ProvidedTransactions, BlockError, Block, Mempool, Transaction, - transaction::{Signed, TransactionKind, Transaction as TransactionTrait}, + transaction::{Signed, TransactionKind, TransactionError, Transaction as TransactionTrait}, }; #[derive(Debug)] @@ -50,6 +50,9 @@ impl Blockchain { fn unsigned_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"unsigned_included", [genesis, hash].concat()) } + fn provided_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { + D::key(b"tributary_blockchain", b"provided_included", [genesis, hash].concat()) + } fn next_nonce_key(&self, signer: &::G) -> Vec { D::key( b"tributary_blockchain", @@ -136,6 +139,23 @@ impl Blockchain { db.get(Self::block_after_key(&genesis, block)).map(|bytes| bytes.try_into().unwrap()) } + pub(crate) fn locally_provided_txs_in_block( + db: &D, + genesis: &[u8; 32], + block: &[u8; 32], + order: &str, + ) -> bool { + let local_key = ProvidedTransactions::::locally_provided_quantity_key(genesis, order); + let local = + db.get(local_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + let block_key = + ProvidedTransactions::::block_provided_quantity_key(genesis, block, order); + let block = + db.get(block_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + + local >= block + } + pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] { db.get(Self::tip_key(genesis)).map(|bytes| 
bytes.try_into().unwrap()).unwrap_or(genesis) } @@ -145,7 +165,7 @@ impl Blockchain { internal: bool, tx: Transaction, schema: N::SignatureScheme, - ) -> bool { + ) -> Result { let db = self.db.as_ref().unwrap(); let genesis = self.genesis; @@ -182,7 +202,7 @@ impl Blockchain { self.mempool.block(&self.next_nonces, unsigned_in_chain), ); // build_block should not return invalid blocks - self.verify_block::(&block, schema).unwrap(); + self.verify_block::(&block, schema, false).unwrap(); block } @@ -190,10 +210,13 @@ impl Blockchain { &self, block: &Block, schema: N::SignatureScheme, + allow_non_local_provided: bool, ) -> Result<(), BlockError> { let db = self.db.as_ref().unwrap(); let unsigned_in_chain = |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); + let provided_in_chain = + |hash: [u8; 32]| db.get(Self::provided_included_key(&self.genesis, &hash)).is_some(); let commit = |block: u32| -> Option> { let commit = self.commit_by_block_number(block)?; // commit has to be valid if it is coming from our db @@ -207,6 +230,8 @@ impl Blockchain { schema, &commit, unsigned_in_chain, + provided_in_chain, + allow_non_local_provided, ) } @@ -217,7 +242,7 @@ impl Blockchain { commit: Vec, schema: N::SignatureScheme, ) -> Result<(), BlockError> { - self.verify_block::(block, schema)?; + self.verify_block::(block, schema, true)?; log::info!( "adding block {} to tributary {} with {} TXs", @@ -249,7 +274,9 @@ impl Blockchain { for tx in &block.transactions { match tx.kind() { TransactionKind::Provided(order) => { - self.provided.complete(&mut txn, order, tx.hash()); + let hash = tx.hash(); + self.provided.complete(&mut txn, order, self.tip, hash); + txn.put(Self::provided_included_key(&self.genesis, &hash), []); } TransactionKind::Unsigned => { let hash = tx.hash(); diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 9d8094f2e..854c042b3 100644 --- a/coordinator/tributary/src/lib.rs +++ 
b/coordinator/tributary/src/lib.rs @@ -152,6 +152,14 @@ pub struct Tributary { synced_block: Arc>>>, synced_block_result: Arc>, messages: Arc>>>, + + p2p_meta_task_handle: Arc, +} + +impl Drop for Tributary { + fn drop(&mut self) { + self.p2p_meta_task_handle.abort(); + } } impl Tributary { @@ -186,26 +194,29 @@ impl Tributary { let to_rebroadcast = Arc::new(RwLock::new(vec![])); // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the // P2P layer - tokio::spawn({ - let to_rebroadcast = to_rebroadcast.clone(); - let p2p = p2p.clone(); - async move { - loop { - let to_rebroadcast = to_rebroadcast.read().await.clone(); - for msg in to_rebroadcast { - p2p.broadcast(genesis, msg).await; + let p2p_meta_task_handle = Arc::new( + tokio::spawn({ + let to_rebroadcast = to_rebroadcast.clone(); + let p2p = p2p.clone(); + async move { + loop { + let to_rebroadcast = to_rebroadcast.read().await.clone(); + for msg in to_rebroadcast { + p2p.broadcast(genesis, msg).await; + } + tokio::time::sleep(core::time::Duration::from_secs(1)).await; } - tokio::time::sleep(core::time::Duration::from_secs(1)).await; } - } - }); + }) + .abort_handle(), + ); let network = TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; let TendermintHandle { synced_block, synced_block_result, messages, machine } = TendermintMachine::new(network.clone(), block_number, start_time, proposal).await; - tokio::task::spawn(machine.run()); + tokio::spawn(machine.run()); Some(Self { db, @@ -214,6 +225,7 @@ impl Tributary { synced_block: Arc::new(RwLock::new(synced_block)), synced_block_result: Arc::new(RwLock::new(synced_block_result)), messages: Arc::new(RwLock::new(messages)), + p2p_meta_task_handle, }) } @@ -244,10 +256,10 @@ impl Tributary { self.network.blockchain.read().await.next_nonce(signer) } - // Returns if the transaction was new and valid. + // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error. 
// Safe to be &self since the only meaningful usage of self is self.network.blockchain which // successfully acquires its own write lock - pub async fn add_transaction(&self, tx: T) -> bool { + pub async fn add_transaction(&self, tx: T) -> Result { let tx = Transaction::Application(tx); let mut to_broadcast = vec![TRANSACTION_MESSAGE]; tx.write(&mut to_broadcast).unwrap(); @@ -256,7 +268,7 @@ impl Tributary { tx, self.network.signature_scheme(), ); - if res { + if res == Ok(true) { self.network.p2p.broadcast(self.genesis, to_broadcast).await; } res @@ -327,8 +339,8 @@ impl Tributary { tx, self.network.signature_scheme(), ); - log::debug!("received transaction message. valid new transaction: {res}"); - res + log::debug!("received transaction message. valid new transaction: {res:?}"); + res == Ok(true) } Some(&TENDERMINT_MESSAGE) => { @@ -397,6 +409,10 @@ impl TributaryReader { .map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap().end_time) } + pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool { + Blockchain::::locally_provided_txs_in_block(&self.0, &self.1, hash, order) + } + // This isn't static, yet can be read with only minor discrepancy risks pub fn tip(&self) -> [u8; 32] { Blockchain::::tip_from_db(&self.0, self.1) diff --git a/coordinator/tributary/src/mempool.rs b/coordinator/tributary/src/mempool.rs index 988702e6a..00d182a4b 100644 --- a/coordinator/tributary/src/mempool.rs +++ b/coordinator/tributary/src/mempool.rs @@ -8,7 +8,9 @@ use tendermint::ext::{Network, Commit}; use crate::{ ACCOUNT_MEMPOOL_LIMIT, ReadWrite, - transaction::{Signed, TransactionKind, Transaction as TransactionTrait, verify_transaction}, + transaction::{ + Signed, TransactionKind, TransactionError, Transaction as TransactionTrait, verify_transaction, + }, tendermint::tx::verify_tendermint_tx, Transaction, }; @@ -92,7 +94,7 @@ impl Mempool { res } - /// Returns true if this is a valid, new transaction. 
+ // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error. pub(crate) fn add( &mut self, blockchain_next_nonces: &HashMap<::G, u32>, @@ -101,7 +103,7 @@ impl Mempool { schema: N::SignatureScheme, unsigned_in_chain: impl Fn([u8; 32]) -> bool, commit: impl Fn(u32) -> Option>, - ) -> bool { + ) -> Result { match &tx { Transaction::Tendermint(tendermint_tx) => { // All Tendermint transactions should be unsigned @@ -109,13 +111,11 @@ impl Mempool { // check we have the tx in the pool/chain if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) { - return false; + return Ok(false); } // verify the tx - if verify_tendermint_tx::(tendermint_tx, schema, commit).is_err() { - return false; - } + verify_tendermint_tx::(tendermint_tx, schema, commit)?; } Transaction::Application(app_tx) => { match app_tx.kind() { @@ -123,7 +123,7 @@ impl Mempool { // Get the nonce from the blockchain let Some(blockchain_next_nonce) = blockchain_next_nonces.get(signer).cloned() else { // Not a participant - return false; + Err(TransactionError::InvalidSigner)? 
}; // If the blockchain's nonce is greater than the mempool's, use it @@ -140,32 +140,28 @@ impl Mempool { // If we have too many transactions from this sender, don't add this yet UNLESS we are // this sender if !internal && (nonce >= &(blockchain_next_nonce + ACCOUNT_MEMPOOL_LIMIT)) { - return false; + Err(TransactionError::TooManyInMempool)?; } - if verify_transaction(app_tx, self.genesis, &mut self.next_nonces).is_err() { - return false; - } + verify_transaction(app_tx, self.genesis, &mut self.next_nonces)?; debug_assert_eq!(self.next_nonces[signer], nonce + 1); } TransactionKind::Unsigned => { // check we have the tx in the pool/chain if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) { - return false; + return Ok(false); } - if app_tx.verify().is_err() { - return false; - } + app_tx.verify()?; } - TransactionKind::Provided(_) => return false, + TransactionKind::Provided(_) => Err(TransactionError::ProvidedAddedToMempool)?, } } } // Save the TX to the pool self.save_tx(tx); - true + Ok(true) } // Returns None if the mempool doesn't have a nonce tracked. diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 0bf284d4a..e4e8193c3 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -11,12 +11,15 @@ pub enum ProvidedError { /// The provided transaction's kind wasn't Provided #[error("transaction wasn't a provided transaction")] NotProvided, - /// The provided transaction was invalid. 
+ /// The provided transaction was invalid #[error("provided transaction was invalid")] InvalidProvided(TransactionError), /// Transaction was already provided #[error("transaction was already provided")] AlreadyProvided, + /// Local transaction mismatches the on-chain provided + #[error("local provides mismatches on-chain provided")] + LocalMismatchesOnChain, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -34,6 +37,27 @@ impl ProvidedTransactions { fn current_provided_key(&self) -> Vec { D::key(b"tributary_provided", b"current", self.genesis) } + pub(crate) fn locally_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec { + D::key(b"tributary_provided", b"local_quantity", [genesis, order.as_bytes()].concat()) + } + pub(crate) fn on_chain_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec { + D::key(b"tributary_provided", b"on_chain_quantity", [genesis, order.as_bytes()].concat()) + } + pub(crate) fn block_provided_quantity_key( + genesis: &[u8; 32], + block: &[u8; 32], + order: &str, + ) -> Vec { + D::key(b"tributary_provided", b"block_quantity", [genesis, block, order.as_bytes()].concat()) + } + + pub(crate) fn on_chain_provided_key(genesis: &[u8; 32], order: &str, id: u32) -> Vec { + D::key( + b"tributary_provided", + b"on_chain_tx", + [genesis, order.as_bytes(), &id.to_le_bytes()].concat(), + ) + } pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self { let mut res = ProvidedTransactions { db, genesis, transactions: HashMap::new() }; @@ -69,27 +93,61 @@ impl ProvidedTransactions { Ok(()) => {} Err(e) => Err(ProvidedError::InvalidProvided(e))?, } - let tx_hash = tx.hash(); + + // Check it wasn't already provided let provided_key = self.transaction_key(&tx_hash); if self.db.get(&provided_key).is_some() { Err(ProvidedError::AlreadyProvided)?; } + // get local and on-chain tx numbers + let local_key = Self::locally_provided_quantity_key(&self.genesis, order); + let mut local_quantity = self + .db + .get(&local_key) + .map(|bytes| 
u32::from_le_bytes(bytes.try_into().unwrap())) + .unwrap_or(0); + let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); + let on_chain_quantity = self + .db + .get(on_chain_key) + .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) + .unwrap_or(0); + let current_provided_key = self.current_provided_key(); - #[allow(clippy::unwrap_or_default)] - let mut currently_provided = self.db.get(¤t_provided_key).unwrap_or(vec![]); + // This would have a race-condition with multiple calls to provide, though this takes &mut self + // peventing multiple calls at once let mut txn = self.db.txn(); txn.put(provided_key, tx.serialize()); - currently_provided.extend(tx_hash); - txn.put(current_provided_key, currently_provided); - txn.commit(); - if self.transactions.get(order).is_none() { - self.transactions.insert(order, VecDeque::new()); + let this_provided_id = local_quantity; + + local_quantity += 1; + txn.put(local_key, local_quantity.to_le_bytes()); + + if this_provided_id < on_chain_quantity { + // Verify against the on-chain version + if tx_hash.as_ref() != + txn.get(Self::on_chain_provided_key(&self.genesis, order, this_provided_id)).unwrap() + { + Err(ProvidedError::LocalMismatchesOnChain)?; + } + txn.commit(); + } else { + #[allow(clippy::unwrap_or_default)] + let mut currently_provided = txn.get(¤t_provided_key).unwrap_or(vec![]); + currently_provided.extend(tx_hash); + txn.put(current_provided_key, currently_provided); + txn.commit(); + + if self.transactions.get(order).is_none() { + self.transactions.insert(order, VecDeque::new()); + } + self.transactions.get_mut(order).unwrap().push_back(tx); } - self.transactions.get_mut(order).unwrap().push_back(tx); + Ok(()) } @@ -98,27 +156,46 @@ impl ProvidedTransactions { &mut self, txn: &mut D::Transaction<'_>, order: &'static str, + block: [u8; 32], tx: [u8; 32], ) { - assert_eq!(self.transactions.get_mut(order).unwrap().pop_front().unwrap().hash(), tx); - - let current_provided_key = 
self.current_provided_key(); - let mut currently_provided = txn.get(¤t_provided_key).unwrap(); - - // Find this TX's hash - let mut i = 0; - loop { - if currently_provided[i .. (i + 32)] == tx { - assert_eq!(¤tly_provided.drain(i .. (i + 32)).collect::>(), &tx); - break; + if let Some(next_tx) = self.transactions.get_mut(order).and_then(|queue| queue.pop_front()) { + assert_eq!(next_tx.hash(), tx); + + let current_provided_key = self.current_provided_key(); + let mut currently_provided = txn.get(¤t_provided_key).unwrap(); + + // Find this TX's hash + let mut i = 0; + loop { + if currently_provided[i .. (i + 32)] == tx { + assert_eq!(¤tly_provided.drain(i .. (i + 32)).collect::>(), &tx); + break; + } + + i += 32; + if i >= currently_provided.len() { + panic!("couldn't find completed TX in currently provided"); + } } - i += 32; - if i >= currently_provided.len() { - panic!("couldn't find completed TX in currently provided"); - } + txn.put(current_provided_key, currently_provided); } - txn.put(current_provided_key, currently_provided); + // bump the on-chain tx number. 
+ let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); + let block_order_key = Self::block_provided_quantity_key(&self.genesis, &block, order); + let mut on_chain_quantity = self + .db + .get(&on_chain_key) + .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) + .unwrap_or(0); + + let this_provided_id = on_chain_quantity; + txn.put(Self::on_chain_provided_key(&self.genesis, order, this_provided_id), tx); + + on_chain_quantity += 1; + txn.put(on_chain_key, on_chain_quantity.to_le_bytes()); + txn.put(block_order_key, on_chain_quantity.to_le_bytes()); } } diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index fad9b3e83..67e0021b3 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -35,10 +35,7 @@ use tendermint::{ SlashEvent, }; -use tokio::{ - sync::RwLock, - time::{Duration, sleep}, -}; +use tokio::sync::RwLock; use crate::{ TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite, @@ -352,7 +349,8 @@ impl Network for TendermintNetwork true, Transaction::Tendermint(tx), self.signature_scheme(), - ) { + ) == Ok(true) + { self.p2p.broadcast(signer.genesis, to_broadcast).await; } } @@ -360,12 +358,15 @@ impl Network for TendermintNetwork async fn validate(&mut self, block: &Self::Block) -> Result<(), TendermintBlockError> { let block = Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; - self.blockchain.read().await.verify_block::(&block, self.signature_scheme()).map_err( - |e| match e { + self + .blockchain + .read() + .await + .verify_block::(&block, self.signature_scheme(), false) + .map_err(|e| match e { BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, _ => TendermintBlockError::Fatal, - }, - ) + }) } async fn add_block( @@ -412,9 +413,6 @@ impl Network for TendermintNetwork hex::encode(hash), hex::encode(self.genesis) ); - // TODO: Use a notification system 
for when we have a new provided, in order to minimize - // latency - sleep(Duration::from_secs(Self::block_time().into())).await; } _ => return invalid_block(), } diff --git a/coordinator/tributary/src/tests/block.rs b/coordinator/tributary/src/tests/block.rs index 2bc4b8235..67b2c27a8 100644 --- a/coordinator/tributary/src/tests/block.rs +++ b/coordinator/tributary/src/tests/block.rs @@ -82,6 +82,7 @@ fn empty_block() { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let unsigned_in_chain = |_: [u8; 32]| false; + let provided_in_chain = |_: [u8; 32]| false; Block::::new(LAST, vec![], vec![]) .verify::( GENESIS, @@ -91,6 +92,8 @@ fn empty_block() { validators, commit, unsigned_in_chain, + provided_in_chain, + false, ) .unwrap(); } @@ -114,6 +117,7 @@ fn duplicate_nonces() { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let unsigned_in_chain = |_: [u8; 32]| false; + let provided_in_chain = |_: [u8; 32]| false; let res = Block::new(LAST, vec![], mempool).verify::( GENESIS, @@ -123,6 +127,8 @@ fn duplicate_nonces() { validators.clone(), commit, unsigned_in_chain, + provided_in_chain, + false, ); if i == 1 { res.unwrap(); diff --git a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary/src/tests/blockchain.rs index 21051095c..70a8c48c3 100644 --- a/coordinator/tributary/src/tests/blockchain.rs +++ b/coordinator/tributary/src/tests/blockchain.rs @@ -48,7 +48,7 @@ fn block_addition() { assert_eq!(block.header.parent, genesis); assert_eq!(block.header.transactions, [0; 32]); - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); assert!(blockchain.add_block::(&block, vec![], validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); assert_eq!(blockchain.block_number(), 1); @@ -71,14 +71,14 @@ fn invalid_block() { #[allow(clippy::redundant_clone)] // False positive let mut block = block.clone(); 
block.header.parent = Blake2s256::digest(block.header.parent).into(); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); } // Mutate tranactions merkle { let mut block = block; block.header.transactions = Blake2s256::digest(block.header.transactions).into(); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); } let key = Zeroizing::new(::F::random(&mut OsRng)); @@ -89,7 +89,7 @@ fn invalid_block() { // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); } // Run the rest of the tests with them as a participant @@ -99,24 +99,22 @@ fn invalid_block() { { let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); } { // Add a valid transaction let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); - assert!(blockchain.add_transaction::( - true, - Transaction::Application(tx.clone()), - validators.clone() - )); + blockchain + .add_transaction::(true, Transaction::Application(tx.clone()), validators.clone()) + .unwrap(); let mut block = blockchain.build_block::(validators.clone()); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); // And verify mutating the 
transactions merkle now causes a failure block.header.transactions = merkle(&[]); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); } { @@ -124,26 +122,24 @@ fn invalid_block() { let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5); // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); } { // Invalid signature let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); - assert!(blockchain.add_transaction::( - true, - Transaction::Application(tx), - validators.clone() - )); + blockchain + .add_transaction::(true, Transaction::Application(tx), validators.clone()) + .unwrap(); let mut block = blockchain.build_block::(validators.clone()); - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); match &mut block.transactions[0] { Transaction::Application(tx) => { tx.1.signature.s += ::F::ONE; } _ => panic!("non-signed tx found"), } - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); // Make sure this isn't because the merkle changed due to the transaction hash including the // signature (which it explicitly isn't allowed to anyways) @@ -170,11 +166,9 @@ fn signed_transaction() { panic!("tendermint tx found"); }; let next_nonce = blockchain.next_nonce(signer).unwrap(); - assert!(blockchain.add_transaction::( - true, - Transaction::Application(tx), - validators.clone() - )); + blockchain + .add_transaction::(true, Transaction::Application(tx), validators.clone()) + .unwrap(); assert_eq!(next_nonce + 1, 
blockchain.next_nonce(signer).unwrap()); } let block = blockchain.build_block::(validators.clone()); @@ -191,7 +185,7 @@ fn signed_transaction() { ); // Verify and add the block - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -215,42 +209,134 @@ fn signed_transaction() { fn provided_transaction() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); - let (_, mut blockchain) = new_blockchain::(genesis, &[]); + let (db, mut blockchain) = new_blockchain::(genesis, &[]); - let tx = random_provided_transaction(&mut OsRng); + let tx = random_provided_transaction(&mut OsRng, "order1"); - // This should be provideable - let mut db = MemDb::new(); - let mut txs = ProvidedTransactions::<_, ProvidedTransaction>::new(db.clone(), genesis); + // This should be providable + let mut temp_db = MemDb::new(); + let mut txs = ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis); txs.provide(tx.clone()).unwrap(); assert_eq!(txs.provide(tx.clone()), Err(ProvidedError::AlreadyProvided)); assert_eq!( - ProvidedTransactions::<_, ProvidedTransaction>::new(db.clone(), genesis).transactions, - HashMap::from([("provided", VecDeque::from([tx.clone()]))]), + ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis).transactions, + HashMap::from([("order1", VecDeque::from([tx.clone()]))]), ); - let mut txn = db.txn(); - txs.complete(&mut txn, "provided", tx.hash()); + let mut txn = temp_db.txn(); + txs.complete(&mut txn, "order1", [0u8; 32], tx.hash()); txn.commit(); assert!(ProvidedTransactions::<_, ProvidedTransaction>::new(db.clone(), genesis) .transactions .is_empty()); - // Non-provided transactions should fail verification - let block = Block::new(blockchain.tip(), vec![tx.clone()], 
vec![]); - assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + // case we have the block's provided txs in our local as well + { + // Non-provided transactions should fail verification because we don't have them locally. + let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); + assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + + // Provided transactions should pass verification + blockchain.provide_transaction(tx.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + + // add_block should work for verified blocks + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + + let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); + + // The provided transaction should no longer considered provided but added to chain, + // causing this error + assert_eq!( + blockchain.verify_block::(&block, validators.clone(), false), + Err(BlockError::ProvidedAlreadyIncluded) + ); + } + + // case we don't have the block's provided txs in our local + { + let tx1 = random_provided_transaction(&mut OsRng, "order1"); + let tx2 = random_provided_transaction(&mut OsRng, "order1"); + let tx3 = random_provided_transaction(&mut OsRng, "order2"); + let tx4 = random_provided_transaction(&mut OsRng, "order2"); + + // add_block DOES NOT fail for unverified provided transactions if told to add them, + // since now we can have them later. + let block1 = Block::new(blockchain.tip(), vec![tx1.clone(), tx3.clone()], vec![]); + assert!(blockchain.add_block::(&block1, vec![], validators.clone()).is_ok()); + + // in fact, we can have many blocks that have provided txs that we don't have locally. 
+ let block2 = Block::new(blockchain.tip(), vec![tx2.clone(), tx4.clone()], vec![]); + assert!(blockchain.add_block::(&block2, vec![], validators.clone()).is_ok()); + + // make sure we won't return ok for the block before we actually got the txs + let TransactionKind::Provided(order) = tx1.kind() else { panic!("tx wasn't provided") }; + assert!(!Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block1.hash(), + order + )); + // provide the first tx + blockchain.provide_transaction(tx1).unwrap(); + // it should be ok for this order now, since the second tx has different order. + assert!(Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block1.hash(), + order + )); - // Provided transactions should pass verification - blockchain.provide_transaction(tx.clone()).unwrap(); - blockchain.verify_block::(&block, validators.clone()).unwrap(); + // give the second tx + let TransactionKind::Provided(order) = tx3.kind() else { panic!("tx wasn't provided") }; + assert!(!Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block1.hash(), + order + )); + blockchain.provide_transaction(tx3).unwrap(); + // it should be ok now for the first block + assert!(Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block1.hash(), + order + )); - // add_block should work for verified blocks - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + // provide the second block txs + let TransactionKind::Provided(order) = tx4.kind() else { panic!("tx wasn't provided") }; + // not ok yet + assert!(!Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block2.hash(), + order + )); + blockchain.provide_transaction(tx4).unwrap(); + // ok now + assert!(Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block2.hash(), + order + )); - let block = Block::new(blockchain.tip(), vec![tx], vec![]); - // The provided transaction should no longer considered provided, causing this error - 
assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); - // add_block should fail for unverified provided transactions if told to add them - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_err()); + // provide the second block txs + let TransactionKind::Provided(order) = tx2.kind() else { panic!("tx wasn't provided") }; + assert!(!Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block2.hash(), + order + )); + blockchain.provide_transaction(tx2).unwrap(); + assert!(Blockchain::::locally_provided_txs_in_block( + &db, + &genesis, + &block2.hash(), + order + )); + } } #[tokio::test] @@ -271,11 +357,9 @@ async fn tendermint_evidence_tx() { let Transaction::Tendermint(tx) = tx else { panic!("non-tendermint tx found"); }; - assert!(blockchain.add_transaction::( - true, - Transaction::Tendermint(tx), - validators.clone() - )); + blockchain + .add_transaction::(true, Transaction::Tendermint(tx), validators.clone()) + .unwrap(); } let block = blockchain.build_block::(validators.clone()); assert_eq!(blockchain.tip(), tip); @@ -287,7 +371,7 @@ async fn tendermint_evidence_tx() { } // Verify and add the block - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -383,7 +467,7 @@ async fn block_tx_ordering() { let signed_tx = Transaction::Application(SignedTx::Signed(Box::new( crate::tests::signed_transaction(&mut OsRng, genesis, &key, i), ))); - assert!(blockchain.add_transaction::(true, signed_tx.clone(), validators.clone())); + blockchain.add_transaction::(true, signed_tx.clone(), validators.clone()).unwrap(); mempool.push(signed_tx); let unsigned_tx = Transaction::Tendermint( @@ -393,10 +477,11 @@ async fn block_tx_ordering() { ) .await, ); - assert!(blockchain.add_transaction::(true, unsigned_tx.clone(), 
validators.clone())); + blockchain.add_transaction::(true, unsigned_tx.clone(), validators.clone()).unwrap(); mempool.push(unsigned_tx); - let provided_tx = SignedTx::Provided(Box::new(random_provided_transaction(&mut OsRng))); + let provided_tx = + SignedTx::Provided(Box::new(random_provided_transaction(&mut OsRng, "order1"))); blockchain.provide_transaction(provided_tx.clone()).unwrap(); provided_txs.push(provided_tx); } @@ -424,7 +509,7 @@ async fn block_tx_ordering() { } // should be a valid block - blockchain.verify_block::(&block, validators.clone()).unwrap(); + blockchain.verify_block::(&block, validators.clone(), false).unwrap(); // Unsigned before Provided { @@ -433,7 +518,7 @@ async fn block_tx_ordering() { let unsigned = block.transactions.remove(128); block.transactions.insert(0, unsigned); assert_eq!( - blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -444,7 +529,7 @@ async fn block_tx_ordering() { let signed = block.transactions.remove(256); block.transactions.insert(0, signed); assert_eq!( - blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -454,7 +539,7 @@ async fn block_tx_ordering() { let mut block = block; block.transactions.swap(128, 256); assert_eq!( - blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), BlockError::WrongTransactionOrder ); } diff --git a/coordinator/tributary/src/tests/mempool.rs b/coordinator/tributary/src/tests/mempool.rs index 20b69cc7c..3d3215078 100644 --- a/coordinator/tributary/src/tests/mempool.rs +++ b/coordinator/tributary/src/tests/mempool.rs @@ -10,7 +10,7 @@ use tendermint::ext::Commit; use serai_db::MemDb; use crate::{ - transaction::Transaction as 
TransactionTrait, + transaction::{TransactionError, Transaction as TransactionTrait}, tendermint::{TendermintBlock, Validators, Signer, TendermintNetwork}, ACCOUNT_MEMPOOL_LIMIT, Transaction, Mempool, tests::{SignedTransaction, signed_transaction, p2p::DummyP2p, random_evidence_tx}, @@ -43,69 +43,85 @@ async fn mempool_addition() { // Add TX 0 let mut blockchain_next_nonces = HashMap::from([(signer, 0)]); - assert!(mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Application(first_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); + assert!(mempool + .add::( + &blockchain_next_nonces, + true, + Transaction::Application(first_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ) + .unwrap()); assert_eq!(mempool.next_nonce(&signer), Some(1)); // add a tendermint evidence tx let evidence_tx = random_evidence_tx::(Signer::new(genesis, key.clone()).into(), TendermintBlock(vec![])) .await; - assert!(mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); + assert!(mempool + .add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(evidence_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ) + .unwrap()); // Test reloading works assert_eq!(mempool, Mempool::new(db, genesis)); - // Adding it again should fail - assert!(!mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Application(first_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); - assert!(!mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); + // Adding them again should fail + assert_eq!( + mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(first_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ), + Err(TransactionError::InvalidNonce) + ); + assert_eq!( 
+ mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(evidence_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ), + Ok(false) + ); // Do the same with the next nonce let second_tx = signed_transaction(&mut OsRng, genesis, &key, 1); - assert!(mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Application(second_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); + assert_eq!( + mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(second_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ), + Ok(true) + ); assert_eq!(mempool.next_nonce(&signer), Some(2)); - assert!(!mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Application(second_tx.clone()), - validators.clone(), - unsigned_in_chain, - commit, - )); + assert_eq!( + mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(second_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + ), + Err(TransactionError::InvalidNonce) + ); // If the mempool doesn't have a nonce for an account, it should successfully use the // blockchain's @@ -114,14 +130,16 @@ async fn mempool_addition() { let second_signer = tx.1.signer; assert_eq!(mempool.next_nonce(&second_signer), None); blockchain_next_nonces.insert(second_signer, 2); - assert!(mempool.add::( - &blockchain_next_nonces, - true, - Transaction::Application(tx.clone()), - validators.clone(), - unsigned_in_chain, - commit - )); + assert!(mempool + .add::( + &blockchain_next_nonces, + true, + Transaction::Application(tx.clone()), + validators.clone(), + unsigned_in_chain, + commit + ) + .unwrap()); assert_eq!(mempool.next_nonce(&second_signer), Some(3)); // Getting a block should work @@ -159,22 +177,32 @@ fn too_many_mempool() { // We should be able to add transactions up to the limit for i in 0 .. 
ACCOUNT_MEMPOOL_LIMIT { - assert!(mempool.add::( + assert!(mempool + .add::( + &HashMap::from([(signer, 0)]), + false, + Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), + validators.clone(), + unsigned_in_chain, + commit, + ) + .unwrap()); + } + // Yet adding more should fail + assert_eq!( + mempool.add::( &HashMap::from([(signer, 0)]), false, - Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), + Transaction::Application(signed_transaction( + &mut OsRng, + genesis, + &key, + ACCOUNT_MEMPOOL_LIMIT + )), validators.clone(), unsigned_in_chain, commit, - )); - } - // Yet adding more should fail - assert!(!mempool.add::( - &HashMap::from([(signer, 0)]), - false, - Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, ACCOUNT_MEMPOOL_LIMIT)), - validators.clone(), - unsigned_in_chain, - commit, - )); + ), + Err(TransactionError::TooManyInMempool) + ); } diff --git a/coordinator/tributary/src/tests/transaction/mod.rs b/coordinator/tributary/src/tests/transaction/mod.rs index a7d3abcbe..266a11428 100644 --- a/coordinator/tributary/src/tests/transaction/mod.rs +++ b/coordinator/tributary/src/tests/transaction/mod.rs @@ -62,7 +62,11 @@ impl ReadWrite for ProvidedTransaction { impl Transaction for ProvidedTransaction { fn kind(&self) -> TransactionKind<'_> { - TransactionKind::Provided("provided") + match self.0[0] { + 1 => TransactionKind::Provided("order1"), + 2 => TransactionKind::Provided("order2"), + _ => panic!("unknown order"), + } } fn hash(&self) -> [u8; 32] { @@ -74,9 +78,17 @@ impl Transaction for ProvidedTransaction { } } -pub fn random_provided_transaction(rng: &mut R) -> ProvidedTransaction { +pub fn random_provided_transaction( + rng: &mut R, + order: &str, +) -> ProvidedTransaction { let mut data = vec![0; 512]; rng.fill_bytes(&mut data); + data[0] = match order { + "order1" => 1, + "order2" => 2, + _ => panic!("unknown order"), + }; ProvidedTransaction(data) } diff --git 
a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index da03591a7..c9c5ba1a6 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -31,6 +31,12 @@ pub enum TransactionError { /// Transaction's content is invalid. #[error("transaction content is invalid")] InvalidContent, + /// Transaction's signer has too many transactions in the mempool. + #[error("signer has too many transactions in the mempool")] + TooManyInMempool, + /// Provided Transaction added to mempool. + #[error("provided transaction added to mempool")] + ProvidedAddedToMempool, } /// Data for a signed transaction. @@ -78,7 +84,7 @@ impl ReadWrite for Signed { #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] pub enum TransactionKind<'a> { - /// This tranaction should be provided by every validator, in an exact order. + /// This transaction should be provided by every validator, in an exact order. /// /// The contained static string names the orderer to use. This allows two distinct provided /// transaction kinds, without a synchronized order, to be ordered within their own kind without @@ -87,8 +93,9 @@ pub enum TransactionKind<'a> { /// The only malleability is in when this transaction appears on chain. The block producer will /// include it when they have it. Block verification will fail for validators without it. /// - /// If a supermajority of validators still produce a commit for a block with a provided - /// transaction which isn't locally held, the chain will sleep until it is locally provided. + /// If a supermajority of validators produce a commit for a block with a provided transaction + /// which isn't locally held, the block will be added to the local chain. When the transaction is + /// locally provided, it will be compared for correctness to the on-chain version Provided(&'static str), /// An unsigned transaction, only able to be included by the block producer. 
diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index 02c6dd28b..1f95362d1 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -282,7 +282,6 @@ pub trait Network: Sized + Send + Sync { /// Trigger a slash for the validator in question who was definitively malicious. /// /// The exact process of triggering a slash is undefined and left to the network as a whole. - // TODO: We need to provide some evidence for this. async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); /// Validate a block. diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index a22672cef..487027b00 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -794,6 +794,9 @@ impl TendermintMachine { if self.block.log.has_consensus(self.block.round().number, Data::Prevote(Some(block.id()))) { match self.network.validate(block).await { Ok(_) => (), + // BlockError::Temporal is due to a temporal error we have, yet a supermajority of the + // network does not, Because we do not believe this block to be fatally invalid, and + // because a supermajority deems it valid, accept it. 
Err(BlockError::Temporal) => (), Err(BlockError::Fatal) => { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index aa87ee297..8fd6242d3 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -176,7 +176,7 @@ impl TestNetwork { TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) }, ) .await; - tokio::task::spawn(machine.run()); + tokio::spawn(machine.run()); write.push((messages, synced_block, synced_block_result)); } } diff --git a/deny.toml b/deny.toml index 980f78c48..b02e4c877 100644 --- a/deny.toml +++ b/deny.toml @@ -96,6 +96,7 @@ unknown-registry = "deny" unknown-git = "deny" allow-registry = ["https://github.com/rust-lang/crates.io-index"] allow-git = [ + "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", "https://github.com/monero-rs/base58-monero", diff --git a/docs/Getting Started.md b/docs/Getting Started.md index 287d12ee2..bfb098f1a 100644 --- a/docs/Getting Started.md +++ b/docs/Getting Started.md @@ -35,9 +35,9 @@ rustup target add wasm32-unknown-unknown --toolchain nightly ### Install Solidity ``` -sudo pip3 install solc-select -solc-select install 0.8.16 -solc-select use 0.8.16 +cargo install svm-rs +svm install 0.8.16 +svm use 0.8.16 ``` ### Install Solidity Compiler Version Manager diff --git a/docs/protocol/Constants.md b/docs/protocol/Constants.md index 88f730930..193a6433c 100644 --- a/docs/protocol/Constants.md +++ b/docs/protocol/Constants.md @@ -9,14 +9,13 @@ protocol. 
|-----------------|----------------------------------------------| | SeraiAddress | sr25519::Public (unchecked [u8; 32] wrapper) | | Amount | u64 | -| NetworkId | u16 | -| Coin | u32 | -| Network | Vec | +| NetworkId | NetworkId (Rust enum, SCALE-encoded) | +| Coin | Coin (Rust enum, SCALE-encoded) | | Session | u32 | -| Validator Set | (Session, NetworkId) | +| Validator Set | (NetworkId, Session) | | Key | BoundedVec\ | | KeyPair | (SeraiAddress, Key) | -| ExternalAddress | BoundedVec\ | +| ExternalAddress | BoundedVec\ | | Data | BoundedVec\ | ### Networks diff --git a/docs/protocol/Validator Sets.md b/docs/protocol/Validator Sets.md index 582fb3c0f..4f84eefa9 100644 --- a/docs/protocol/Validator Sets.md +++ b/docs/protocol/Validator Sets.md @@ -2,32 +2,29 @@ Validator Sets are defined at the protocol level, with the following parameters: - - `bond` (Amount): Amount of bond per key-share. - - `network` (Network): The network this validator set operates - over. - - `participants` (Vec\): List of participants within this set. + - `network` (NetworkId): The network this validator set + operates over. + - `allocation_per_key_share` (Amount): Amount of stake needing allocation + in order to receive a key share. -Validator Sets are referred to by `NetworkId` yet have their data accessible via -`ValidatorSetInstance`. +### Participation in Consensus -### Participation in consensus - -All Validator Sets participate in consensus. In the future, a dedicated group -to order Serai is planned. +The validator set for `NetworkId::Serai` participates in Serai's own consensus, +producing and finalizing blocks. ### Multisig Every Validator Set is expected to form a `t`-of-`n` multisig, where `n` is the amount of key shares in the Validator Set and `t` is `n * 2 / 3 + 1`, for each -of its networks. This multisig is secure to hold coins up to 67% of the -Validator Set's bonded value. 
If the coins exceed that threshold, there's more -value in the multisig than in the supermajority of bond that must be put forth -to control it. Accordingly, it'd be no longer financially secure, and it MUST -reject newly added coins which would cross that threshold. +of its networks. This multisig is secure to hold coins valued at up to 33% of +the Validator Set's allocated stake. If the coins exceed that threshold, there's +more value in the multisig and associated liquidity pool than in the +supermajority of allocated stake securing them both. Accordingly, it'd be no +longer financially secure, and it MUST reject newly added coins. ### Multisig Creation -Multisigs are created by processors, communicating via their Coordinators. +Multisigs are created by Processors, communicating via their Coordinators. They're then confirmed on chain via the `validator-sets` pallet. This is done by having 100% of participants agree on the resulting group key. While this isn't fault tolerant regarding liveliness, a malicious actor who forces a `t`-of-`n` @@ -41,28 +38,17 @@ successfully created or not. Processors cannot simply ask each other if they succeeded without creating an instance of the Byzantine Generals Problem. Placing results within a Byzantine Fault Tolerant system resolves this. -### Multisig Lifetime - -The keys for a Validator Set remain valid until its participants change. If a -Validator Set adds a new member, and then they leave, the set's historical keys -are not reused. +### Multisig Rotation -### Multisig Handoffs +Please see `processor/Multisig Rotation.md` for details on the timing. -Once new keys are confirmed for a given Validator Set, they become tracked and -the recommended set of keys for incoming coins. The old keys are still eligible -to receive coins for a provided grace period, requiring the current Validator -Set to track both sets of keys. 
The old keys are also prioritized for handling -outbound transfers, until the end of the grace period, at which point they're -no longer eligible to receive coins and they forward all of their coins to the -new set of keys. It is only then that validators in the previous instance of the -set, yet not the current instance, may unbond their stake. +Once the new multisig publishes its first `Batch`, the old multisig's keys are +cleared and the set is considered retired. After a one-session cooldown period, +they may deallocate their stake. ### Set Keys (message) - - `network` (Network): Network whose key is being voted for. + - `network` (Network): Network whose key is being set. - `key_pair` (KeyPair): Key pair being set for this `Session`. - `signature` (Signature): A MuSig-style signature of all validators, -confirming this key. - -Once a key is voted on by every member, it's adopted as detailed above. + confirming this key. diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 01ab54cf8..dbb139c0f 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -139,9 +139,15 @@ pub mod coordinator { } } + #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)] + pub struct PlanMeta { + pub key: Vec, + pub id: [u8; 32], + } + #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)] pub enum ProcessorMessage { - SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<[u8; 32]> }, + SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec }, BatchPreprocess { id: SignId, block: BlockHash, preprocess: Vec }, BatchShare { id: SignId, share: [u8; 32] }, } diff --git a/processor/src/main.rs b/processor/src/main.rs index d0382dda5..20471c9b1 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -13,7 +13,7 @@ use serai_client::{ validator_sets::primitives::{ValidatorSet, KeyPair}, }; -use messages::CoordinatorMessage; +use 
messages::{coordinator::PlanMeta, CoordinatorMessage}; use serai_env as env; @@ -350,7 +350,13 @@ async fn handle_coordinator_msg( .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { network: N::NETWORK, block: substrate_block, - plans: to_sign.iter().map(|signable| signable.1).collect(), + plans: to_sign + .iter() + .map(|signable| PlanMeta { + key: signable.0.to_bytes().as_ref().to_vec(), + id: signable.1, + }) + .collect(), }) .await; } diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index a6838fd5a..0071df6c5 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -11,7 +11,7 @@ use serai_client::{ in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, - tokens::primitives::{OutInstruction, OutInstructionWithBalance}, + coins::primitives::{OutInstruction, OutInstructionWithBalance}, }; use log::{info, error}; diff --git a/substrate/client/src/lib.rs b/substrate/client/src/lib.rs index e80b183f5..4949c0a0d 100644 --- a/substrate/client/src/lib.rs +++ b/substrate/client/src/lib.rs @@ -14,7 +14,7 @@ mod other_primitives { pub mod in_instructions { pub use serai_runtime::in_instructions::primitives; } - pub mod tokens { + pub mod coins { pub use serai_runtime::tokens::primitives; } pub mod validator_sets { diff --git a/substrate/client/src/serai/coins.rs b/substrate/client/src/serai/coins.rs new file mode 100644 index 000000000..1d6d255ac --- /dev/null +++ b/substrate/client/src/serai/coins.rs @@ -0,0 +1,94 @@ +use sp_core::sr25519::Public; +use serai_runtime::{ + primitives::{SeraiAddress, SubstrateAmount, Amount, Coin, Balance}, + assets::{AssetDetails, AssetAccount}, + tokens, Tokens, Runtime, +}; +pub use tokens::primitives; +use primitives::OutInstruction; + +use subxt::tx::Payload; + +use crate::{TemporalSerai, SeraiError, Composite, scale_value, scale_composite}; + +const PALLET: &str = "Tokens"; + +pub type TokensEvent = 
tokens::Event; + +#[derive(Clone, Copy)] +pub struct SeraiCoins<'a>(pub(crate) TemporalSerai<'a>); +impl<'a> SeraiCoins<'a> { + pub fn into_inner(self) -> TemporalSerai<'a> { + self.0 + } + + pub async fn mint_events(&self) -> Result, SeraiError> { + self.0.events::(|event| matches!(event, TokensEvent::Mint { .. })).await + } + + pub async fn burn_events(&self) -> Result, SeraiError> { + self.0.events::(|event| matches!(event, TokensEvent::Burn { .. })).await + } + + pub async fn sri_balance(&self, address: SeraiAddress) -> Result { + let data: Option< + serai_runtime::system::AccountInfo>, + > = self.0.storage("System", "Account", Some(vec![scale_value(address)])).await?; + Ok(data.map(|data| data.data.free).unwrap_or(0)) + } + + pub async fn token_supply(&self, coin: Coin) -> Result { + Ok(Amount( + self + .0 + .storage::>( + "Assets", + "Asset", + Some(vec![scale_value(coin)]), + ) + .await? + .map(|token| token.supply) + .unwrap_or(0), + )) + } + + pub async fn token_balance( + &self, + coin: Coin, + address: SeraiAddress, + ) -> Result { + Ok(Amount( + self + .0 + .storage::>( + "Assets", + "Account", + Some(vec![scale_value(coin), scale_value(address)]), + ) + .await? + .map(|account| account.balance()) + .unwrap_or(0), + )) + } + + pub fn transfer_sri(to: SeraiAddress, amount: Amount) -> Payload> { + Payload::new( + "Balances", + // TODO: Use transfer_allow_death? 
+ // TODO: Replace the Balances pallet with something much simpler + "transfer", + scale_composite(serai_runtime::balances::Call::::transfer { + dest: to, + value: amount.0, + }), + ) + } + + pub fn burn(balance: Balance, instruction: OutInstruction) -> Payload> { + Payload::new( + PALLET, + "burn", + scale_composite(tokens::Call::::burn { balance, instruction }), + ) + } +} diff --git a/substrate/client/src/serai/in_instructions.rs b/substrate/client/src/serai/in_instructions.rs index 62e3d74a8..ab737fcb3 100644 --- a/substrate/client/src/serai/in_instructions.rs +++ b/substrate/client/src/serai/in_instructions.rs @@ -6,42 +6,42 @@ use subxt::utils::Encoded; use crate::{ primitives::{BlockHash, NetworkId}, - SeraiError, Serai, scale_value, + SeraiError, Serai, TemporalSerai, scale_value, }; pub type InInstructionsEvent = in_instructions::Event; const PALLET: &str = "InInstructions"; -impl Serai { - pub async fn get_latest_block_for_network( +#[derive(Clone, Copy)] +pub struct SeraiInInstructions<'a>(pub(crate) TemporalSerai<'a>); +impl<'a> SeraiInInstructions<'a> { + pub fn into_inner(self) -> TemporalSerai<'a> { + self.0 + } + + pub async fn latest_block_for_network( &self, - hash: [u8; 32], network: NetworkId, ) -> Result, SeraiError> { - self.storage(PALLET, "LatestNetworkBlock", Some(vec![scale_value(network)]), hash).await + self.0.storage(PALLET, "LatestNetworkBlock", Some(vec![scale_value(network)])).await } - pub async fn get_last_batch_for_network( + pub async fn last_batch_for_network( &self, - hash: [u8; 32], network: NetworkId, ) -> Result, SeraiError> { - self.storage(PALLET, "LastBatch", Some(vec![scale_value(network)]), hash).await + self.0.storage(PALLET, "LastBatch", Some(vec![scale_value(network)])).await } - pub async fn get_batch_events( - &self, - block: [u8; 32], - ) -> Result, SeraiError> { + pub async fn batch_events(&self) -> Result, SeraiError> { self - .events::(block, |event| { - matches!(event, InInstructionsEvent::Batch { .. 
}) - }) + .0 + .events::(|event| matches!(event, InInstructionsEvent::Batch { .. })) .await } pub fn execute_batch(batch: SignedBatch) -> Encoded { - Self::unsigned::(&in_instructions::Call::::execute_batch { batch }) + Serai::unsigned::(&in_instructions::Call::::execute_batch { batch }) } } diff --git a/substrate/client/src/serai/mod.rs b/substrate/client/src/serai/mod.rs index 667b4a106..bbe24bcd8 100644 --- a/substrate/client/src/serai/mod.rs +++ b/substrate/client/src/serai/mod.rs @@ -33,9 +33,12 @@ use serai_runtime::{ system::Config, support::traits::PalletInfo as PalletInfoTrait, PalletInfo, Runtime, }; -pub mod tokens; +pub mod coins; +pub use coins::SeraiCoins; pub mod in_instructions; +pub use in_instructions::SeraiInInstructions; pub mod validator_sets; +pub use validator_sets::SeraiValidatorSets; #[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Encode, Decode)] pub struct Tip { @@ -136,57 +139,58 @@ pub enum SeraiError { #[derive(Clone)] pub struct Serai(OnlineClient); +#[derive(Clone, Copy)] +pub struct TemporalSerai<'a>(pub(crate) &'a Serai, pub(crate) [u8; 32]); + impl Serai { pub async fn new(url: &str) -> Result { Ok(Serai(OnlineClient::::from_url(url).await.map_err(SeraiError::RpcError)?)) } - async fn storage( - &self, - pallet: &'static str, - name: &'static str, - keys: Option>, - block: [u8; 32], - ) -> Result, SeraiError> { - let storage = self.0.storage(); - #[allow(clippy::unwrap_or_default)] - let address = subxt::dynamic::storage(pallet, name, keys.unwrap_or(vec![])); - debug_assert!(storage.validate(&address).is_ok(), "invalid storage address"); + fn unsigned(call: &C) -> Encoded { + // TODO: Should Serai purge the old transaction code AND set this to 0/1? + const TRANSACTION_VERSION: u8 = 4; - storage - .at(block.into()) - .fetch(&address) - .await - .map_err(SeraiError::RpcError)? 
- .map(|res| R::decode(&mut res.encoded()).map_err(|_| SeraiError::InvalidRuntime)) - .transpose() + // Protocol version + let mut bytes = vec![TRANSACTION_VERSION]; + + // Pallet index + bytes.push(u8::try_from(PalletInfo::index::

().unwrap()).unwrap()); + // Call + bytes.extend(call.encode()); + + // Prefix the length + let mut complete_bytes = scale::Compact(u32::try_from(bytes.len()).unwrap()).encode(); + complete_bytes.extend(bytes); + Encoded(complete_bytes) } - async fn events( + pub fn sign>( &self, - block: [u8; 32], - filter: impl Fn(&E) -> bool, - ) -> Result, SeraiError> { - let mut res = vec![]; - for event in self.0.events().at(block.into()).await.map_err(SeraiError::RpcError)?.iter() { - let event = event.map_err(|_| SeraiError::InvalidRuntime)?; - if PalletInfo::index::

().unwrap() == usize::from(event.pallet_index()) { - let mut with_variant: &[u8] = - &[[event.variant_index()].as_ref(), event.field_bytes()].concat(); - let event = E::decode(&mut with_variant).map_err(|_| SeraiError::InvalidRuntime)?; - if filter(&event) { - res.push(event); - } - } - } - Ok(res) + signer: &S, + payload: &Payload>, + nonce: u32, + params: BaseExtrinsicParamsBuilder, + ) -> Result { + TxClient::new(self.0.offline()) + .create_signed_with_nonce(payload, signer, nonce, params) + .map(|tx| Encoded(tx.into_encoded())) + // TODO: Don't have this potentially return an error (requires modifying the Payload type) + .map_err(|_| SeraiError::InvalidRuntime) } - pub async fn get_latest_block_hash(&self) -> Result<[u8; 32], SeraiError> { + pub async fn publish(&self, tx: &Encoded) -> Result<(), SeraiError> { + // Drop the hash, which is the hash of the raw TX, as TXs are allowed to share hashes and this + // hash is practically useless/unsafe + // If we are to return something, it should be block included in and position within block + self.0.rpc().submit_extrinsic(tx).await.map(|_| ()).map_err(SeraiError::RpcError) + } + + pub async fn latest_block_hash(&self) -> Result<[u8; 32], SeraiError> { Ok(self.0.rpc().finalized_head().await.map_err(SeraiError::RpcError)?.into()) } - pub async fn get_latest_block(&self) -> Result { + pub async fn latest_block(&self) -> Result { Block::new( self .0 @@ -203,7 +207,7 @@ impl Serai { // TODO: Add one to Serai pub async fn is_finalized(&self, header: &Header) -> Result, SeraiError> { // Get the latest finalized block - let finalized = self.get_latest_block_hash().await?.into(); + let finalized = self.latest_block_hash().await?.into(); // If the latest finalized block is this block, return true if finalized == header.hash() { return Ok(Some(true)); @@ -234,7 +238,7 @@ impl Serai { Ok(Some(header.hash() == hash)) } - pub async fn get_block(&self, hash: [u8; 32]) -> Result, SeraiError> { + pub async fn block(&self, hash: [u8; 
32]) -> Result, SeraiError> { let Some(res) = self.0.rpc().block(Some(hash.into())).await.map_err(SeraiError::RpcError)? else { return Ok(None); @@ -248,18 +252,18 @@ impl Serai { Ok(Some(Block::new(res.block)?)) } - // Ideally, this would be get_block_hash, not get_block_by_number + // Ideally, this would be block_hash, not block_by_number // Unfortunately, in order to only operate over only finalized data, we have to check the // returned hash is for a finalized block. We can only do that by calling the extensive // is_finalized method, which at least requires the header // In practice, the block is likely more useful than the header - pub async fn get_block_by_number(&self, number: u64) -> Result, SeraiError> { + pub async fn block_by_number(&self, number: u64) -> Result, SeraiError> { let Some(hash) = self.0.rpc().block_hash(Some(number.into())).await.map_err(SeraiError::RpcError)? else { return Ok(None); }; - self.get_block(hash.into()).await + self.block(hash.into()).await } /// A stream which yields whenever new block(s) have been finalized. @@ -274,7 +278,7 @@ impl Serai { )) } - pub async fn get_nonce(&self, address: &SeraiAddress) -> Result { + pub async fn nonce(&self, address: &SeraiAddress) -> Result { self .0 .rpc() @@ -283,67 +287,72 @@ impl Serai { .map_err(SeraiError::RpcError) } - fn unsigned(call: &C) -> Encoded { - // TODO: Should Serai purge the old transaction code AND set this to 0/1? - const TRANSACTION_VERSION: u8 = 4; + /// Create a TemporalSerai using whatever is currently the latest block. + pub async fn with_current_latest_block(&self) -> Result { + let latest = self.latest_block_hash().await?; + Ok(TemporalSerai(self, latest)) + } - // Protocol version - let mut bytes = vec![TRANSACTION_VERSION]; + /// Returns a TemporalSerai able to retrieve state as of the specified block. + pub fn as_of(&self, block: [u8; 32]) -> TemporalSerai { + TemporalSerai(self, block) + } +} - // Pallet index - bytes.push(u8::try_from(PalletInfo::index::

().unwrap()).unwrap()); - // Call - bytes.extend(call.encode()); +impl<'a> TemporalSerai<'a> { + pub fn into_inner(&self) -> &Serai { + self.0 + } - // Prefix the length - let mut complete_bytes = scale::Compact(u32::try_from(bytes.len()).unwrap()).encode(); - complete_bytes.extend(bytes); - Encoded(complete_bytes) + async fn events( + &self, + filter: impl Fn(&E) -> bool, + ) -> Result, SeraiError> { + let mut res = vec![]; + for event in self.0 .0.events().at(self.1.into()).await.map_err(SeraiError::RpcError)?.iter() { + let event = event.map_err(|_| SeraiError::InvalidRuntime)?; + if PalletInfo::index::

().unwrap() == usize::from(event.pallet_index()) { + let mut with_variant: &[u8] = + &[[event.variant_index()].as_ref(), event.field_bytes()].concat(); + let event = E::decode(&mut with_variant).map_err(|_| SeraiError::InvalidRuntime)?; + if filter(&event) { + res.push(event); + } + } + } + Ok(res) } - pub fn sign>( + async fn storage( &self, - signer: &S, - payload: &Payload>, - nonce: u32, - params: BaseExtrinsicParamsBuilder, - ) -> Result { - TxClient::new(self.0.offline()) - .create_signed_with_nonce(payload, signer, nonce, params) - .map(|tx| Encoded(tx.into_encoded())) - // TODO: Don't have this potentially return an error (requires modifying the Payload type) - .map_err(|_| SeraiError::InvalidRuntime) + pallet: &'static str, + name: &'static str, + keys: Option>, + ) -> Result, SeraiError> { + let storage = self.0 .0.storage(); + #[allow(clippy::unwrap_or_default)] + let address = subxt::dynamic::storage(pallet, name, keys.unwrap_or(vec![])); + debug_assert!(storage.validate(&address).is_ok(), "invalid storage address"); + + storage + .at(self.1.into()) + .fetch(&address) + .await + .map_err(SeraiError::RpcError)? 
+ .map(|res| R::decode(&mut res.encoded()).map_err(|_| SeraiError::InvalidRuntime)) + .transpose() } - pub async fn publish(&self, tx: &Encoded) -> Result<(), SeraiError> { - // Drop the hash, which is the hash of the raw TX, as TXs are allowed to share hashes and this - // hash is practically useless/unsafe - // If we are to return something, it should be block included in and position within block - self.0.rpc().submit_extrinsic(tx).await.map(|_| ()).map_err(SeraiError::RpcError) + pub fn coins(self) -> SeraiCoins<'a> { + SeraiCoins(self) } - pub async fn get_sri_balance( - &self, - block: [u8; 32], - address: SeraiAddress, - ) -> Result { - let data: Option< - serai_runtime::system::AccountInfo>, - > = self.storage("System", "Account", Some(vec![scale_value(address)]), block).await?; - Ok(data.map(|data| data.data.free).unwrap_or(0)) + pub fn in_instructions(self) -> SeraiInInstructions<'a> { + SeraiInInstructions(self) } - pub fn transfer_sri(to: SeraiAddress, amount: Amount) -> Payload> { - Payload::new( - "Balances", - // TODO: Use transfer_allow_death? 
- // TODO: Replace the Balances pallet with something much simpler - "transfer", - scale_composite(serai_runtime::balances::Call::::transfer { - dest: to, - value: amount.0, - }), - ) + pub fn validator_sets(self) -> SeraiValidatorSets<'a> { + SeraiValidatorSets(self) } } diff --git a/substrate/client/src/serai/tokens.rs b/substrate/client/src/serai/tokens.rs deleted file mode 100644 index 0f84ed013..000000000 --- a/substrate/client/src/serai/tokens.rs +++ /dev/null @@ -1,69 +0,0 @@ -use sp_core::sr25519::Public; -use serai_runtime::{ - primitives::{SeraiAddress, SubstrateAmount, Amount, Coin, Balance}, - assets::{AssetDetails, AssetAccount}, - tokens, Tokens, Runtime, -}; -pub use tokens::primitives; -use primitives::OutInstruction; - -use subxt::tx::Payload; - -use crate::{Serai, SeraiError, Composite, scale_value, scale_composite}; - -const PALLET: &str = "Tokens"; - -pub type TokensEvent = tokens::Event; - -impl Serai { - pub async fn get_mint_events(&self, block: [u8; 32]) -> Result, SeraiError> { - self.events::(block, |event| matches!(event, TokensEvent::Mint { .. })).await - } - - pub async fn get_token_supply(&self, block: [u8; 32], coin: Coin) -> Result { - Ok(Amount( - self - .storage::>( - "Assets", - "Asset", - Some(vec![scale_value(coin)]), - block, - ) - .await? - .map(|token| token.supply) - .unwrap_or(0), - )) - } - - pub async fn get_token_balance( - &self, - block: [u8; 32], - coin: Coin, - address: SeraiAddress, - ) -> Result { - Ok(Amount( - self - .storage::>( - "Assets", - "Account", - Some(vec![scale_value(coin), scale_value(address)]), - block, - ) - .await? 
- .map(|account| account.balance()) - .unwrap_or(0), - )) - } - - pub fn burn(balance: Balance, instruction: OutInstruction) -> Payload> { - Payload::new( - PALLET, - "burn", - scale_composite(tokens::Call::::burn { balance, instruction }), - ) - } - - pub async fn get_burn_events(&self, block: [u8; 32]) -> Result, SeraiError> { - self.events::(block, |event| matches!(event, TokensEvent::Burn { .. })).await - } -} diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index 1c34ff684..8294c6c66 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -1,75 +1,79 @@ use sp_core::sr25519::{Public, Signature}; -use serai_runtime::{validator_sets, ValidatorSets, Runtime}; +use serai_runtime::{primitives::Amount, validator_sets, ValidatorSets, Runtime}; pub use validator_sets::primitives; use primitives::{Session, ValidatorSet, KeyPair}; use subxt::utils::Encoded; -use crate::{primitives::NetworkId, Serai, SeraiError, scale_value}; +use crate::{primitives::NetworkId, Serai, TemporalSerai, SeraiError, scale_value}; const PALLET: &str = "ValidatorSets"; pub type ValidatorSetsEvent = validator_sets::Event; -impl Serai { - pub async fn get_new_set_events( - &self, - block: [u8; 32], - ) -> Result, SeraiError> { +#[derive(Clone, Copy)] +pub struct SeraiValidatorSets<'a>(pub(crate) TemporalSerai<'a>); +impl<'a> SeraiValidatorSets<'a> { + pub fn into_inner(self) -> TemporalSerai<'a> { + self.0 + } + + pub async fn new_set_events(&self) -> Result, SeraiError> { self - .events::(block, |event| matches!(event, ValidatorSetsEvent::NewSet { .. })) + .0 + .events::(|event| matches!(event, ValidatorSetsEvent::NewSet { .. })) .await } - pub async fn get_key_gen_events( - &self, - block: [u8; 32], - ) -> Result, SeraiError> { + pub async fn key_gen_events(&self) -> Result, SeraiError> { + self + .0 + .events::(|event| matches!(event, ValidatorSetsEvent::KeyGen { .. 
})) + .await + } + + pub async fn set_retired_events(&self) -> Result, SeraiError> { self - .events::(block, |event| matches!(event, ValidatorSetsEvent::KeyGen { .. })) + .0 + .events::(|event| matches!(event, ValidatorSetsEvent::SetRetired { .. })) .await } - pub async fn get_session( + pub async fn session(&self, network: NetworkId) -> Result, SeraiError> { + self.0.storage(PALLET, "CurrentSession", Some(vec![scale_value(network)])).await + } + + pub async fn participants(&self, network: NetworkId) -> Result>, SeraiError> { + self.0.storage(PALLET, "Participants", Some(vec![scale_value(network)])).await + } + + pub async fn allocation_per_key_share( &self, network: NetworkId, - at_hash: [u8; 32], - ) -> Result, SeraiError> { - self.storage(PALLET, "CurrentSession", Some(vec![scale_value(network)]), at_hash).await + ) -> Result, SeraiError> { + self.0.storage(PALLET, "AllocationPerKeyShare", Some(vec![scale_value(network)])).await } - pub async fn get_validator_set_participants( + pub async fn allocation( &self, network: NetworkId, - at_hash: [u8; 32], - ) -> Result>, SeraiError> { - self.storage(PALLET, "Participants", Some(vec![scale_value(network)]), at_hash).await + key: Public, + ) -> Result, SeraiError> { + self.0.storage(PALLET, "Allocations", Some(vec![scale_value(network), scale_value(key)])).await } - pub async fn get_validator_set_musig_key( - &self, - set: ValidatorSet, - at_hash: [u8; 32], - ) -> Result, SeraiError> { - self.storage(PALLET, "MuSigKeys", Some(vec![scale_value(set)]), at_hash).await + pub async fn musig_key(&self, set: ValidatorSet) -> Result, SeraiError> { + self.0.storage(PALLET, "MuSigKeys", Some(vec![scale_value(set)])).await } // TODO: Store these separately since we almost never need both at once? 
- pub async fn get_keys( - &self, - set: ValidatorSet, - at_hash: [u8; 32], - ) -> Result, SeraiError> { - self.storage(PALLET, "Keys", Some(vec![scale_value(set)]), at_hash).await + pub async fn keys(&self, set: ValidatorSet) -> Result, SeraiError> { + self.0.storage(PALLET, "Keys", Some(vec![scale_value(set)])).await } - pub fn set_validator_set_keys( - network: NetworkId, - key_pair: KeyPair, - signature: Signature, - ) -> Encoded { - Self::unsigned::(&validator_sets::Call::::set_keys { + pub fn set_keys(network: NetworkId, key_pair: KeyPair, signature: Signature) -> Encoded { + Serai::unsigned::(&validator_sets::Call::::set_keys { network, key_pair, signature, diff --git a/substrate/client/tests/batch.rs b/substrate/client/tests/batch.rs index 9fef6bed4..2b747747e 100644 --- a/substrate/client/tests/batch.rs +++ b/substrate/client/tests/batch.rs @@ -13,7 +13,7 @@ use serai_client::{ primitives::{InInstruction, InInstructionWithBalance, Batch}, InInstructionsEvent, }, - tokens::TokensEvent, + coins::TokensEvent, Serai, }; @@ -48,23 +48,25 @@ serai_test!( let block = provide_batch(batch.clone()).await; let serai = serai().await; - assert_eq!(serai.get_latest_block_for_network(block, network).await.unwrap(), Some(block_hash)); - let batches = serai.get_batch_events(block).await.unwrap(); - assert_eq!( - batches, - vec![InInstructionsEvent::Batch { - network, - id, - block: block_hash, - instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), - }] - ); + let serai = serai.as_of(block); + { + let serai = serai.in_instructions(); + assert_eq!(serai.latest_block_for_network(network).await.unwrap(), Some(block_hash)); + let batches = serai.batch_events().await.unwrap(); + assert_eq!( + batches, + vec![InInstructionsEvent::Batch { + network, + id, + block: block_hash, + instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), + }] + ); + } - assert_eq!( - serai.get_mint_events(block).await.unwrap(), - vec![TokensEvent::Mint { address, 
balance }], - ); - assert_eq!(serai.get_token_supply(block, coin).await.unwrap(), amount); - assert_eq!(serai.get_token_balance(block, coin, address).await.unwrap(), amount); + let serai = serai.coins(); + assert_eq!(serai.mint_events().await.unwrap(), vec![TokensEvent::Mint { address, balance }],); + assert_eq!(serai.token_supply(coin).await.unwrap(), amount); + assert_eq!(serai.token_balance(coin, address).await.unwrap(), amount); } ); diff --git a/substrate/client/tests/burn.rs b/substrate/client/tests/burn.rs index 8b7f5cc87..2a03e5665 100644 --- a/substrate/client/tests/burn.rs +++ b/substrate/client/tests/burn.rs @@ -19,8 +19,8 @@ use serai_client::{ InInstructionsEvent, primitives::{InInstruction, InInstructionWithBalance, Batch}, }, - tokens::{primitives::OutInstruction, TokensEvent}, - PairSigner, Serai, + coins::{primitives::OutInstruction, TokensEvent}, + PairSigner, Serai, SeraiCoins, }; mod common; @@ -55,7 +55,8 @@ serai_test!( let block = provide_batch(batch.clone()).await; let serai = serai().await; - let batches = serai.get_batch_events(block).await.unwrap(); + let serai = serai.as_of(block); + let batches = serai.in_instructions().batch_events().await.unwrap(); assert_eq!( batches, vec![InInstructionsEvent::Batch { @@ -67,11 +68,11 @@ serai_test!( ); assert_eq!( - serai.get_mint_events(block).await.unwrap(), + serai.coins().mint_events().await.unwrap(), vec![TokensEvent::Mint { address, balance }] ); - assert_eq!(serai.get_token_supply(block, coin).await.unwrap(), amount); - assert_eq!(serai.get_token_balance(block, coin, address).await.unwrap(), amount); + assert_eq!(serai.coins().token_supply(coin).await.unwrap(), amount); + assert_eq!(serai.coins().token_balance(coin, address).await.unwrap(), amount); // Now burn it let mut rand_bytes = vec![0; 32]; @@ -83,11 +84,12 @@ serai_test!( let data = Data::new(rand_bytes).unwrap(); let out = OutInstruction { address: external_address, data: Some(data) }; + let serai = serai.into_inner(); let block = 
publish_tx( &serai .sign( &PairSigner::new(pair), - &Serai::burn(balance, out.clone()), + &SeraiCoins::burn(balance, out.clone()), 0, BaseExtrinsicParamsBuilder::new(), ) @@ -95,9 +97,10 @@ serai_test!( ) .await; - let events = serai.get_burn_events(block).await.unwrap(); + let serai = serai.as_of(block).coins(); + let events = serai.burn_events().await.unwrap(); assert_eq!(events, vec![TokensEvent::Burn { address, balance, instruction: out }]); - assert_eq!(serai.get_token_supply(block, coin).await.unwrap(), Amount(0)); - assert_eq!(serai.get_token_balance(block, coin, address).await.unwrap(), Amount(0)); + assert_eq!(serai.token_supply(coin).await.unwrap(), Amount(0)); + assert_eq!(serai.token_balance(coin, address).await.unwrap(), Amount(0)); } ); diff --git a/substrate/client/tests/common/in_instructions.rs b/substrate/client/tests/common/in_instructions.rs index 45f903e83..8e26bf8fd 100644 --- a/substrate/client/tests/common/in_instructions.rs +++ b/substrate/client/tests/common/in_instructions.rs @@ -14,10 +14,10 @@ use serai_client::{ primitives::{Batch, SignedBatch, batch_message}, InInstructionsEvent, }, - Serai, + SeraiInInstructions, }; -use crate::common::{serai, tx::publish_tx, validator_sets::set_validator_set_keys}; +use crate::common::{serai, tx::publish_tx, validator_sets::set_keys}; #[allow(dead_code)] pub async fn provide_batch(batch: Batch) -> [u8; 32] { @@ -27,23 +27,23 @@ pub async fn provide_batch(batch: Batch) -> [u8; 32] { let set = ValidatorSet { session: Session(0), network: batch.network }; let pair = insecure_pair_from_name(&format!("ValidatorSet {:?}", set)); let keys = if let Some(keys) = - serai.get_keys(set, serai.get_latest_block_hash().await.unwrap()).await.unwrap() + serai.with_current_latest_block().await.unwrap().validator_sets().keys(set).await.unwrap() { keys } else { let keys = (pair.public(), vec![].try_into().unwrap()); - set_validator_set_keys(set, keys.clone()).await; + set_keys(set, keys.clone()).await; keys }; 
assert_eq!(keys.0, pair.public()); - let block = publish_tx(&Serai::execute_batch(SignedBatch { + let block = publish_tx(&SeraiInInstructions::execute_batch(SignedBatch { batch: batch.clone(), signature: pair.sign(&batch_message(&batch)), })) .await; - let batches = serai.get_batch_events(block).await.unwrap(); + let batches = serai.as_of(block).in_instructions().batch_events().await.unwrap(); // TODO: impl From for BatchEvent? assert_eq!( batches, diff --git a/substrate/client/tests/common/mod.rs b/substrate/client/tests/common/mod.rs index 57e305160..13cf06c81 100644 --- a/substrate/client/tests/common/mod.rs +++ b/substrate/client/tests/common/mod.rs @@ -60,7 +60,7 @@ macro_rules! serai_test { tokio::time::sleep(Duration::from_secs(1)).await; } let serai = serai().await; - while serai.get_latest_block_hash().await.is_err() { + while serai.latest_block_hash().await.is_err() { tokio::time::sleep(Duration::from_secs(1)).await; } // TODO: https://github.com/serai-dex/serai/247 diff --git a/substrate/client/tests/common/tx.rs b/substrate/client/tests/common/tx.rs index 43925096b..33ab79d24 100644 --- a/substrate/client/tests/common/tx.rs +++ b/substrate/client/tests/common/tx.rs @@ -11,7 +11,7 @@ pub async fn publish_tx(tx: &Encoded) -> [u8; 32] { let serai = serai().await; let mut latest = - serai.get_block(serai.get_latest_block_hash().await.unwrap()).await.unwrap().unwrap().number(); + serai.block(serai.latest_block_hash().await.unwrap()).await.unwrap().unwrap().number(); serai.publish(tx).await.unwrap(); @@ -24,7 +24,7 @@ pub async fn publish_tx(tx: &Encoded) -> [u8; 32] { let block = { let mut block; while { - block = serai.get_block_by_number(latest).await.unwrap(); + block = serai.block_by_number(latest).await.unwrap(); block.is_none() } { sleep(Duration::from_secs(1)).await; diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 89cf18ebb..78f08d2db 100644 --- 
a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -15,13 +15,13 @@ use serai_client::{ primitives::{ValidatorSet, KeyPair, musig_context, musig_key, set_keys_message}, ValidatorSetsEvent, }, - Serai, + SeraiValidatorSets, }; use crate::common::{serai, tx::publish_tx}; #[allow(dead_code)] -pub async fn set_validator_set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8; 32] { +pub async fn set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8; 32] { let pair = insecure_pair_from_name("Alice"); let public = pair.public(); @@ -29,7 +29,11 @@ pub async fn set_validator_set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8 let public_key = ::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap(); assert_eq!( serai - .get_validator_set_musig_key(set, serai.get_latest_block_hash().await.unwrap()) + .with_current_latest_block() + .await + .unwrap() + .validator_sets() + .musig_key(set) .await .unwrap() .unwrap(), @@ -45,7 +49,11 @@ pub async fn set_validator_set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8 musig::(&musig_context(set), &Zeroizing::new(secret_key), &[public_key]).unwrap(); assert_eq!( serai - .get_validator_set_musig_key(set, serai.get_latest_block_hash().await.unwrap()) + .with_current_latest_block() + .await + .unwrap() + .validator_sets() + .musig_key(set) .await .unwrap() .unwrap(), @@ -63,7 +71,7 @@ pub async fn set_validator_set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8 ); // Vote in a key pair - let block = publish_tx(&Serai::set_validator_set_keys( + let block = publish_tx(&SeraiValidatorSets::set_keys( set.network, key_pair.clone(), Signature(sig.to_bytes()), @@ -71,10 +79,10 @@ pub async fn set_validator_set_keys(set: ValidatorSet, key_pair: KeyPair) -> [u8 .await; assert_eq!( - serai.get_key_gen_events(block).await.unwrap(), + serai.as_of(block).validator_sets().key_gen_events().await.unwrap(), vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }] ); - 
assert_eq!(serai.get_keys(set, block).await.unwrap(), Some(key_pair)); + assert_eq!(serai.as_of(block).validator_sets().keys(set).await.unwrap(), Some(key_pair)); block } diff --git a/substrate/client/tests/time.rs b/substrate/client/tests/time.rs index 5eac481dd..78ff6f878 100644 --- a/substrate/client/tests/time.rs +++ b/substrate/client/tests/time.rs @@ -11,11 +11,11 @@ serai_test!( async fn time() { let serai = serai().await; - let mut number = serai.get_latest_block().await.unwrap().number(); + let mut number = serai.latest_block().await.unwrap().number(); let mut done = 0; while done < 3 { // Wait for the next block - let block = serai.get_latest_block().await.unwrap(); + let block = serai.latest_block().await.unwrap(); if block.number() == number { sleep(Duration::from_secs(1)).await; continue; diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 2a5c9068f..de9168b98 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -12,10 +12,10 @@ use serai_client::{ }; mod common; -use common::{serai, validator_sets::set_validator_set_keys}; +use common::{serai, validator_sets::set_keys}; serai_test!( - async fn set_validator_set_keys_test() { + async fn set_keys_test() { let network = NetworkId::Bitcoin; let set = ValidatorSet { session: Session(0), network }; @@ -35,7 +35,9 @@ serai_test!( // Make sure the genesis is as expected assert_eq!( serai - .get_new_set_events(serai.get_block_by_number(0).await.unwrap().unwrap().hash()) + .as_of(serai.block_by_number(0).await.unwrap().unwrap().hash()) + .validator_sets() + .new_set_events() .await .unwrap(), NETWORKS @@ -47,30 +49,23 @@ serai_test!( .collect::>(), ); - let participants = serai - .get_validator_set_participants(set.network, serai.get_latest_block_hash().await.unwrap()) - .await - .unwrap() - .unwrap(); - let participants_ref: &[_] = participants.as_ref(); - assert_eq!(participants_ref, [public].as_ref()); - 
assert_eq!( - serai - .get_validator_set_musig_key(set, serai.get_latest_block_hash().await.unwrap()) - .await - .unwrap() - .unwrap(), - musig_key(set, &[public]).0 - ); + { + let vs_serai = serai.with_current_latest_block().await.unwrap().validator_sets(); + let participants = vs_serai.participants(set.network).await.unwrap().unwrap(); + let participants_ref: &[_] = participants.as_ref(); + assert_eq!(participants_ref, [public].as_ref()); + assert_eq!(vs_serai.musig_key(set).await.unwrap().unwrap(), musig_key(set, &[public]).0); + } - let block = set_validator_set_keys(set, key_pair.clone()).await; + let block = set_keys(set, key_pair.clone()).await; - // While the set_validator_set_keys function should handle this, it's beneficial to + // While the set_keys function should handle this, it's beneficial to // independently test it + let serai = serai.as_of(block).validator_sets(); assert_eq!( - serai.get_key_gen_events(block).await.unwrap(), + serai.key_gen_events().await.unwrap(), vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }] ); - assert_eq!(serai.get_keys(set, block).await.unwrap(), Some(key_pair)); + assert_eq!(serai.keys(set).await.unwrap(), Some(key_pair)); } ); diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 4bb943acb..d7c67e452 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -174,7 +174,10 @@ pub mod pallet { // key is publishing `Batch`s. This should only happen once the current key has verified all // `Batch`s published by the prior key, meaning they are accepting the hand-over. 
if prior.is_some() && (!valid_by_prior) { - ValidatorSets::::retire_session(network, Session(current_session.0 - 1)); + ValidatorSets::::retire_set(ValidatorSet { + network, + session: Session(current_session.0 - 1), + }); } // check that this validator set isn't publishing a batch more than once per block diff --git a/substrate/staking/pallet/src/lib.rs b/substrate/staking/pallet/src/lib.rs index 1ec7d1353..7c4fa730a 100644 --- a/substrate/staking/pallet/src/lib.rs +++ b/substrate/staking/pallet/src/lib.rs @@ -13,7 +13,10 @@ pub mod pallet { use serai_primitives::{NetworkId, Amount, PublicKey}; - use validator_sets_pallet::{primitives::Session, Config as VsConfig, Pallet as VsPallet}; + use validator_sets_pallet::{ + primitives::{Session, ValidatorSet}, + Config as VsConfig, Pallet as VsPallet, + }; use pallet_session::{Config as SessionConfig, SessionManager}; #[pallet::error] @@ -183,7 +186,12 @@ pub mod pallet { Some(VsPallet::::select_validators(NetworkId::Serai)) } - fn end_session(_end_index: u32) {} + fn end_session(end_index: u32) { + VsPallet::::retire_set(ValidatorSet { + network: NetworkId::Serai, + session: Session(end_index), + }) + } fn start_session(_start_index: u32) {} } diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index e8f119b30..c5b69c5e7 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -12,8 +12,6 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -hashbrown = { version = "0.14", default-features = false } - scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 816113900..9e69442fb 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ 
b/substrate/validator-sets/pallet/src/lib.rs @@ -145,7 +145,8 @@ pub mod pallet { fn recover_key_from_sorted_allocation_key(key: &[u8]) -> Public { Public(key[(key.len() - 32) ..].try_into().unwrap()) } - fn set_allocation(network: NetworkId, key: Public, amount: Amount) { + // Returns if this validator already had an allocation set. + fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> bool { let prior = Allocations::::take((network, key)); if let Some(amount) = prior { SortedAllocations::::remove(Self::sorted_allocation_key(network, key, amount)); @@ -154,6 +155,7 @@ pub mod pallet { Allocations::::set((network, key), Some(amount)); SortedAllocations::::set(Self::sorted_allocation_key(network, key, amount), Some(())); } + prior.is_some() } } @@ -235,6 +237,7 @@ pub mod pallet { pub enum Event { NewSet { set: ValidatorSet }, KeyGen { set: ValidatorSet, key_pair: KeyPair }, + SetRetired { set: ValidatorSet }, } impl Pallet { @@ -318,18 +321,12 @@ pub mod pallet { #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - { - let hash_set = - self.participants.iter().map(|key| key.0).collect::>(); - if hash_set.len() != self.participants.len() { - panic!("participants contained duplicates"); - } - } - for (id, stake) in self.networks.clone() { AllocationPerKeyShare::::set(id, Some(stake)); for participant in self.participants.clone() { - Pallet::::set_allocation(id, participant, stake); + if Pallet::::set_allocation(id, participant, stake) { + panic!("participants contained duplicates"); + } } Pallet::::new_set(id); } @@ -592,10 +589,10 @@ pub mod pallet { Self::participants(network).into() } - pub fn retire_session(network: NetworkId, session: Session) { - let set = ValidatorSet { network, session }; + pub fn retire_set(set: ValidatorSet) { MuSigKeys::::remove(set); Keys::::remove(set); + Pallet::::deposit_event(Event::SetRetired { set }); } /// Take the amount deallocatable. 
diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index afad025e2..1995776c3 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -170,7 +170,7 @@ impl Processor { for _ in 0 .. 60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = Serai::new(&serai_rpc).await else { continue }; - if client.get_latest_block_hash().await.is_err() { + if client.latest_block_hash().await.is_err() { continue; } break; diff --git a/tests/coordinator/src/tests/batch.rs b/tests/coordinator/src/tests/batch.rs index caa567333..75b732dc9 100644 --- a/tests/coordinator/src/tests/batch.rs +++ b/tests/coordinator/src/tests/batch.rs @@ -172,7 +172,7 @@ pub async fn batch( let batch = SignedBatch { batch, signature }; let serai = processors[0].serai().await; - let mut last_serai_block = serai.get_latest_block().await.unwrap().number(); + let mut last_serai_block = serai.latest_block().await.unwrap().number(); for processor in processors.iter_mut() { processor @@ -187,11 +187,11 @@ pub async fn batch( tokio::time::sleep(Duration::from_secs(6)).await; } - while last_serai_block <= serai.get_latest_block().await.unwrap().number() { + while last_serai_block <= serai.latest_block().await.unwrap().number() { let batch_events = serai - .get_batch_events( - serai.get_block_by_number(last_serai_block).await.unwrap().unwrap().hash(), - ) + .as_of(serai.block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .in_instructions() + .batch_events() .await .unwrap(); @@ -213,7 +213,7 @@ pub async fn batch( } // Verify the coordinator sends SubstrateBlock to all processors - let last_block = serai.get_block_by_number(last_serai_block).await.unwrap().unwrap(); + let last_block = serai.block_by_number(last_serai_block).await.unwrap().unwrap(); for processor in processors.iter_mut() { assert_eq!( processor.recv_message().await, diff --git a/tests/coordinator/src/tests/key_gen.rs b/tests/coordinator/src/tests/key_gen.rs index 
81a6aa15f..c7d801fbc 100644 --- a/tests/coordinator/src/tests/key_gen.rs +++ b/tests/coordinator/src/tests/key_gen.rs @@ -106,7 +106,7 @@ pub async fn key_gen( let network_key = (C::generator() * *network_priv_key).to_bytes().as_ref().to_vec(); let serai = processors[0].serai().await; - let mut last_serai_block = serai.get_latest_block().await.unwrap().number(); + let mut last_serai_block = serai.latest_block().await.unwrap().number(); wait_for_tributary().await; for (i, processor) in processors.iter_mut().enumerate() { @@ -148,11 +148,11 @@ pub async fn key_gen( tokio::time::sleep(Duration::from_secs(6)).await; } - while last_serai_block <= serai.get_latest_block().await.unwrap().number() { + while last_serai_block <= serai.latest_block().await.unwrap().number() { if !serai - .get_key_gen_events( - serai.get_block_by_number(last_serai_block).await.unwrap().unwrap().hash(), - ) + .as_of(serai.block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .validator_sets() + .key_gen_events() .await .unwrap() .is_empty() @@ -196,7 +196,9 @@ pub async fn key_gen( } assert_eq!( serai - .get_keys(set, serai.get_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .as_of(serai.block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .validator_sets() + .keys(set) .await .unwrap() .unwrap(), diff --git a/tests/coordinator/src/tests/sign.rs b/tests/coordinator/src/tests/sign.rs index 4c9903386..3afac8afc 100644 --- a/tests/coordinator/src/tests/sign.rs +++ b/tests/coordinator/src/tests/sign.rs @@ -17,13 +17,14 @@ use serai_client::{ NetworkId, Coin, Amount, Balance, BlockHash, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, - tokens::{ + coins::{ primitives::{OutInstruction, OutInstructionWithBalance}, TokensEvent, }, in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, + SeraiCoins, }; -use messages::{sign::SignId, SubstrateContext, CoordinatorMessage}; +use messages::{coordinator::PlanMeta, 
sign::SignId, SubstrateContext, CoordinatorMessage}; use crate::{*, tests::*}; @@ -210,7 +211,7 @@ async fn sign_test() { &serai .sign( &PairSigner::new(insecure_pair_from_name("Ferdie")), - &Serai::transfer_sri(address, Amount(1_000_000_000)), + &SeraiCoins::transfer_sri(address, Amount(1_000_000_000)), 0, Default::default(), ) @@ -243,27 +244,21 @@ async fn sign_test() { ) .await; - let block_included_in_hash = - serai.get_block_by_number(block_included_in).await.unwrap().unwrap().hash(); - - assert_eq!( - serai.get_sri_balance(block_included_in_hash, serai_addr).await.unwrap(), - 1_000_000_000 - ); - - // Verify the mint occurred as expected - assert_eq!( - serai.get_mint_events(block_included_in_hash).await.unwrap(), - vec![TokensEvent::Mint { address: serai_addr, balance }] - ); - assert_eq!( - serai.get_token_supply(block_included_in_hash, Coin::Bitcoin).await.unwrap(), - amount - ); - assert_eq!( - serai.get_token_balance(block_included_in_hash, Coin::Bitcoin, serai_addr).await.unwrap(), - amount - ); + { + let block_included_in_hash = + serai.block_by_number(block_included_in).await.unwrap().unwrap().hash(); + + let serai = serai.as_of(block_included_in_hash).coins(); + assert_eq!(serai.sri_balance(serai_addr).await.unwrap(), 1_000_000_000); + + // Verify the mint occurred as expected + assert_eq!( + serai.mint_events().await.unwrap(), + vec![TokensEvent::Mint { address: serai_addr, balance }] + ); + assert_eq!(serai.token_supply(Coin::Bitcoin).await.unwrap(), amount); + assert_eq!(serai.token_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount); + } // Trigger a burn let out_instruction = @@ -273,7 +268,7 @@ async fn sign_test() { &serai .sign( &serai_pair, - &Serai::burn(balance, out_instruction.clone()), + &SeraiCoins::burn(balance, out_instruction.clone()), 0, Default::default(), ) @@ -290,11 +285,11 @@ async fn sign_test() { tokio::time::sleep(Duration::from_secs(6)).await; } - while last_serai_block <= 
serai.get_latest_block().await.unwrap().number() { + while last_serai_block <= serai.latest_block().await.unwrap().number() { let burn_events = serai - .get_burn_events( - serai.get_block_by_number(last_serai_block).await.unwrap().unwrap().hash(), - ) + .as_of(serai.block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .coins() + .burn_events() .await .unwrap(); @@ -314,16 +309,11 @@ async fn sign_test() { } } - let last_serai_block = serai.get_block_by_number(last_serai_block).await.unwrap().unwrap(); + let last_serai_block = serai.block_by_number(last_serai_block).await.unwrap().unwrap(); let last_serai_block_hash = last_serai_block.hash(); - assert_eq!( - serai.get_token_supply(last_serai_block_hash, Coin::Bitcoin).await.unwrap(), - Amount(0) - ); - assert_eq!( - serai.get_token_balance(last_serai_block_hash, Coin::Bitcoin, serai_addr).await.unwrap(), - Amount(0) - ); + let serai = serai.as_of(last_serai_block_hash).coins(); + assert_eq!(serai.token_supply(Coin::Bitcoin).await.unwrap(), Amount(0)); + assert_eq!(serai.token_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0)); let mut plan_id = [0; 32]; OsRng.fill_bytes(&mut plan_id); @@ -356,7 +346,10 @@ async fn sign_test() { messages::coordinator::ProcessorMessage::SubstrateBlockAck { network: NetworkId::Bitcoin, block: last_serai_block.number(), - plans: vec![plan_id], + plans: vec![PlanMeta { + key: (Secp256k1::generator() * *network_key).to_bytes().to_vec(), + id: plan_id, + }], }, )) .await; diff --git a/tests/full-stack/src/lib.rs b/tests/full-stack/src/lib.rs index e2f850c22..a9b6c4523 100644 --- a/tests/full-stack/src/lib.rs +++ b/tests/full-stack/src/lib.rs @@ -141,7 +141,7 @@ impl Handles { for _ in 0 .. 
60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = Serai::new(&serai_rpc).await else { continue }; - if client.get_latest_block_hash().await.is_err() { + if client.latest_block_hash().await.is_err() { continue; } return client; diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index 098feb40f..e2236c858 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -15,8 +15,8 @@ use serai_client::{ }, validator_sets::primitives::{Session, ValidatorSet}, in_instructions::primitives::Shorthand, - tokens::primitives::OutInstruction, - PairTrait, PairSigner, + coins::primitives::OutInstruction, + PairTrait, PairSigner, SeraiCoins, }; use crate::tests::*; @@ -196,10 +196,11 @@ async fn mint_and_burn_test() { let print_at = halt_at / 2; for i in 0 .. halt_at { if let Some(key_pair) = serai - .get_keys( - ValidatorSet { network, session: Session(0) }, - serai.get_latest_block_hash().await.unwrap(), - ) + .with_current_latest_block() + .await + .unwrap() + .validator_sets() + .keys(ValidatorSet { network, session: Session(0) }) .await .unwrap() { @@ -240,7 +241,7 @@ async fn mint_and_burn_test() { &serai .sign( &PairSigner::new(insecure_pair_from_name("Ferdie")), - &Serai::transfer_sri(address, Amount(1_000_000_000)), + &SeraiCoins::transfer_sri(address, Amount(1_000_000_000)), 0, Default::default(), ) @@ -408,7 +409,11 @@ async fn mint_and_burn_test() { let print_at = halt_at / 2; for i in 0 .. 
halt_at { if serai - .get_last_batch_for_network(serai.get_latest_block_hash().await.unwrap(), network) + .with_current_latest_block() + .await + .unwrap() + .in_instructions() + .last_batch_for_network(network) .await .unwrap() .is_some() @@ -490,7 +495,7 @@ async fn mint_and_burn_test() { &serai .sign( serai_pair, - &Serai::burn(Balance { coin, amount: Amount(amount) }, out_instruction), + &SeraiCoins::burn(Balance { coin, amount: Amount(amount) }, out_instruction), nonce, Default::default(), ) diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index b78836649..f0ccb3c63 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -5,7 +5,7 @@ use std::{ use dkg::{Participant, tests::clone_without}; -use messages::{sign::SignId, SubstrateContext}; +use messages::{coordinator::PlanMeta, sign::SignId, SubstrateContext}; use serai_client::{ primitives::{BlockHash, crypto::RuntimePublic, PublicKey, SeraiAddress, NetworkId}, @@ -155,7 +155,7 @@ pub(crate) async fn sign_batch( pub(crate) async fn substrate_block( coordinator: &mut Coordinator, block: messages::substrate::CoordinatorMessage, -) -> Vec<[u8; 32]> { +) -> Vec<PlanMeta> { match block.clone() { messages::substrate::CoordinatorMessage::SubstrateBlock { context: _, diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 67703b99b..158262791 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -10,7 +10,7 @@ use messages::{sign::SignId, SubstrateContext}; use serai_client::{ primitives::{BlockHash, NetworkId}, in_instructions::primitives::Batch, - tokens::primitives::{OutInstruction, OutInstructionWithBalance}, + coins::primitives::{OutInstruction, OutInstructionWithBalance}, }; use crate::{*, tests::*}; @@ -236,7 +236,7 @@ fn send_test() { let (mut id, mut preprocesses) = recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), 0).await; // TODO: Should this use the Substrate key? 
- assert_eq!(id, SignId { key: key_pair.1.to_vec(), id: plans[0], attempt: 0 }); + assert_eq!(id, SignId { key: key_pair.1.to_vec(), id: plans[0].id, attempt: 0 }); // Trigger a random amount of re-attempts for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {